author    camthesaxman <camthesaxman@users.noreply.github.com>  2020-01-29 18:17:43 -0600
committer camthesaxman <camthesaxman@users.noreply.github.com>  2020-01-29 18:17:43 -0600
commit    cdc6e2c50f96119bdc4c1205ff5901ca82ec8357 (patch)
tree      3e9217eabcf444e166008411f445315606dded59 /gcc_arm/config/arm
parent    27176890c4a688ea7de44d3f55af32827016a9fd (diff)
add old compiler with ARM support
Diffstat (limited to 'gcc_arm/config/arm')
-rwxr-xr-x  gcc_arm/config/arm/README-interworking  742
-rwxr-xr-x  gcc_arm/config/arm/aof.h  453
-rwxr-xr-x  gcc_arm/config/arm/aout.h  323
-rwxr-xr-x  gcc_arm/config/arm/arm.c  7001
-rwxr-xr-x  gcc_arm/config/arm/arm.h  2218
-rwxr-xr-x  gcc_arm/config/arm/arm.md  6496
-rwxr-xr-x  gcc_arm/config/arm/arm_010110a.h  2211
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.c  7160
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.h  2309
-rwxr-xr-x  gcc_arm/config/arm/arm_020422.md  6508
-rwxr-xr-x  gcc_arm/config/arm/arm_020428.h  2309
-rwxr-xr-x  gcc_arm/config/arm/arm_990720.h  2210
-rwxr-xr-x  gcc_arm/config/arm/arm_990720.md  6488
-rwxr-xr-x  gcc_arm/config/arm/coff.h  211
-rwxr-xr-x  gcc_arm/config/arm/ecos-elf.h  29
-rwxr-xr-x  gcc_arm/config/arm/elf.h  374
-rwxr-xr-x  gcc_arm/config/arm/lib1funcs.asm  580
-rwxr-xr-x  gcc_arm/config/arm/lib1thumb.asm  572
-rwxr-xr-x  gcc_arm/config/arm/lib1thumb_981111.asm  747
-rwxr-xr-x  gcc_arm/config/arm/linux-aout.h  58
-rwxr-xr-x  gcc_arm/config/arm/linux-elf.h  204
-rwxr-xr-x  gcc_arm/config/arm/linux-elf26.h  32
-rwxr-xr-x  gcc_arm/config/arm/linux-gas.h  87
-rwxr-xr-x  gcc_arm/config/arm/linux.h  72
-rwxr-xr-x  gcc_arm/config/arm/netbsd.h  161
-rwxr-xr-x  gcc_arm/config/arm/pe.c  521
-rwxr-xr-x  gcc_arm/config/arm/pe.h  295
-rwxr-xr-x  gcc_arm/config/arm/riscix.h  151
-rwxr-xr-x  gcc_arm/config/arm/riscix1-1.h  100
-rwxr-xr-x  gcc_arm/config/arm/rix-gas.h  43
-rwxr-xr-x  gcc_arm/config/arm/semi.h  55
-rwxr-xr-x  gcc_arm/config/arm/semiaof.h  59
-rwxr-xr-x  gcc_arm/config/arm/t-arm-elf  35
-rwxr-xr-x  gcc_arm/config/arm/t-bare  34
-rwxr-xr-x  gcc_arm/config/arm/t-linux  42
-rwxr-xr-x  gcc_arm/config/arm/t-netbsd  7
-rwxr-xr-x  gcc_arm/config/arm/t-pe  31
-rwxr-xr-x  gcc_arm/config/arm/t-pe-thumb  37
-rwxr-xr-x  gcc_arm/config/arm/t-riscix  3
-rwxr-xr-x  gcc_arm/config/arm/t-semi  47
-rwxr-xr-x  gcc_arm/config/arm/t-semiaof  64
-rwxr-xr-x  gcc_arm/config/arm/t-thumb  31
-rwxr-xr-x  gcc_arm/config/arm/t-thumb-elf  32
-rwxr-xr-x  gcc_arm/config/arm/tcoff.h  192
-rwxr-xr-x  gcc_arm/config/arm/telf-oabi.h  244
-rwxr-xr-x  gcc_arm/config/arm/telf-oabi_020422.h  237
-rwxr-xr-x  gcc_arm/config/arm/telf.h  450
-rwxr-xr-x  gcc_arm/config/arm/telf_020422.h  443
-rwxr-xr-x  gcc_arm/config/arm/thumb.c  2132
-rwxr-xr-x  gcc_arm/config/arm/thumb.c.orig  2132
-rwxr-xr-x  gcc_arm/config/arm/thumb.c.rej  168
-rwxr-xr-x  gcc_arm/config/arm/thumb.h  1195
-rwxr-xr-x  gcc_arm/config/arm/thumb.h.orig  1195
-rwxr-xr-x  gcc_arm/config/arm/thumb.md  1174
-rwxr-xr-x  gcc_arm/config/arm/thumb.md.orig  1174
-rwxr-xr-x  gcc_arm/config/arm/thumb.md.rej  168
-rwxr-xr-x  gcc_arm/config/arm/thumb_000513.h  1187
-rwxr-xr-x  gcc_arm/config/arm/thumb_010110a.c  2124
-rwxr-xr-x  gcc_arm/config/arm/thumb_010110a.md  1166
-rwxr-xr-x  gcc_arm/config/arm/thumb_010309a.c  2132
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.c  2291
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.h  1295
-rwxr-xr-x  gcc_arm/config/arm/thumb_020422.md  1194
-rwxr-xr-x  gcc_arm/config/arm/thumb_020428.h  1297
-rwxr-xr-x  gcc_arm/config/arm/thumb_020428.md  1194
-rwxr-xr-x  gcc_arm/config/arm/thumb_981111.md  1166
-rwxr-xr-x  gcc_arm/config/arm/tpe.h  427
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf-oabi.h  36
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf.h  166
-rwxr-xr-x  gcc_arm/config/arm/unknown-elf_020422.h  163
-rwxr-xr-x  gcc_arm/config/arm/x-riscix  8
-rwxr-xr-x  gcc_arm/config/arm/xm-arm.h  68
-rwxr-xr-x  gcc_arm/config/arm/xm-linux.h  24
-rwxr-xr-x  gcc_arm/config/arm/xm-netbsd.h  7
-rwxr-xr-x  gcc_arm/config/arm/xm-thumb.h  1
75 files changed, 77992 insertions, 0 deletions
diff --git a/gcc_arm/config/arm/README-interworking b/gcc_arm/config/arm/README-interworking
new file mode 100755
index 0000000..46b76c9
--- /dev/null
+++ b/gcc_arm/config/arm/README-interworking
@@ -0,0 +1,742 @@
+ Arm / Thumb Interworking
+ ========================
+
+The Cygnus GNU Pro Toolkit for the ARM7T processor supports function
+calls between code compiled for the ARM instruction set and code
+compiled for the Thumb instruction set and vice versa. This document
+describes how that interworking support operates and explains the
+command line switches that should be used in order to produce working
+programs.
+
+Note: The Cygnus GNU Pro Toolkit does not support switching between
+compiling for the ARM instruction set and the Thumb instruction set
+on anything other than a per file basis. There are in fact two
+completely separate compilers, one that produces ARM assembler
+instructions and one that produces Thumb assembler instructions. The
+two compilers share the same assembler, linker and so on.
+
+
+1. Explicit interworking support for C and C++ files
+====================================================
+
+By default if a file is compiled without any special command line
+switches then the code produced will not support interworking.
+Provided that a program is made up entirely of object files and
+libraries produced in this way, each of which contains either exclusively
+ARM instructions or exclusively Thumb instructions, this will not
+matter and a working executable will be created.  If an attempt is
+made to link together mixed ARM and Thumb object files and libraries,
+then warning messages will be produced by the linker and a non-working
+executable will be created.
+
+In order to produce code which does support interworking it should be
+compiled with the
+
+ -mthumb-interwork
+
+command line option.  Provided that a program is made up entirely of
+object files and libraries built with this command line switch, a
+working executable will be produced, even if both ARM and Thumb
+instructions are used by the various components of the program. (No
+warning messages will be produced by the linker either).
+
+Note that specifying -mthumb-interwork does result in slightly larger,
+slower code being produced. This is why interworking support must be
+specifically enabled by a switch.
+
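+For example, assuming the arm-pe and thumb-pe toolchains used in
+section 10 below (the file names here are hypothetical), a mixed
+program might be built like this:
+
+	arm-pe-gcc   -O2 -mthumb-interwork -c arm_part.c
+	thumb-pe-gcc -O2 -mthumb-interwork -c thumb_part.c
+	arm-pe-gcc   arm_part.o thumb_part.o -o prog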
+
+2. Explicit interworking support for assembler files
+====================================================
+
+If assembler files are to be included into an interworking program
+then the following rules must be obeyed:
+
+ * Any externally visible functions must return by using the BX
+ instruction.
+
+ * Normal function calls can just use the BL instruction. The
+ linker will automatically insert code to switch between ARM
+ and Thumb modes as necessary.
+
+ * Calls via function pointers should use the BX instruction if
+ the call is made in ARM mode:
+
+ .code 32
+ mov lr, pc
+ bx rX
+
+ This code sequence will not work in Thumb mode however, since
+ the mov instruction will not set the bottom bit of the lr
+	  register.  Instead, a branch-and-link to the _call_via_rX
+	  functions should be used:
+
+ .code 16
+ bl _call_via_rX
+
+ where rX is replaced by the name of the register containing
+ the function address.
+
+ * All externally visible functions which should be entered in
+ Thumb mode must have the .thumb_func pseudo op specified just
+	  before their entry point, e.g.:
+
+ .code 16
+ .global function
+ .thumb_func
+ function:
+ ...start of function....
+
+ * All assembler files must be assembled with the switch
+ -mthumb-interwork specified on the command line. (If the file
+ is assembled by calling gcc it will automatically pass on the
+ -mthumb-interwork switch to the assembler, provided that it
+ was specified on the gcc command line in the first place.)
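+
+Putting these rules together, a minimal interworking-aware Thumb
+assembler file might look like this (the function name and return
+value are made up for illustration):
+
+	.code 16
+	.global answer
+	.thumb_func
+	answer:
+		mov r0, #42	@ the return value
+		bx  lr		@ return via BX, as the first rule requires
+
+It must, of course, be assembled with -mthumb-interwork specified on
+the command line.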
+
+
+3. Support for old, non-interworking aware code.
+================================================
+
+If it is necessary to link together code produced by an older,
+non-interworking aware compiler, or code produced by the new compiler
+but without the -mthumb-interwork command line switch specified, then
+there are two command line switches that can be used to support this.
+
+The switch
+
+ -mcaller-super-interworking
+
+will allow calls via function pointers in Thumb mode to work,
+regardless of whether the function pointer points to old,
+non-interworking aware code or not. Specifying this switch does
+produce slightly slower code however.
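+
+For example (the file name is hypothetical):
+
+	thumb-pe-gcc -O2 -mcaller-super-interworking -c callers.c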
+
+Note: There is no switch to allow calls via function pointers in ARM
+mode to be handled specially. Calls via function pointers from
+interworking aware ARM code to non-interworking aware ARM code work
+without any special considerations by the compiler. Calls via
+function pointers from interworking aware ARM code to non-interworking
+aware Thumb code however will not work. (Actually under some
+circumstances they may work, but there are no guarantees). This is
+because only the new compiler is able to produce Thumb code, and this
+compiler already has a command line switch to produce interworking
+aware code.
+
+
+The switch
+
+ -mcallee-super-interworking
+
+will allow non-interworking aware ARM or Thumb code to call Thumb
+functions, either directly or via function pointers. Specifying this
+switch does produce slightly larger, slower code however.
+
+Note: There is no switch to allow non-interworking aware ARM or Thumb
+code to call ARM functions. There is no need for any special handling
+of calls from non-interworking aware ARM code to interworking aware
+ARM functions; they just work normally.  Calls from non-interworking
+aware Thumb functions to ARM code, however, will not work.  There is no
+option to support this, since it is always possible to recompile the
+Thumb code to be interworking aware.
+
+As an alternative to the command line switch
+-mcallee-super-interworking, which affects all externally visible
+functions in a file, it is possible to specify an attribute or
+declspec for individual functions, indicating that that particular
+function should support being called by non-interworking aware code.
+The function should be defined like this:
+
+	int __attribute__((interfacearm)) function (void)
+ {
+ ... body of function ...
+ }
+
+or
+
+	int __declspec(interfacearm) function (void)
+ {
+ ... body of function ...
+ }
+
+
+
+4. Interworking support in dlltool
+==================================
+
+It is possible to create DLLs containing mixed ARM and Thumb code. It
+is also possible to call Thumb code in a DLL from an ARM program and
+vice versa. It is even possible to call ARM DLLs that have been compiled
+without interworking support (say by an older version of the compiler),
+from Thumb programs and still have things work properly.
+
+ A version of the `dlltool' program which supports the `--interwork'
+command line switch is needed, as well as the following special
+considerations when building programs and DLLs:
+
+*Use `-mthumb-interwork'*
+ When compiling files for a DLL or a program the `-mthumb-interwork'
+ command line switch should be specified if calling between ARM and
+ Thumb code can happen. If a program is being compiled and the
+ mode of the DLLs that it uses is not known, then it should be
+ assumed that interworking might occur and the switch used.
+
+*Use `-m thumb'*
+ If the exported functions from a DLL are all Thumb encoded then the
+ `-m thumb' command line switch should be given to dlltool when
+ building the stubs. This will make dlltool create Thumb encoded
+ stubs, rather than its default of ARM encoded stubs.
+
+ If the DLL consists of both exported Thumb functions and exported
+ ARM functions then the `-m thumb' switch should not be used.
+ Instead the Thumb functions in the DLL should be compiled with the
+ `-mcallee-super-interworking' switch, or with the `interfacearm'
+ attribute specified on their prototypes. In this way they will be
+ given ARM encoded prologues, which will work with the ARM encoded
+ stubs produced by dlltool.
+
+*Use `-mcaller-super-interworking'*
+ If it is possible for Thumb functions in a DLL to call
+ non-interworking aware code via a function pointer, then the Thumb
+ code must be compiled with the `-mcaller-super-interworking'
+ command line switch. This will force the function pointer calls
+ to use the _interwork_call_via_rX stub functions which will
+ correctly restore Thumb mode upon return from the called function.
+
+*Link with `libgcc.a'*
+  When the DLL is built, it may have to be linked with the GCC
+ library (`libgcc.a') in order to extract the _call_via_rX functions
+ or the _interwork_call_via_rX functions. This represents a partial
+ redundancy since the same functions *may* be present in the
+ application itself, but since they only take up 372 bytes this
+ should not be too much of a consideration.
+
+*Use `--support-old-code'*
+ When linking a program with an old DLL which does not support
+ interworking, the `--support-old-code' command line switch to the
+ linker should be used. This causes the linker to generate special
+ interworking stubs which can cope with old, non-interworking aware
+ ARM code, at the cost of generating bulkier code. The linker will
+ still generate a warning message along the lines of:
+ "Warning: input file XXX does not support interworking, whereas YYY does."
+ but this can now be ignored because the --support-old-code switch
+ has been used.
+
+
+
+5. How interworking support works
+=================================
+
+Switching between the ARM and Thumb instruction sets is accomplished
+via the BX instruction which takes as an argument a register name.
+Control is transferred to the address held in this register (with the
+bottom bit masked out), and if the bottom bit is set, then Thumb
+instruction processing is enabled, otherwise ARM instruction
+processing is enabled.
+
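+For example, the following ARM mode sequence (it is the same header
+that the (interfacearm) attribute described in section 7 produces)
+uses BX to switch into Thumb mode:
+
+	.code 32
+	orr r12, pc, #1	@ pc reads 8 bytes ahead, so r12 holds the
+			@ address of the code after the BX, with the
+			@ bottom bit set
+	bx  r12		@ branch there, enabling Thumb processing
+	.code 16
+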
+When the -mthumb-interwork command line switch is specified, gcc
+arranges for all functions to return to their caller by using the BX
+instruction. Thus provided that the return address has the bottom bit
+correctly initialised to indicate the instruction set of the caller,
+correct operation will ensue.
+
+When a function is called explicitly (rather than via a function
+pointer), the compiler generates a BL instruction to do this. The
+Thumb version of the BL instruction has the special property of
+setting the bottom bit of the LR register after it has stored the
+return address into it, so that a future BX instruction will correctly
+return to the instruction after the BL instruction, in Thumb mode.
+
+The BL instruction does not change modes itself however, so if an ARM
+function is calling a Thumb function, or vice versa, it is necessary
+to generate some extra instructions to handle this. This is done in
+the linker when it is storing the address of the referenced function
+into the BL instruction. If the BL instruction is an ARM style BL
+instruction, but the referenced function is a Thumb function, then the
+linker automatically generates a calling stub that converts from ARM
+mode to Thumb mode, puts the address of this stub into the BL
+instruction, and puts the address of the referenced function into the
+stub. Similarly if the BL instruction is a Thumb BL instruction, and
+the referenced function is an ARM function, the linker generates a
+stub which converts from Thumb to ARM mode, puts the address of this
+stub into the BL instruction, and the address of the referenced
+function into the stub.
+
+This is why it is necessary to mark Thumb functions with the
+.thumb_func pseudo op when creating assembler files. This pseudo op
+allows the assembler to distinguish between ARM functions and Thumb
+functions. (The Thumb version of GCC automatically generates these
+pseudo ops for any Thumb functions that it generates).
+
+Calls via function pointers work differently. Whenever the address of
+a function is taken, the linker examines the type of the function
+being referenced. If the function is a Thumb function, then it sets
+the bottom bit of the address. Technically this makes the address
+incorrect, since it is now one byte into the start of the function,
+but this is never a problem because:
+
+ a. with interworking enabled all calls via function pointer
+ are done using the BX instruction and this ignores the
+	  bottom bit when computing where to go.
+
+ b. the linker will always set the bottom bit when the address
+ of the function is taken, so it is never possible to take
+ the address of the function in two different places and
+ then compare them and find that they are not equal.
+
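+As a C level illustration of point (b) above, both of the pointers
+here receive exactly the same value, so comparing them for equality
+works as expected:
+
+	extern int thumb (void);
+
+	int (* p1) (void) = thumb;	/* bottom bit set by the linker */
+	int (* p2) (void) = thumb;	/* same value: p1 == p2 holds  */
+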
+As already mentioned any call via a function pointer will use the BX
+instruction (provided that interworking is enabled). The only problem
+with this is computing the return address for the return from the
+called function. For ARM code this can easily be done by the code
+sequence:
+
+ mov lr, pc
+ bx rX
+
+(where rX is the name of the register containing the function
+pointer). This code does not work for the Thumb instruction set,
+since the MOV instruction will not set the bottom bit of the LR
+register, so that when the called function returns, it will return in
+ARM mode not Thumb mode. Instead the compiler generates this
+sequence:
+
+ bl _call_via_rX
+
+(again where rX is the name of the register containing the function
+pointer). The special call_via_rX functions look like this:
+
+ .thumb_func
+_call_via_r0:
+ bx r0
+ nop
+
+The BL instruction ensures that the correct return address is stored
+in the LR register and then the BX instruction jumps to the address
+stored in the function pointer, switching modes if necessary.
+
+
+6. How caller-super-interworking support works
+==============================================
+
+When the -mcaller-super-interworking command line switch is specified
+it changes the code produced by the Thumb compiler so that all calls
+via function pointers (including virtual function calls) now go via a
+different stub function. The code to call via a function pointer now
+looks like this:
+
+ bl _interwork_call_via_r0
+
+Note: The compiler does not insist that r0 be used to hold the
+function address. Any register will do, and there are a suite of stub
+functions, one for each possible register. The stub functions look
+like this:
+
+ .code 16
+ .thumb_func
+_interwork_call_via_r0:
+ bx pc
+ nop
+
+ .code 32
+ tst r0, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx r0
+
+The stub first switches to ARM mode, since it is a lot easier to
+perform the necessary operations using ARM instructions. It then
+tests the bottom bit of the register containing the address of the
+function to be called. If this bottom bit is set then the function
+being called uses Thumb instructions and the BX instruction to come
+will switch back into Thumb mode before calling this function. (Note
+that it does not matter how this called function chooses to return to
+its caller, since both the caller and callee are Thumb functions,
+and no mode switching is necessary).  If the function being called is an
+ARM mode function however, the stub pushes the return address (with
+its bottom bit set) onto the stack, replaces the return address with
+the address of a piece of code called '_arm_return' and then
+performs a BX instruction to call the function.
+
+The '_arm_return' code looks like this:
+
+ .code 32
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+
+It simply retrieves the return address from the stack, and then
+performs a BX operation to return to the caller and switch back into
+Thumb mode.
+
+
+7. How callee-super-interworking support works
+==============================================
+
+When -mcallee-super-interworking is specified on the command line the
+Thumb compiler behaves as if every externally visible function that it
+compiles has had the (interfacearm) attribute specified for it. What
+this attribute does is to put a special, ARM mode header onto the
+function which forces a switch into Thumb mode:
+
+ without __attribute__((interfacearm)):
+
+ .code 16
+ .thumb_func
+ function:
+ ... start of function ...
+
+ with __attribute__((interfacearm)):
+
+ .code 32
+ function:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .thumb_func
+ .real_start_of_function:
+
+ ... start of function ...
+
+Note that since the function now expects to be entered in ARM mode, it
+no longer has the .thumb_func pseudo op specified for its name.
+Instead the pseudo op is attached to a new label .real_start_of_<name>
+(where <name> is the name of the function) which indicates the start
+of the Thumb code.  This does have the interesting side effect that
+if this function is now called from a Thumb mode piece of code
+outside of the current file, the linker will generate a calling stub
+to switch from Thumb mode into ARM mode, and then this is immediately
+overridden by the function's header which switches back into Thumb
+mode.
+
+In addition the (interfacearm) attribute also forces the function to
+return by using the BX instruction, even if it has not been compiled with
+the -mthumb-interwork command line flag, so that the correct mode will
+be restored upon exit from the function.
+
+
+8. Some examples
+================
+
+ Given these two test files:
+
+ int arm (void) { return 1 + thumb (); }
+
+ int thumb (void) { return 2 + arm (); }
+
+  The following pieces of assembler are produced by the ARM and Thumb
+versions of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, pc} pop {pc}
+
+ Note how the functions return without using the BX instruction. If
+these files were assembled and linked together they would fail to work
+because they do not change mode when returning to their caller.
+
+ `-O2 -mthumb-interwork':
+
+ .code 32 .code 16
+ .global _arm .global _thumb
+ .thumb_func
+ _arm: _thumb:
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+  Now the functions use BX to return to their caller.  They have grown
+by 4 and 2 bytes respectively, but they can now successfully be linked
+together and be expected to work.  The linker will replace the
+destinations of the two BL instructions with the addresses of calling
+stubs which convert to the correct mode before jumping to the called
+function.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32 .code 32
+ .global _arm .global _thumb
+ _arm: _thumb:
+ orr r12, pc, #1
+ bx r12
+ mov ip, sp .code 16
+ stmfd sp!, {fp, ip, lr, pc} push {lr}
+ sub fp, ip, #4
+ bl _thumb bl _arm
+ add r0, r0, #1 add r0, r0, #2
+ ldmea fp, {fp, sp, lr} pop {r1}
+ bx lr bx r1
+
+ The thumb function now has an ARM encoded prologue, and it no longer
+has the `.thumb_func' pseudo op attached to it.  The linker will not
+generate a calling stub for the call from arm() to thumb(), but it will
+still have to generate a stub for the call from thumb() to arm().  Also
+note how specifying `-mcallee-super-interworking' automatically
+implies `-mthumb-interwork'.
+
+
+9. Some Function Pointer Examples
+=================================
+
+ Given this test file:
+
+ int func (void) { return 1; }
+
+ int call (int (* ptr)(void)) { return ptr (); }
+
+ The following varying pieces of assembler are produced by the Thumb
+version of GCC depending upon the command line options used:
+
+ `-O2':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {pc}
+
+ Note how the two functions have different exit sequences. In
+particular call() uses pop {pc} to return, which would not work if the
+caller was in ARM mode. func() however, uses the BX instruction, even
+though `-mthumb-interwork' has not been specified, as this is the most
+efficient way to exit a function when the return address is held in the
+link register.
+
+ `-O2 -mthumb-interwork':
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ This time both functions return by using the BX instruction. This
+means that call() is now two bytes longer and several cycles slower
+than the previous version.
+
+ `-O2 -mcaller-super-interworking':
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .thumb_func
+ _call:
+ push {lr}
+ bl __interwork_call_via_r0
+ pop {pc}
+
+ Very similar to the first (non-interworking) version, except that a
+different stub is used to call via the function pointer. This new stub
+will work even if the called function is not interworking aware, and
+tries to return to call() in ARM mode. Note that the assembly code for
+call() is still not interworking aware itself, and so should not be
+called from ARM code.
+
+ `-O2 -mcallee-super-interworking':
+
+ .code 32
+ .globl _func
+ _func:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_func
+ .thumb_func
+ .real_start_of_func:
+ mov r0, #1
+ bx lr
+
+ .code 32
+ .globl _call
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+ Now both functions have an ARM coded prologue, and both functions
+return by using the BX instruction.  These functions are therefore
+interworking aware and can safely be called from ARM code.  The code for
+the call() function is now 10 bytes longer than the original,
+non-interworking aware version, an increase of over 200%.
+
+ If a prototype for call() is added to the source code, and this
+prototype includes the `interfacearm' attribute:
+
+ int __attribute__((interfacearm)) call (int (* ptr)(void));
+
+ then this code is produced (with only -O2 specified on the command
+line):
+
+ .code 16
+ .globl _func
+ .thumb_func
+ _func:
+ mov r0, #1
+ bx lr
+
+ .globl _call
+ .code 32
+ _call:
+ orr r12, pc, #1
+ bx r12
+
+ .code 16
+ .globl .real_start_of_call
+ .thumb_func
+ .real_start_of_call:
+ push {lr}
+ bl __call_via_r0
+ pop {r1}
+ bx r1
+
+  So now both call() and func() can safely be called from
+non-interworking aware ARM code.  If, when such a file is assembled,
+the assembler detects the fact that call() is being called by another
+function in the same file, it will automatically adjust the target of
+the BL instruction to point to .real_start_of_call. In this way there
+is no need for the linker to generate a Thumb-to-ARM calling stub so
+that call can be entered in ARM mode.
+
+
+10. How to use dlltool to build ARM/Thumb DLLs
+==============================================
+ Given a program (`prog.c') like this:
+
+ extern int func_in_dll (void);
+
+ int main (void) { return func_in_dll(); }
+
+ And a DLL source file (`dll.c') like this:
+
+ int func_in_dll (void) { return 1; }
+
+ Here is how to build the DLL and the program for a purely ARM based
+environment:
+
+*Step One
+ Build a `.def' file describing the DLL:
+
+ ; example.def
+ ; This file describes the contents of the DLL
+ LIBRARY example
+ HEAPSIZE 0x40000, 0x2000
+ EXPORTS
+ func_in_dll 1
+
+*Step Two
+ Compile the DLL source code:
+
+ arm-pe-gcc -O2 -c dll.c
+
+*Step Three
+ Use `dlltool' to create an exports file and a library file:
+
+ dlltool --def example.def --output-exp example.o --output-lib example.a
+
+*Step Four
+ Link together the complete DLL:
+
+ arm-pe-ld dll.o example.o -o example.dll
+
+*Step Five
+ Compile the program's source code:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file:
+
+ arm-pe-gcc prog.o example.a -o prog
+
+ If instead this was a Thumb DLL being called from an ARM program, the
+steps would look like this. (To save space only those steps that are
+different from the previous version are shown):
+
+*Step Two
+ Compile the DLL source code (using the Thumb compiler):
+
+ thumb-pe-gcc -O2 -c dll.c -mthumb-interwork
+
+*Step Three
+ Build the exports and library files (and support interworking):
+
+ dlltool -d example.def -z example.o -l example.a --interwork -m thumb
+
+*Step Five
+ Compile the program's source code (and support interworking):
+
+ arm-pe-gcc -O2 -c prog.c -mthumb-interwork
+
+ If instead, the DLL was an old, ARM DLL which does not support
+interworking, and which cannot be rebuilt, then these steps would be
+used.
+
+*Step One
+ Skip. If you do not have access to the sources of a DLL, there is
+ no point in building a `.def' file for it.
+
+*Step Two
+ Skip. With no DLL sources there is nothing to compile.
+
+*Step Three
+ Skip. Without a `.def' file you cannot use dlltool to build an
+ exports file or a library file.
+
+*Step Four
+ Skip. Without a set of DLL object files you cannot build the DLL.
+  Besides, it has already been built for you by somebody else.
+
+*Step Five
+ Compile the program's source code, this is the same as before:
+
+ arm-pe-gcc -O2 -c prog.c
+
+*Step Six
+ Link together the program and the DLL's library file, passing the
+ `--support-old-code' option to the linker:
+
+ arm-pe-gcc prog.o example.a -Wl,--support-old-code -o prog
+
+ Ignore the warning message about the input file not supporting
+  interworking, as the --support-old-code switch has taken care of this.
diff --git a/gcc_arm/config/arm/aof.h b/gcc_arm/config/arm/aof.h
new file mode 100755
index 0000000..6c21850
--- /dev/null
+++ b/gcc_arm/config/arm/aof.h
@@ -0,0 +1,453 @@
+/* Definitions of target machine for GNU compiler, for Advanced RISC Machines
+ ARM compilation, AOF Assembler.
+ Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+
+#define AOF_ASSEMBLER
+
+#define LINK_LIBGCC_SPECIAL 1
+
+#define LINK_SPEC "%{aof} %{bin} %{aif} %{ihf} %{shl,*} %{reent*} %{split} \
+ %{ov*,*} %{reloc*} -nodebug"
+
+#define STARTFILE_SPEC "crtbegin.o%s"
+
+#define ENDFILE_SPEC "crtend.o%s"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{g -g} -arch 4 \
+-apcs 3%{mapcs-32:/32bit}%{mapcs-26:/26bit}%{!mapcs-26:%{!mapcs-32:/26bit}}
+#endif
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+#endif
+
+#define LIBGCC_SPEC "libgcc.a%s"
+
+/* Dividing the Output into Sections (Text, Data, ...) */
+/* AOF Assembler syntax is a nightmare when it comes to areas, since once
+ we change from one area to another, we can't go back again. Instead,
+ we must create a new area with the same attributes and add the new output
+ to that. Unfortunately, there is nothing we can do here to guarantee that
+ two areas with the same attributes will be linked adjacently in the
+ resulting executable, so we have to be careful not to do pc-relative
+ addressing across such boundaries. */
+char *aof_text_section ();
+#define TEXT_SECTION_ASM_OP aof_text_section ()
+
+#define SELECT_RTX_SECTION(MODE,RTX) text_section ();
+
+char *aof_data_section ();
+#define DATA_SECTION_ASM_OP aof_data_section ()
+
+#define EXTRA_SECTIONS in_zero_init, in_ctor, in_dtor, in_common
+
+#define EXTRA_SECTION_FUNCTIONS \
+ZERO_INIT_SECTION \
+CTOR_SECTION \
+DTOR_SECTION \
+COMMON_SECTION
+
+#define ZERO_INIT_SECTION \
+void \
+zero_init_section () \
+{ \
+ static int zero_init_count = 1; \
+ if (in_section != in_zero_init) \
+ { \
+ fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", \
+ zero_init_count++); \
+ in_section = in_zero_init; \
+ } \
+}
+
+#define CTOR_SECTION \
+void \
+ctor_section () \
+{ \
+ static int ctors_once = 0; \
+ if (in_section != in_ctor) \
+ { \
+ if (ctors_once) \
+ { \
+ fprintf (stderr, \
+ "Attempt to output more than one ctor section\n"); \
+ abort (); \
+ } \
+ fprintf (asm_out_file, "\t%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctor; \
+ ctors_once = 1; \
+ } \
+}
+
+#define DTOR_SECTION \
+void \
+dtor_section () \
+{ \
+ static int dtors_once = 0; \
+ if (in_section != in_dtor) \
+ { \
+ if (dtors_once) \
+ { \
+ fprintf (stderr, \
+ "Attempt to output more than one dtor section\n"); \
+ abort (); \
+ } \
+ fprintf (asm_out_file, "\t%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtor; \
+ dtors_once = 1; \
+ } \
+}
+
+/* Used by ASM_OUTPUT_COMMON (below) to tell varasm.c that we've
+ changed areas. */
+#define COMMON_SECTION \
+void \
+common_section () \
+{ \
+ static int common_count = 1; \
+ if (in_section != in_common) \
+ { \
+ in_section = in_common; \
+ } \
+}
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+extern func_ptr __CTOR_END__[1]; \
+func_ptr __CTOR_LIST__[1] = {__CTOR_END__};
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *ptr = __CTOR_LIST__ + 1; \
+ while (*ptr) \
+ (*ptr++) (); \
+} while (0)
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+extern func_ptr __DTOR_END__[1]; \
+func_ptr __DTOR_LIST__[1] = {__DTOR_END__};
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define DO_GLOBAL_DTORS_BODY \
+do { \
+ func_ptr *ptr = __DTOR_LIST__ + 1; \
+ while (*ptr) \
+ (*ptr++) (); \
+} while (0)
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#ifndef ARM_OS_NAME
+#define ARM_OS_NAME "(generic)"
+#endif
+
+/* For the AOF linker, we need to reference __main to force the standard
+ library to get linked in. */
+
+#define ASM_FILE_START(STREAM) \
+{ \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for ARM/%s\n", \
+ ASM_COMMENT_START, version_string, ARM_OS_NAME); \
+ fprintf ((STREAM), "__a1\tRN\t0\n"); \
+ fprintf ((STREAM), "__a2\tRN\t1\n"); \
+ fprintf ((STREAM), "__a3\tRN\t2\n"); \
+ fprintf ((STREAM), "__a4\tRN\t3\n"); \
+ fprintf ((STREAM), "__v1\tRN\t4\n"); \
+ fprintf ((STREAM), "__v2\tRN\t5\n"); \
+ fprintf ((STREAM), "__v3\tRN\t6\n"); \
+ fprintf ((STREAM), "__v4\tRN\t7\n"); \
+ fprintf ((STREAM), "__v5\tRN\t8\n"); \
+ fprintf ((STREAM), "__v6\tRN\t9\n"); \
+ fprintf ((STREAM), "__sl\tRN\t10\n"); \
+ fprintf ((STREAM), "__fp\tRN\t11\n"); \
+ fprintf ((STREAM), "__ip\tRN\t12\n"); \
+ fprintf ((STREAM), "__sp\tRN\t13\n"); \
+ fprintf ((STREAM), "__lr\tRN\t14\n"); \
+ fprintf ((STREAM), "__pc\tRN\t15\n"); \
+ fprintf ((STREAM), "__f0\tFN\t0\n"); \
+ fprintf ((STREAM), "__f1\tFN\t1\n"); \
+ fprintf ((STREAM), "__f2\tFN\t2\n"); \
+ fprintf ((STREAM), "__f3\tFN\t3\n"); \
+ fprintf ((STREAM), "__f4\tFN\t4\n"); \
+ fprintf ((STREAM), "__f5\tFN\t5\n"); \
+ fprintf ((STREAM), "__f6\tFN\t6\n"); \
+ fprintf ((STREAM), "__f7\tFN\t7\n"); \
+ text_section (); \
+}
+
+/* Some systems use __main in a way incompatible with its use in gcc, in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define ASM_FILE_END(STREAM) \
+do \
+{ \
+ if (flag_pic) \
+ aof_dump_pic_table (STREAM); \
+ aof_dump_imports (STREAM); \
+ fputs ("\tEND\n", (STREAM)); \
+} while (0);
+
+#define ASM_IDENTIFY_GCC(STREAM) fputs ("|gcc2_compiled.|\n", (STREAM))
+
+#define ASM_COMMENT_START ";"
+
+#define ASM_APP_ON ""
+
+#define ASM_APP_OFF ""
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+ ASM_OUTPUT_DOUBLE((STREAM),(VALUE))
+
+#define ASM_OUTPUT_DOUBLE(STREAM,VALUE) \
+do { \
+ char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), l); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.14g", dstr); \
+ fprintf ((STREAM), "\tDCD &%lx, &%lx\t%s double %s\n", \
+ l[0], l[1], ASM_COMMENT_START, dstr); \
+} while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM,VALUE) \
+do { \
+ char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), l); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.7g", dstr); \
+  fprintf ((STREAM), "\tDCD &%lx\t%s float %s\n",		\
+ l, ASM_COMMENT_START, dstr); \
+} while (0)
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCD\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCW\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+ (fprintf ((STREAM), "\tDCB\t"), \
+ output_addr_const ((STREAM), (VALUE)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\tDCB\t%d\n", (VALUE))
+
+#define ASM_OUTPUT_ASCII(STREAM,PTR,LEN) \
+{ \
+ int i; \
+ char *ptr = (PTR); \
+ fprintf ((STREAM), "\tDCB"); \
+ for (i = 0; i < (LEN); i++) \
+ fprintf ((STREAM), " &%02x%s", \
+ (unsigned ) *(ptr++), \
+ (i + 1 < (LEN) \
+ ? ((i & 3) == 3 ? "\n\tDCB" : ",") \
+ : "\n")); \
+}
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == '\n')
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Output of Uninitialized Variables */
+
+#define ASM_OUTPUT_COMMON(STREAM,NAME,SIZE,ROUNDED) \
+ (common_section (), \
+ fprintf ((STREAM), "\tAREA "), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ", DATA, COMMON\n\t%% %d\t%s size=%d\n", \
+ (ROUNDED), ASM_COMMENT_START, SIZE))
+
+#define ASM_OUTPUT_LOCAL(STREAM,NAME,SIZE,ROUNDED) \
+ (zero_init_section (), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), "\n"), \
+ fprintf ((STREAM), "\t%% %d\t%s size=%d\n", \
+ (ROUNDED), ASM_COMMENT_START, SIZE))
+
+/* Output and Generation of Labels */
+
+extern int arm_main_function;
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+do { \
+ fprintf ((STREAM), "\tEXPORT\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+ if ((NAME)[0] == 'm' && ! strcmp ((NAME), "main")) \
+ arm_main_function = 1; \
+} while (0)
+
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+do { \
+ assemble_name (STREAM,NAME); \
+ fputs ("\n", STREAM); \
+} while (0)
+
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) \
+{ \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_DECLARE_OBJECT_NAME(STREAM,NAME,DECL) \
+{ \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ if (! TREE_PUBLIC (DECL)) \
+ { \
+ fputs ("\tKEEP ", STREAM); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ } \
+ aof_delete_import ((NAME)); \
+}
+
+#define ASM_OUTPUT_EXTERNAL(STREAM,DECL,NAME) \
+ aof_add_import ((NAME))
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(STREAM,SYMREF) \
+ (fprintf ((STREAM), "\tIMPORT\t"), \
+ assemble_name ((STREAM), XSTR ((SYMREF), 0)), \
+ fputc ('\n', (STREAM)))
+
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "|%s|", NAME)
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*|%s..%d|", (PREFIX), (NUM))
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* How initialization functions are handled */
+
+#define CTORS_SECTION_ASM_OP "AREA\t|C$$gnu_ctorsvec|, DATA, READONLY"
+#define DTORS_SECTION_ASM_OP "AREA\t|C$$gnu_dtorsvec|, DATA, READONLY"
+
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctor_section (); \
+ fprintf ((STREAM), "\tDCD\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+} while (0);
+
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtor_section (); \
+ fprintf ((STREAM), "\tDCD\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fputc ('\n', (STREAM)); \
+} while (0);
+
+/* Output of Assembler Instructions */
+
+#define REGISTER_NAMES \
+{ \
+ "a1", "a2", "a3", "a4", \
+ "v1", "v2", "v3", "v4", \
+ "v5", "v6", "sl", "fp", \
+ "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", \
+ "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp" \
+}
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"r0", 0}, {"a1", 0}, \
+ {"r1", 1}, {"a2", 1}, \
+ {"r2", 2}, {"a3", 2}, \
+ {"r3", 3}, {"a4", 3}, \
+ {"r4", 4}, {"v1", 4}, \
+ {"r5", 5}, {"v2", 5}, \
+ {"r6", 6}, {"v3", 6}, \
+ {"r7", 7}, {"wr", 7}, \
+ {"r8", 8}, {"v5", 8}, \
+ {"r9", 9}, {"v6", 9}, \
+ {"r10", 10}, {"sl", 10}, {"v7", 10}, \
+ {"r11", 11}, {"fp", 11}, \
+ {"r12", 12}, {"ip", 12}, \
+ {"r13", 13}, {"sp", 13}, \
+ {"r14", 14}, {"lr", 14}, \
+ {"r15", 15}, {"pc", 15} \
+}
+
+#define REGISTER_PREFIX "__"
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX ""
+
+/* Output of Dispatch Tables */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf ((STREAM), "\tb\t|L..%d|\n", (VALUE))
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf ((STREAM), "\tDCD\t|L..%d|\n", (VALUE))
+
+/* A label marking the start of a jump table is a data label. */
+#define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) \
+ fprintf ((STREAM), "\tALIGN\n|%s..%d|\n", (PREFIX), (NUM))
+
+/* Assembler Commands for Alignment */
+
+#define ASM_OUTPUT_SKIP(STREAM,NBYTES) \
+ fprintf ((STREAM), "\t%%\t%d\n", (NBYTES))
+
+#define ASM_OUTPUT_ALIGN(STREAM,POWER) \
+do { \
+ register int amount = 1 << (POWER); \
+ if (amount == 2) \
+ fprintf ((STREAM), "\tALIGN 2\n"); \
+ else if (amount == 4) \
+ fprintf ((STREAM), "\tALIGN\n"); \
+ else \
+ fprintf ((STREAM), "\tALIGN %d\n", amount); \
+} while (0)
+
+#include "arm/arm.h"
+
+#undef DBX_DEBUGGING_INFO
diff --git a/gcc_arm/config/arm/aout.h b/gcc_arm/config/arm/aout.h
new file mode 100755
index 0000000..42a12ea
--- /dev/null
+++ b/gcc_arm/config/arm/aout.h
@@ -0,0 +1,323 @@
+/* Definitions of target machine for GNU compiler, for ARM with a.out
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rearnsha@armltd.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef ARM_OS_NAME
+#define ARM_OS_NAME "(generic)"
+#endif
+
+/* The text to go at the start of the assembler file */
+#ifndef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+{ \
+ fprintf (STREAM,"%srfp\t.req\t%sr9\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%ssl\t.req\t%sr10\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%sfp\t.req\t%sr11\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%sip\t.req\t%sr12\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%ssp\t.req\t%sr13\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%slr\t.req\t%sr14\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM,"%spc\t.req\t%sr15\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+}
+#endif
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* Note: If USER_LABEL_PREFIX or LOCAL_LABEL_PREFIX are changed,
+ make sure that this change is reflected in the function
+ coff_arm_is_local_label_name() in bfd/coff-arm.c */
+#ifndef REGISTER_PREFIX
+#define REGISTER_PREFIX ""
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+#endif
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX ""
+#endif
+
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "cc", "sfp", "afp" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"rfp", 9}, /* Gcc used to call it this */ \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* Arm Assembler barfs on dollars */
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL
+
+/* DBX register number for a given compiler register number */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Generate DBX debugging information. riscix.h will undefine this because
+ the native assembler does not support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/* Acorn dbx moans about continuation chars, so don't use any. */
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
+
+/* Output a source filename for the debugger. RISCiX dbx insists that the
+ ``desc'' field is set to compiler version number >= 315 (sic). */
+#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(STREAM,NAME) \
+do { \
+ fprintf (STREAM, ".stabs \"%s\",%d,0,315,%s\n", (NAME), N_SO, \
+ &ltext_label_name[1]); \
+ text_section (); \
+ ASM_OUTPUT_INTERNAL_LABEL (STREAM, "Ltext", 0); \
+} while (0)
+
+/* Output a function label definition. */
+#ifndef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) ASM_OUTPUT_LABEL (STREAM, NAME)
+#endif
+
+#ifndef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+do { \
+ assemble_name (STREAM,NAME); \
+ fputs (":\n", STREAM); \
+} while (0)
+#endif
+
+/* Output a globalising directive for a label. */
+#ifndef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf (STREAM, "\t.global\t"), \
+ assemble_name (STREAM, NAME), \
+ fputc ('\n',STREAM))
+#endif
+
+/* Make an internal label into a string. */
+#ifndef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*%s%s%d", LOCAL_LABEL_PREFIX, PREFIX, NUM)
+#endif
+
+/* Nothing special is done about jump tables */
+/* #define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) */
+/* #define ASM_OUTPUT_CASE_END(STREAM,NUM,TABLE) */
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", LOCAL_LABEL_PREFIX, (VALUE))
+
+/* Output various types of constants. For real numbers we output hex, with
+ a comment containing the "human" value, this allows us to pass NaN's which
+ the riscix assembler doesn't understand (it also makes cross-assembling
+ less likely to fail). */
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (12); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (8); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+#define ASM_OUTPUT_INT(STREAM, EXP) \
+ { \
+ fprintf (STREAM, "\t.word\t"); \
+ OUTPUT_INT_ADDR_CONST (STREAM, (EXP)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM); \
+ }
+
+#define ASM_OUTPUT_SHORT(STREAM, EXP) \
+ (fprintf (STREAM, "\t.short\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (2), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_CHAR(STREAM, EXP) \
+ (fprintf (STREAM, "\t.byte\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (1), \
+ /* END CYGNUS LOCAL */ \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_BYTE(STREAM, VALUE) \
+ /* CYGNUS LOCAL */ \
+ (fprintf (STREAM, "\t.byte\t%d\n", VALUE), \
+ arm_increase_location (1))
+ /* END CYGNUS LOCAL */
+
+#define ASM_OUTPUT_ASCII(STREAM, PTR, LEN) \
+ output_ascii_pseudo_op ((STREAM), (unsigned char *)(PTR), (LEN))
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ /* CYGNUS LOCAL */ \
+ (arm_increase_location (NBYTES), \
+ fprintf (STREAM, "\t.space\t%d\n", NBYTES)) \
+ /* END CYGNUS LOCAL */
+
+/* Align output to a power of two. Horrible /bin/as. */
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do \
+ { \
+ register int amount = 1 << (POWER); \
+ /* CYGNUS LOCAL */ \
+ extern int arm_text_location; \
+ /* END CYGNUS LOCAL */ \
+ \
+ if (amount == 2) \
+ fprintf (STREAM, "\t.even\n"); \
+ else if (amount != 1) \
+ fprintf (STREAM, "\t.align\t%d\n", amount - 4); \
+ \
+ /* CYGNUS LOCAL */ \
+ if (in_text_section ()) \
+ arm_text_location = ((arm_text_location + amount - 1) \
+ & ~(amount - 1)); \
+ /* END CYGNUS LOCAL */ \
+ } while (0)
+
+/* Output a common block */
+#ifndef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf (STREAM, "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf (STREAM, ", %d\t%s %d\n", ROUNDED, ASM_COMMENT_START, SIZE))
+#endif
+
+/* Output a local common block. /bin/as can't do this, so hack a
+ `.space' into the bss segment. Note that this is *bad* practice,
+ which is guaranteed NOT to work since it doesn't define STATIC
+ COMMON space but merely STATIC BSS space. */
+#ifndef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM,NAME,SIZE,ALIGN) \
+ do { \
+ bss_section (); \
+ ASM_OUTPUT_ALIGN (STREAM, floor_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (STREAM, NAME); \
+ fprintf (STREAM, "\t.space\t%d\n", SIZE); \
+ } while (0)
+#endif
+
+/* Output a zero-initialized block. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(STREAM,DECL,NAME,SIZE,ALIGN) \
+ asm_output_aligned_bss (STREAM, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* Output a source line for the debugger. */
+/* #define ASM_OUTPUT_SOURCE_LINE(STREAM,LINE) */
+
+/* Output a #ident directive. */
+#ifndef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(STREAM,STRING) \
+ fprintf (STREAM, "%s - - - ident %s\n", ASM_COMMENT_START, STRING)
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* This works for GAS and some other assemblers. */
+#define SET_ASM_OP ".set"
+
+#include "arm/arm.h"
diff --git a/gcc_arm/config/arm/arm.c b/gcc_arm/config/arm/arm.c
new file mode 100755
index 0000000..06d942a
--- /dev/null
+++ b/gcc_arm/config/arm/arm.c
@@ -0,0 +1,7001 @@
+/* Output routines for GCC for ARM.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+
+/* The maximum number of insns skipped which will be conditionalised if
+ possible. */
+static int max_insns_skipped = 5;
+
+extern FILE *asm_out_file;
+/* Some function declarations. */
+
+/* CYGNUS LOCAL */
+void arm_increase_location PROTO ((int));
+static int get_prologue_size PROTO ((void));
+/* END CYGNUS LOCAL */
+
+static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));
+static char *output_multi_immediate PROTO ((rtx *, char *, char *, int,
+ HOST_WIDE_INT));
+static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, rtx, rtx, int, int));
+static int arm_naked_function_p PROTO ((tree));
+static void init_fpa_table PROTO ((void));
+static enum machine_mode select_dominance_cc_mode PROTO ((enum rtx_code, rtx,
+ rtx, HOST_WIDE_INT));
+static HOST_WIDE_INT add_constant PROTO ((rtx, enum machine_mode, int *));
+static void dump_table PROTO ((rtx));
+static int fixit PROTO ((rtx, enum machine_mode, int));
+static rtx find_barrier PROTO ((rtx, int));
+static int broken_move PROTO ((rtx));
+static char *fp_const_from_val PROTO ((REAL_VALUE_TYPE *));
+static int eliminate_lr2ip PROTO ((rtx *));
+static char *shift_op PROTO ((rtx, HOST_WIDE_INT *));
+static int pattern_really_clobbers_lr PROTO ((rtx));
+static int function_really_clobbers_lr PROTO ((rtx));
+static void emit_multi_reg_push PROTO ((int));
+static void emit_sfm PROTO ((int, int));
+static enum arm_cond_code get_arm_condition_code PROTO ((rtx));
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+
+rtx arm_compare_op0, arm_compare_op1;
+int arm_compare_fp;
+
+/* CYGNUS LOCAL: Definition of arm_cpu deleted. */
+
+/* What type of floating point are we tuning for? */
+enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available? */
+enum floating_point_type arm_fpu_arch;
+
+/* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
+enum prog_mode_type arm_prgmode;
+
+/* CYGNUS LOCAL: Name changed to fpe. */
+/* Set by the -mfpe=... option */
+char *target_fpe_name = NULL;
+/* END CYGNUS LOCAL */
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_CO_PROC 0x01 /* Has external co-processor bus */
+#define FL_FAST_MULT 0x02 /* Fast multiply */
+#define FL_MODE26 0x04 /* 26-bit mode support */
+#define FL_MODE32 0x08 /* 32-bit mode support */
+#define FL_ARCH4 0x10 /* Architecture rel 4 */
+#define FL_THUMB 0x20 /* Thumb aware */
+#define FL_LDSCHED 0x40 /* Load scheduling necessary */
+#define FL_STRONG 0x80 /* StrongARM */
+
+/* The bits in this mask specify which instructions we are allowed to generate. */
+static int insn_flags = 0;
+/* The bits in this mask specify which instruction scheduling options should
+ be used. Note - there is an overlap with the FL_FAST_MULT. For some
+ hardware we want to be able to generate the multiply instructions, but to
+ tune as if they were not present in the architecture. */
+static int tune_flags = 0;
+
+/* The following are used in the arm.md file as equivalents to bits
+ in the above two flag variables. */
+
+/* Nonzero if this is an "M" variant of the processor. */
+int arm_fast_multiply = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+int arm_arch4 = 0;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+int arm_ld_sched = 0;
+
+/* Nonzero if this chip is a StrongARM. */
+int arm_is_strong = 0;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+int arm_is_6_or_7 = 0;
+
+/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
+ must report the mode of the memory reference from PRINT_OPERAND to
+ PRINT_OPERAND_ADDRESS. */
+enum machine_mode output_memory_reference_mode;
+
+/* Nonzero if the current function has anonymous arguments, in which case
+   the prologue must set up `fp'.  */
+int current_function_anonymous_args;
+
+/* The register number to be used for the PIC offset register. */
+int arm_pic_register = 9;
+
+/* Location counter of .text segment. */
+int arm_text_location = 0;
+
+/* Set to one if we think that lr is only saved because of subroutine calls,
+ but all of these can be `put after' return insns */
+int lr_save_eliminated;
+
+/* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+
+static int return_used_this_function;
+
+/* Set to 1 after arm_reorg has started. Reset to start at the start of
+ the next function. */
+static int after_arm_reorg = 0;
+
+/* The maximum number of insns to be used when loading a constant. */
+static int arm_constant_limit = 3;
+
+/* CYGNUS LOCAL unknown */
+/* A hash table is used to store text segment labels and their associated
+ offset from the start of the text segment. */
+struct label_offset
+{
+ char * name;
+ int offset;
+ struct label_offset * cdr;
+};
+
+#define LABEL_HASH_SIZE 257
+
+static struct label_offset * offset_table [LABEL_HASH_SIZE];
+/* END CYGNUS LOCAL */
+
+/* For an explanation of these variables, see final_prescan_insn below. */
+int arm_ccfsm_state;
+enum arm_cond_code arm_current_cc;
+rtx arm_target_insn;
+int arm_target_label;
+
+/* The condition codes of the ARM, and the inverse function. */
+char *arm_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+
+
+/* Initialization code */
+
+struct processors
+{
+ char * name;
+ unsigned int flags;
+};
+
+/* Not all of these give usefully different compilation alternatives,
+ but there is no simple way of generalizing them. */
+static struct processors all_cores[] =
+{
+ /* ARM Cores */
+
+ {"arm2", FL_CO_PROC | FL_MODE26 },
+ {"arm250", FL_CO_PROC | FL_MODE26 },
+ {"arm3", FL_CO_PROC | FL_MODE26 },
+ {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm610", FL_MODE26 | FL_MODE32 },
+ {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* arm7m doesn't exist on its own, */
+ {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* but only with D, (and I), */
+ {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* but those don't alter the code, */
+ {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* so arm7m is sometimes used. */
+ {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm710", FL_MODE26 | FL_MODE32 },
+ {"arm710c", FL_MODE26 | FL_MODE32 },
+ {"arm7100", FL_MODE26 | FL_MODE32 },
+ {"arm7500", FL_MODE26 | FL_MODE32 },
+ {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* Doesn't really have an external co-proc, but does have embedded fpu. */
+ {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+
+ {NULL, 0}
+};
+
+static struct processors all_architectures[] =
+{
+ /* ARM Architectures */
+
+ {"armv2", FL_CO_PROC | FL_MODE26 },
+ {"armv2a", FL_CO_PROC | FL_MODE26 },
+ {"armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
+ /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
+ implementations that support it, so we will leave it out for now. */
+ {"armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {NULL, 0}
+};
+
+/* This is a magic structure.  The 'string' field is magically filled in
+   with a pointer to the value specified by the user on the command line,
+   assuming that the user has specified such a value.  */
+
+struct arm_cpu_select arm_select[] =
+{
+ /* string name processors */
+ { NULL, "-mcpu=", all_cores },
+ { NULL, "-march=", all_architectures },
+ { NULL, "-mtune=", all_cores }
+};
+
+/* Return the number of bits set in VALUE.  */
+static unsigned int
+bit_count (value)
+ signed int value;
+{
+ unsigned int count = 0;
+
+ while (value)
+ {
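+      /* VALUE & -VALUE isolates the least significant set bit, so
+         clearing it on each iteration means the loop runs once per
+         set bit.  */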
+ value &= ~(value & - value);
+ ++ count;
+ }
+
+ return count;
+}
+
+/* Fix up any incompatible options that the user has specified.
+ This has now turned into a maze. */
+void
+arm_override_options ()
+{
+ unsigned i;
+
+ /* Set up the flags based on the cpu/architecture selected by the user. */
+ for (i = sizeof (arm_select) / sizeof (arm_select[0]); i--;)
+ {
+ struct arm_cpu_select * ptr = arm_select + i;
+
+ if (ptr->string != NULL && ptr->string[0] != '\0')
+ {
+ const struct processors * sel;
+
+ for (sel = ptr->processors; sel->name != NULL; sel ++)
+ if (! strcmp (ptr->string, sel->name))
+ {
+ if (i == 2)
+ tune_flags = sel->flags;
+ else
+ {
+ /* If we have been given an architecture and a processor
+ make sure that they are compatible. We only generate
+ a warning though, and we prefer the CPU over the
+ architecture. */
+ if (insn_flags != 0 && (insn_flags ^ sel->flags))
+ warning ("switch -mcpu=%s conflicts with -march= switch",
+ ptr->string);
+
+ insn_flags = sel->flags;
+ }
+
+ break;
+ }
+
+ if (sel->name == NULL)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* If the user did not specify a processor, choose one for them. */
+ if (insn_flags == 0)
+ {
+ struct processors * sel;
+ unsigned int sought;
+ static struct cpu_default
+ {
+ int cpu;
+ char * name;
+ }
+ cpu_defaults[] =
+ {
+ { TARGET_CPU_arm2, "arm2" },
+ { TARGET_CPU_arm6, "arm6" },
+ { TARGET_CPU_arm610, "arm610" },
+ { TARGET_CPU_arm710, "arm710" },
+ { TARGET_CPU_arm7m, "arm7m" },
+ { TARGET_CPU_arm7500fe, "arm7500fe" },
+ { TARGET_CPU_arm7tdmi, "arm7tdmi" },
+ { TARGET_CPU_arm8, "arm8" },
+ { TARGET_CPU_arm810, "arm810" },
+ { TARGET_CPU_arm9, "arm9" },
+ { TARGET_CPU_strongarm, "strongarm" },
+ { TARGET_CPU_generic, "arm" },
+ { 0, 0 }
+ };
+ struct cpu_default * def;
+
+ /* Find the default. */
+ for (def = cpu_defaults; def->name; def ++)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+
+ /* Make sure we found the default CPU. */
+ if (def->name == NULL)
+ abort ();
+
+ /* Find the default CPU's flags. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if (! strcmp (def->name, sel->name))
+ break;
+
+ if (sel->name == NULL)
+ abort ();
+
+ insn_flags = sel->flags;
+
+      /* Now check to see if the user has specified any command line
+	 switches that require certain abilities from the cpu.  */
+ sought = 0;
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ sought |= (FL_THUMB | FL_MODE32);
+
+ /* Force apcs-32 to be used for interworking. */
+ target_flags |= ARM_FLAG_APCS_32;
+
+	  /* There is no ARM processor that supports both APCS-26 and
+ interworking. Therefore we force FL_MODE26 to be removed
+ from insn_flags here (if it was set), so that the search
+ below will always be able to find a compatible processor. */
+ insn_flags &= ~ FL_MODE26;
+ }
+
+ if (! TARGET_APCS_32)
+ sought |= FL_MODE26;
+
+ if (sought != 0 && ((sought & insn_flags) != sought))
+ {
+ /* Try to locate a CPU type that supports all of the abilities
+ of the default CPU, plus the extra abilities requested by
+ the user. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+	    if ((sel->flags & (sought | insn_flags)) == (sought | insn_flags))
+ break;
+
+ if (sel->name == NULL)
+ {
+ unsigned int current_bit_count = 0;
+ struct processors * best_fit = NULL;
+
+ /* Ideally we would like to issue an error message here
+ saying that it was not possible to find a CPU compatible
+ with the default CPU, but which also supports the command
+ line options specified by the programmer, and so they
+ ought to use the -mcpu=<name> command line option to
+ override the default CPU type.
+
+	     Unfortunately this does not work with multilibbing.  We
+	     need to be able to support multilibs for -mapcs-26 and for
+	     -mthumb-interwork and there is no CPU that can support both
+	     options.  Instead, if we cannot find a cpu that has both the
+	     characteristics of the default cpu and the given command line
+	     options, we scan the array again looking for a best match.  */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == sought)
+ {
+ unsigned int count;
+
+ count = bit_count (sel->flags & insn_flags);
+
+ if (count >= current_bit_count)
+ {
+ best_fit = sel;
+ current_bit_count = count;
+ }
+ }
+
+ if (best_fit == NULL)
+ abort ();
+ else
+ sel = best_fit;
+ }
+
+ insn_flags = sel->flags;
+ }
+ }
+
+ /* If tuning has not been specified, tune for whichever processor or
+ architecture has been selected. */
+ if (tune_flags == 0)
+ tune_flags = insn_flags;
+
+ /* Make sure that the processor choice does not conflict with any of the
+ other command line choices. */
+ if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
+ {
+ /* If APCS-32 was not the default then it must have been set by the
+ user, so issue a warning message. If the user has specified
+	 "-mapcs-32 -mcpu=arm2" then we lose here.  */
+ if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
+ warning ("target CPU does not support APCS-32" );
+ target_flags &= ~ ARM_FLAG_APCS_32;
+ }
+ else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
+ {
+ warning ("target CPU does not support APCS-26" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_THUMB_INTERWORK && !(insn_flags & FL_THUMB))
+ {
+ warning ("target CPU does not support interworking" );
+ target_flags &= ~ARM_FLAG_THUMB;
+ }
+
+ /* If interworking is enabled then APCS-32 must be selected as well. */
+ if (TARGET_THUMB_INTERWORK)
+ {
+ if (! TARGET_APCS_32)
+ warning ("interworking forces APCS-32 to be used" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_APCS_STACK && ! TARGET_APCS)
+ {
+ warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
+ target_flags |= ARM_FLAG_APCS_FRAME;
+ }
+
+ if (write_symbols != NO_DEBUG && flag_omit_frame_pointer)
+ warning ("-g with -fomit-frame-pointer may not give sensible debugging");
+
+ if (TARGET_POKE_FUNCTION_NAME)
+ target_flags |= ARM_FLAG_APCS_FRAME;
+
+ if (TARGET_APCS_REENT && flag_pic)
+ fatal ("-fpic and -mapcs-reent are incompatible");
+
+ if (TARGET_APCS_REENT)
+ warning ("APCS reentrant code not supported. Ignored");
+
+ /* If stack checking is disabled, we can use r10 as the PIC register,
+ which keeps r9 available. */
+ if (flag_pic && ! TARGET_APCS_STACK)
+ arm_pic_register = 10;
+
+ /* Well, I'm about to have a go, but pic is NOT going to be compatible
+ with APCS reentrancy, since that requires too much support in the
+ assembler and linker, and the ARMASM assembler seems to lack some
+ required directives. */
+ if (flag_pic)
+ warning ("Position independent code not supported");
+
+ if (TARGET_APCS_FLOAT)
+ warning ("Passing floating point arguments in fp regs not yet supported");
+
+ /* Initialise boolean versions of the flags, for use in the arm.md file. */
+ arm_fast_multiply = insn_flags & FL_FAST_MULT;
+ arm_arch4 = insn_flags & FL_ARCH4;
+
+ arm_ld_sched = tune_flags & FL_LDSCHED;
+ arm_is_strong = tune_flags & FL_STRONG;
+ arm_is_6_or_7 = ((tune_flags & (FL_MODE26 | FL_MODE32))
+ && !(tune_flags & FL_ARCH4));
+
+ /* Default value for floating point code... if no co-processor
+ bus, then schedule for emulated floating point. Otherwise,
+ assume the user has an FPA.
+ Note: this does not prevent use of floating point instructions,
+ -msoft-float does that. */
+ arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;
+
+ if (target_fpe_name)
+ {
+ if (! strcmp (target_fpe_name, "2"))
+ arm_fpu_arch = FP_SOFT2;
+ else if (! strcmp (target_fpe_name, "3"))
+ arm_fpu_arch = FP_SOFT3;
+ else
+	fatal ("Invalid floating point emulation option: -mfpe=%s",
+ target_fpe_name);
+ }
+ else
+ arm_fpu_arch = FP_DEFAULT;
+
+ if (TARGET_FPE && arm_fpu != FP_HARD)
+ arm_fpu = FP_SOFT2;
+
+ /* For arm2/3 there is no need to do any scheduling if there is only
+ a floating point emulator, or we are doing software floating-point. */
+ if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && (tune_flags & FL_MODE32) == 0)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+
+  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
+
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ /* If optimizing for space, don't synthesize constants.
+ For processors with load scheduling, it never costs more than 2 cycles
+ to load a constant, and the load scheduler may well reduce that to 1. */
+ if (optimize_size || (tune_flags & FL_LDSCHED))
+ arm_constant_limit = 1;
+
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM).
+ Otherwise for the StrongARM, which has early execution of branches,
+ a sequence that is worth skipping is shorter. */
+ if (optimize_size)
+ max_insns_skipped = 6;
+ else if (arm_is_strong)
+ max_insns_skipped = 3;
+}
+
+
+/* Return 1 if it is possible to return using a single instruction */
+
+int
+use_return_insn (iscond)
+ int iscond;
+{
+ int regno;
+
+  if (!reload_completed || current_function_pretend_args_size
+ || current_function_anonymous_args
+ || ((get_frame_size () + current_function_outgoing_args_size != 0)
+ /* CYGNUS LOCAL nickc */
+ && !(TARGET_APCS && frame_pointer_needed)))
+ /* END CYGNUS LOCAL */
+ return 0;
+
+ /* Can't be done if interworking with Thumb, and any registers have been
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_is_strong && frame_pointer_needed)
+ return 0;
+ if ((iscond && arm_is_strong)
+ || TARGET_THUMB_INTERWORK)
+ for (regno = 0; regno < 16; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* Can't be done if any of the FPU regs are pushed, since this also
+ requires an insn */
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return 0;
+
+ return 1;
+}
+
+/* Return TRUE if int I is a valid immediate ARM constant. */
+
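+/* An ARM data-processing immediate is an 8-bit value rotated right by an
+   even amount.  For example 0xff, 0xff00 and 0xf000000f (0xff rotated
+   right by 4) are all representable, while 0x1fe (an odd rotation of
+   0xff) and 0x101 (more than 8 significant bits) are not.  The loop
+   below simply tries each of the 16 even rotations in turn.  */
+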
+int
+const_ok_for_arm (i)
+ HOST_WIDE_INT i;
+{
+ unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
+
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ be all zero, or all one. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
+ && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
+ != ((~(unsigned HOST_WIDE_INT) 0)
+ & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
+ return FALSE;
+
+ /* Fast return for 0 and powers of 2 */
+ if ((i & (i - 1)) == 0)
+ return TRUE;
+
+ do
+ {
+ if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
+ return TRUE;
+ mask =
+ (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
+ >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff);
+ } while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
+
+ return FALSE;
+}
+
+/* Return true if I is a valid constant for the operation CODE. */
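+/* PLUS can also absorb the negated constant (an ADD becomes a SUB), and
+   AND the inverted one (an AND becomes a BIC): e.g. (AND x #0xffffff00)
+   is not directly encodable, but BIC with #0xff is.  */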
+int
+const_ok_for_op (i, code, mode)
+ HOST_WIDE_INT i;
+ enum rtx_code code;
+ enum machine_mode mode;
+{
+ if (const_ok_for_arm (i))
+ return 1;
+
+ switch (code)
+ {
+ case PLUS:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
+
+ case MINUS: /* Should only occur with (MINUS I reg) => rsb */
+ case XOR:
+ case IOR:
+ return 0;
+
+ case AND:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
+
+ default:
+ abort ();
+ }
+}
+
+/* Emit a sequence of insns to handle a large constant.
+ CODE is the code of the operation required, it can be any of SET, PLUS,
+ IOR, AND, XOR, MINUS;
+ MODE is the mode in which the operation is being performed;
+ VAL is the integer to operate on;
+ SOURCE is the other operand (a register, or a null-pointer for SET);
+ SUBTARGETS means it is safe to create scratch registers if that will
+ either produce a simpler sequence, or we will want to cse the values.
+ Return value is the number of insns emitted. */
+
+int
+arm_split_constant (code, mode, val, target, source, subtargets)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+{
+ if (subtargets || code == SET
+ || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ && REGNO (target) != REGNO (source)))
+ {
+ /* After arm_reorg has been called, we can't fix up expensive
+ constants by pushing them into memory so we must synthesise
+ them in-line, regardless of the cost. This is only likely to
+ be more costly on chips that have load delay slots and we are
+ compiling without running the scheduler (so no splitting
+ occurred before the final instruction emission).
+
+ Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
+ */ /* CYGNUS LOCAL nickc/strongarm */
+ if ((! after_arm_reorg || optimize == 0)
+ /* END CYGNUS LOCAL */
+ && (arm_gen_constant (code, mode, val, target, source, 1, 0)
+ > arm_constant_limit + (code != SET)))
+ {
+ if (code == SET)
+ {
+	      /* Currently SET is the only monadic value for CODE; all
+		 the rest are dyadic.  */
+ emit_insn (gen_rtx (SET, VOIDmode, target, GEN_INT (val)));
+ return 1;
+ }
+ else
+ {
+ rtx temp = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, temp, GEN_INT (val)));
+ /* For MINUS, the value is subtracted from, since we never
+ have subtraction of a constant. */
+ if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, temp, source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, temp)));
+ return 2;
+ }
+ }
+ }
+
+ return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
+}
+
+/* As above, but extra parameter GENERATE which, if clear, suppresses
+ RTL generation. */
+int
+arm_gen_constant (code, mode, val, target, source, subtargets, generate)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+ int generate;
+{
+ int can_invert = 0;
+ int can_negate = 0;
+ int can_negate_initial = 0;
+ int can_shift = 0;
+ int i;
+ int num_bits_set = 0;
+ int set_sign_bit_copies = 0;
+ int clear_sign_bit_copies = 0;
+ int clear_zero_bit_copies = 0;
+ int set_zero_bit_copies = 0;
+ int insns = 0;
+ unsigned HOST_WIDE_INT temp1, temp2;
+ unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
+
+  /* Find out which operations are safe for a given CODE.  Also do a quick
+ check for degenerate cases; these can occur when DImode operations
+ are split. */
+ switch (code)
+ {
+ case SET:
+ can_invert = 1;
+ can_shift = 1;
+ can_negate = 1;
+ break;
+
+ case PLUS:
+ can_negate = 1;
+ can_negate_initial = 1;
+ break;
+
+ case IOR:
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ GEN_INT (ARM_SIGN_EXTEND (val))));
+ return 1;
+ }
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ break;
+
+ case AND:
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ can_invert = 1;
+ break;
+
+ case XOR:
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, source)));
+ return 1;
+ }
+
+      /* Other XOR constants are not yet handled below.  */
+ abort ();
+
+ case MINUS:
+ /* We treat MINUS as (val - source), since (source - val) is always
+ passed as (source + (-val)). */
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NEG, mode, source)));
+ return 1;
+ }
+ if (const_ok_for_arm (val))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (MINUS, mode, GEN_INT (val), source)));
+ return 1;
+ }
+ can_negate = 1;
+
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* If we can do it in one insn get out quickly */
+ if (const_ok_for_arm (val)
+ || (can_negate_initial && const_ok_for_arm (-val))
+ || (can_invert && const_ok_for_arm (~val)))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ (source ? gen_rtx (code, mode, source,
+ GEN_INT (val))
+ : GEN_INT (val))));
+ return 1;
+ }
+
+
+ /* Calculate a few attributes that may be useful for specific
+ optimizations. */
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_zero_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_zero_bit_copies++;
+ else
+ break;
+ }
+
+ switch (code)
+ {
+ case SET:
+      /* See if we can do this by sign_extending a constant that is known
+	 to be negative.  This is a good way of doing it, since the shift
+	 may well merge into a subsequent insn.  */
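+      /* For example 0xffffff8f has 25 copies of the sign bit; shifting
+	 it left by 24 gives 0x8f000000, a valid immediate, and an
+	 arithmetic shift right by 24 then restores the high bits.  */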
+ if (set_sign_bit_copies > 1)
+ {
+ if (const_ok_for_arm
+ (temp1 = ARM_SIGN_EXTEND (remainder
+ << (set_sign_bit_copies - 1))))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ /* For an inverted constant, we will need to set the low bits,
+ these will be shifted out of harm's way. */
+ temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
+ if (const_ok_for_arm (~temp1))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ }
+
+ /* See if we can generate this by setting the bottom (or the top)
+ 16 bits, and then shifting these into the other half of the
+	 word.  We only look for the simplest cases; to do more would cost
+ too much. Be careful, however, not to generate this when the
+ alternative would take fewer insns. */
+ if (val & 0xffff0000)
+ {
+ temp1 = remainder & 0xffff0000;
+ temp2 = remainder & 0x0000ffff;
+
+ /* Overlaps outside this range are best done using other methods. */
+ for (i = 9; i < 24; i++)
+ {
+ if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
+ && ! const_ok_for_arm (temp2))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp2, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (ASHIFT, mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+
+ /* Don't duplicate cases already considered. */
+ for (i = 17; i < 24; i++)
+ {
+ if (((temp1 | (temp1 >> i)) == remainder)
+ && ! const_ok_for_arm (temp1))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp1, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (LSHIFTRT, mode,
+ source, GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+ }
+ break;
+
+ case IOR:
+ case XOR:
+ /* If we have IOR or XOR, and the constant can be loaded in a
+ single instruction, and we can find a temporary to put it in,
+ then this can be done in two instructions instead of 3-4. */
+ if (subtargets
+ /* TARGET can't be NULL if SUBTARGETS is 0 */
+ || (reload_completed && ! reg_mentioned_p (target, source)))
+ {
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub, GEN_INT (val)));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, sub)));
+ }
+ return 2;
+ }
+ }
+
+ if (code == XOR)
+ break;
+
+ if (set_sign_bit_copies > 8
+ && (val & (-1 << (32 - set_sign_bit_copies))) == val)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_sign_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (set_zero_bit_copies > 8
+ && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_zero_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode, source)));
+ source = sub;
+ if (subtargets)
+ sub = gen_reg_rtx (mode);
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (AND, mode, source,
+ GEN_INT (temp1))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, sub)));
+ }
+ return 3;
+ }
+ break;
+
+ case AND:
+      /* See if two shifts will do two or more insns' worth of work.  */
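+      /* E.g. (AND x #0xffff): neither 0xffff nor its inverse 0xffff0000
+	 is a valid immediate, but "mov rD, x, asl #16" followed by
+	 "mov rD, rD, lsr #16" does the job in two insns.  */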
+ if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = ((0xffffffff
+ << (32 - clear_sign_bit_copies))
+ & 0xffffffff);
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_sign_bit_copies);
+
+ emit_insn (gen_ashlsi3 (new_src, source, shift));
+ emit_insn (gen_lshrsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_zero_bit_copies);
+
+ emit_insn (gen_lshrsi3 (new_src, source, shift));
+ emit_insn (gen_ashlsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (remainder & (1 << i))
+ num_bits_set++;
+
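+  /* If more than half of the bits are set it is cheaper to synthesise
+     the complement (or, for PLUS, the negation) and let the insns
+     emitted below use the inverted or negated immediates.  */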
+ if (code == AND || (can_invert && num_bits_set > 16))
+ remainder = (~remainder) & 0xffffffff;
+ else if (code == PLUS && num_bits_set > 16)
+ remainder = (-remainder) & 0xffffffff;
+ else
+ {
+ can_invert = 0;
+ can_negate = 0;
+ }
+
+ /* Now try and find a way of doing the job in either two or three
+ instructions.
+     We start by looking for the largest block of zeros that is aligned on
+     a 2-bit boundary; we then fill up the temps, wrapping around to the
+ top of the word when we drop off the bottom.
+ In the worst case this code should produce no more than four insns. */
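+  /* For example, a SET of 0x12345678 takes four insns; one possible
+     decomposition, emitted from the most significant chunk downwards, is
+	mov	rD, #0x12000000
+	orr	rD, rD, #0x00340000
+	orr	rD, rD, #0x00005600
+	orr	rD, rD, #0x00000078
+     where each chunk is a valid 8-bit rotated immediate.  */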
+ {
+ int best_start = 0;
+ int best_consecutive_zeros = 0;
+
+ for (i = 0; i < 32; i += 2)
+ {
+ int consecutive_zeros = 0;
+
+ if (! (remainder & (3 << i)))
+ {
+ while ((i < 32) && ! (remainder & (3 << i)))
+ {
+ consecutive_zeros += 2;
+ i += 2;
+ }
+ if (consecutive_zeros > best_consecutive_zeros)
+ {
+ best_consecutive_zeros = consecutive_zeros;
+ best_start = i - consecutive_zeros;
+ }
+ i -= 2;
+ }
+ }
+
+ /* Now start emitting the insns, starting with the one with the highest
+ bit set: we do this so that the smallest number will be emitted last;
+ this is more likely to be combinable with addressing insns. */
+ i = best_start;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+
+ if (generate)
+ {
+ rtx new_src;
+
+ if (code == SET)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ GEN_INT (can_invert ? ~temp1 : temp1)));
+ else if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ gen_rtx (code, mode, GEN_INT (temp1),
+ source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (remainder
+ ? (subtargets
+ ? gen_reg_rtx (mode)
+ : target)
+ : target),
+ gen_rtx (code, mode, source,
+ GEN_INT (can_invert ? ~temp1
+ : (can_negate
+ ? -temp1
+ : temp1)))));
+ source = new_src;
+ }
+
+ if (code == SET)
+ {
+ can_invert = 0;
+ code = PLUS;
+ }
+ else if (code == MINUS)
+ code = PLUS;
+
+ insns++;
+ i -= 6;
+ }
+ i -= 2;
+ } while (remainder);
+ }
+ return insns;
+}
+
+/* Canonicalize a comparison so that we are more likely to recognize it.
+ This can be done for a few constant compares, where we can make the
+ immediate value easier to load. */
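+/* For example (GT x #0xffffff) needs a constant with too many significant
+   bits for an ARM immediate, but the equivalent (GE x #0x1000000) can
+   encode its constant directly.  */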
+enum rtx_code
+arm_canonicalize_comparison (code, op1)
+ enum rtx_code code;
+ rtx *op1;
+{
+ unsigned HOST_WIDE_INT i = INTVAL (*op1);
+
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ case LE:
+ if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ - 1)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i+1);
+ return code == GT ? GE : LT;
+ }
+ break;
+
+ case GE:
+ case LT:
+ if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
+ {
+ *op1 = GEN_INT (i-1);
+ return code == GE ? GT : LE;
+ }
+ break;
+
+ case GTU:
+ case LEU:
+ if (i != ~((unsigned HOST_WIDE_INT) 0)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GTU ? GEU : LTU;
+ }
+ break;
+
+ case GEU:
+ case LTU:
+ if (i != 0
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GEU ? GTU : LEU;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return code;
+}
+
+/* CYGNUS LOCAL */
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+arm_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non
+ bit-field element and that this element must be the first one in
+ the structure. */
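+
+      /* Thus "struct { int i; }" and "struct { int i : 8; int j : 24; }"
+	 are returned in a register, while "struct { char c; char d; }" is
+	 returned in memory, since `d' is addressable at a non-zero
+	 offset.  */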
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+/* END CYGNUS LOCAL */
+
+int
+legitimate_pic_operand_p (x)
+ rtx x;
+{
+ if (CONSTANT_P (x) && flag_pic
+ && (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
+ return 0;
+
+ return 1;
+}
+
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+ int subregs = 0;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+#ifdef AOF_ASSEMBLER
+      /* The AOF assembler can generate relocations for these directly, and
+	 understands that the PIC register has to be added into the
+	 offset.  */
+ insn = emit_insn (gen_pic_load_addr_based (reg, orig));
+#else
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ emit_insn (gen_pic_load_addr (address, orig));
+
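+      /* ADDRESS now holds the symbol's offset into the global offset
+	 table, loaded from the literal pool; the address of the symbol
+	 itself is then fetched from the GOT entry at that offset from
+	 the PIC register.  */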
+ pic_ref = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode, pic_offset_table_rtx, address));
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+#endif
+ current_function_uses_pic_offset_table = 1;
+      /* Put a REG_EQUAL note on this insn, so that it can be optimized
+	 by the loop pass.  */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ /* The base register doesn't really matter, we only want to
+ test the index for the appropriate mode. */
+ GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
+
+ if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ abort ();
+
+ win:
+ if (GET_CODE (offset) == CONST_INT)
+ return plus_constant_for_output (base, INTVAL (offset));
+ }
+
+ if (GET_MODE_SIZE (mode) > 4
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || TARGET_SOFT_FLOAT))
+ {
+ emit_insn (gen_addsi3 (reg, base, offset));
+ return reg;
+ }
+
+ return gen_rtx (PLUS, Pmode, base, offset);
+ }
+ else if (GET_CODE (orig) == LABEL_REF)
+ current_function_uses_pic_offset_table = 1;
+
+ return orig;
+}
+
+static rtx pic_rtx;
+
+int
+is_pic (x)
+ rtx x;
+{
+ if (x == pic_rtx)
+ return 1;
+ return 0;
+}
+
+void
+arm_finalize_pic ()
+{
+#ifndef AOF_ASSEMBLER
+ rtx l1, pic_tmp, pic_tmp2, seq;
+ rtx global_offset_table;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ start_sequence ();
+ l1 = gen_label_rtx ();
+
+ global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "_GLOBAL_OFFSET_TABLE_");
+ /* The PC contains 'dot'+8, but the label L1 is on the next
+ instruction, so the offset is only 'dot'+4. */
+ pic_tmp = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ gen_rtx (LABEL_REF, VOIDmode, l1),
+ GEN_INT (4)));
+ pic_tmp2 = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ global_offset_table,
+ pc_rtx));
+
+ pic_rtx = gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode, pic_tmp2, pic_tmp));
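+
+  /* The net effect: the literal loaded below, plus the value the PC
+     reads as when the add executes, is exactly the address of
+     _GLOBAL_OFFSET_TABLE_, which is thus left in the PIC register.  */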
+
+ emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx));
+  emit_jump_insn (gen_pic_add_dot_plus_eight (l1, pic_offset_table_rtx));
+ emit_label (l1);
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, get_insns ());
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx));
+#endif /* AOF_ASSEMBLER */
+}
+
+#define REG_OR_SUBREG_REG(X) \
+ (GET_CODE (X) == REG \
+ || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+
+#define REG_OR_SUBREG_RTX(X) \
+ (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+int
+arm_rtx_costs (x, code, outer_code)
+ rtx x;
+ enum rtx_code code, outer_code;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code subcode;
+ int extra_cost;
+
+ switch (code)
+ {
+ case MEM:
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
+
+ case DIV:
+ case MOD:
+ return 100;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ return 4;
+ /* Fall through */
+ case ROTATERT:
+ if (mode != SImode)
+ return 8;
+ /* Fall through */
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ if (mode == DImode)
+ return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
+ + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 8));
+ return (1 + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 4)
+ + ((GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT))
+ ? 0 : 4));
+
+ case MINUS:
+ if (mode == DImode)
+ return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
+ ? 0 : 8));
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
+ ? 0 : 8));
+
+ if (((GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))
+ && REG_OR_SUBREG_REG (XEXP (x, 1))))
+ || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
+ || subcode == ASHIFTRT || subcode == LSHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
+ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
+ && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
+ || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ && REG_OR_SUBREG_REG (XEXP (x, 0))))
+ return 1;
+ /* Fall through */
+
+ case PLUS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8));
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ extra_cost = 0;
+
+    /* Normally the frame registers will be split into reg+const during
+ reload, so it is a bad idea to combine them with other instructions,
+ since then they might not be moved outside of loops. As a compromise
+ we allow integration with ops that have a constant as their second
+ operand. */
+ if ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) != CONST_INT)
+	  || (REG_OR_SUBREG_REG (XEXP (x, 1))
+	      && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
+ extra_cost = 4;
+
+ if (mode == DImode)
+ return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 8));
+
+ if (REG_OR_SUBREG_REG (XEXP (x, 0)))
+ return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 4));
+
+ else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
+ return (1 + extra_cost
+ + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
+ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
+ && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
+ ? 0 : 4));
+
+ return 8;
+
+ case MULT:
+      /* There is no point basing this on the tuning, since it is always
+	 the fast variant if it exists at all.  */
+ if (arm_fast_multiply && mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ return 8;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ return 30;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int add_cost = const_ok_for_arm (i) ? 4 : 8;
+ int j;
+ /* Tune as appropriate */
+ int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
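+	  /* E.g. a multiply by 0xff (a valid immediate, so ADD_COST starts
+	     at 4) terminates after one 8-bit step on a fast multiply core
+	     (cost 6), but needs four 2-bit steps otherwise (cost 12).  */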
+
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ add_cost += 2;
+ }
+
+ return add_cost;
+ }
+
+ return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
+ + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
+
+ case TRUNCATE:
+ if (arm_fast_multiply && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
+ return 8;
+ return 99;
+
+ case NEG:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
+ /* Fall through */
+ case NOT:
+ if (mode == DImode)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case COMPARE:
+ return 1;
+
+ case ABS:
+ return 4 + (mode == DImode ? 4 : 0);
+
+ case SIGN_EXTEND:
+ if (GET_MODE (XEXP (x, 0)) == QImode)
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ /* Fall through */
+ case ZERO_EXTEND:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ break;
+ }
+ abort ();
+
+ default:
+ return 99;
+ }
+}
+
+int
+arm_adjust_cost (insn, link, dep, cost)
+ rtx insn;
+ rtx link;
+ rtx dep;
+ int cost;
+{
+ rtx i_pat, d_pat;
+
+ if ((i_pat = single_set (insn)) != NULL
+ && GET_CODE (SET_SRC (i_pat)) == MEM
+ && (d_pat = single_set (dep)) != NULL
+ && GET_CODE (SET_DEST (d_pat)) == MEM)
+ {
+      /* This is a load after a store; there is no conflict if the load reads
+	 from a cached area.  Assume that loads from the stack and from the
+	 constant pool are cached, and that others will miss.  This is a
+	 hack.  */
+
+/* debug_rtx (insn);
+ debug_rtx (dep);
+ debug_rtx (link);
+ fprintf (stderr, "costs %d\n", cost); */
+
+ if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (hard_frame_pointer_rtx,
+ XEXP (SET_SRC (i_pat), 0)))
+ {
+/* fprintf (stderr, "***** Now 1\n"); */
+ return 1;
+ }
+ }
+
+ return cost;
+}
+
+/* This code has been fixed for cross compilation. */
+
+static int fpa_consts_inited = 0;
+
+char *strings_fpa[8] = {
+ "0", "1", "2", "3",
+ "4", "5", "0.5", "10"
+};
+
+static REAL_VALUE_TYPE values_fpa[8];
+
+static void
+init_fpa_table ()
+{
+ int i;
+ REAL_VALUE_TYPE r;
+
+ for (i = 0; i < 8; i++)
+ {
+ r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
+ values_fpa[i] = r;
+ }
+
+ fpa_consts_inited = 1;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
+
+int
+const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant when negated.  */
+
+int
+neg_const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Predicates for `match_operand' and `match_operator'. */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Only accept reg, subreg(reg), const_int. */
+
+int
+reg_or_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is an item in memory, given that we are in reload. */
+
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid memory address, but not valid for a signed byte
+   memory access (architecture V4).  */
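+/* The v4 signed byte load (ldrsb) only has an 8-bit immediate offset and
+   no shifted index form, unlike plain ldr/ldrb, hence the stricter
+   checks below.  */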
+int
+bad_signed_byte_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! memory_operand (op, mode) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ /* A sum of anything more complex than reg + reg or reg + const is bad */
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
+ && (! s_register_operand (XEXP (op, 0), VOIDmode)
+ || (! s_register_operand (XEXP (op, 1), VOIDmode)
+ && GET_CODE (XEXP (op, 1)) != CONST_INT)))
+ return 1;
+
+ /* Big constants are also bad */
+ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (INTVAL (XEXP (op, 1)) > 0xff
+ || -INTVAL (XEXP (op, 1)) > 0xff))
+ return 1;
+
+  /* Everything else is good, or will automatically be made so.  */
+ return 0;
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction. */
+
+int
+arm_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   load.  */
+
+int
+arm_rhsm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
+ || memory_operand (op, mode));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   constant that is valid when negated.  */
+
+int
+arm_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (-INTVAL (op)))));
+}
+
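+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   constant that is valid when inverted (usable in an MVN or BIC form).  */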
+int
+arm_not_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (~INTVAL (op)))));
+}
+
+/* Return TRUE if the operand is a memory reference which contains an
+ offsettable address. */
+int
+offsettable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ return (mode == GET_MODE (op)
+ && GET_CODE (op) == MEM
+ && offsettable_address_p (reload_completed | reload_in_progress,
+ mode, XEXP (op, 0)));
+}
+
+/* Return TRUE if the operand is a memory reference which is, or can be
+ made word aligned by adjusting the offset. */
+int
+alignable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx reg;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ return ((GET_CODE (reg = op) == REG
+ || (GET_CODE (op) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (op)) == REG)
+ || (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (GET_CODE (reg = XEXP (op, 0)) == REG
+ || (GET_CODE (XEXP (op, 0)) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
+ && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4);
+}
+
+/* Similar to s_register_operand, but does not allow hard integer
+ registers. */
+int
+f_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
+}
+
+/* Return TRUE for valid operands for the rhs of an FPU instruction. */
+
+int
+fpu_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+int
+fpu_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op)
+ || neg_const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+/* Return nonzero if OP is a constant power of two. */
+
+int
+power_of_two_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+      HOST_WIDE_INT value = INTVAL (op);
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+ return FALSE;
+}
+
+/* Return TRUE for a valid operand of a DImode operation.
+ Either: REG, SUBREG, CONST_DOUBLE or MEM(DImode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+di_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ case CONST_INT:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DImode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
+   Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
+ Note that this disallows MEM(REG+REG), but allows
+ MEM(PRE/POST_INC/DEC(REG)). */
+
+int
+soft_df_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DFmode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for valid index operands. */
+
+int
+index_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+  return (s_register_operand (op, mode)
+ || (immediate_operand (op, mode)
+ && INTVAL (op) < 4096 && INTVAL (op) > -4096));
+}
+
+/* Return TRUE for valid shifts by a constant. This also accepts any
+ power of two on the (somewhat overly relaxed) assumption that the
+ shift operator in this case was a mult. */
+
+int
+const_shift_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (power_of_two_operand (op, mode)
+ || (immediate_operand (op, mode)
+ && (INTVAL (op) < 32 && INTVAL (op) > 0)));
+}
+
+/* Return TRUE for arithmetic operators which can be combined with a multiply
+ (shift). */
+
+int
+shiftable_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ return (code == PLUS || code == MINUS
+ || code == IOR || code == XOR || code == AND);
+ }
+}
+
+/* Return TRUE for shift operators. */
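+/* A MULT by a power of two is also accepted, since that is the canonical
+   RTL form of a shift when it appears inside an address.  */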
+
+int
+shift_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == MULT)
+	return power_of_two_operand (XEXP (x, 1), mode);
+
+ return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
+ || code == ROTATERT);
+ }
+}
+
+/* Return TRUE if X is EQ or NE.  */
+
+int
+equality_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ return GET_CODE (x) == EQ || GET_CODE (x) == NE;
+}
+
+/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
+
+int
+minmax_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (GET_MODE (x) != mode)
+ return FALSE;
+
+ return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+   a mode, accept any register whose mode is in class MODE_CC.  */
+
+int
+cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+   a mode, accept any MODE_CC-class register whose mode indicates a
+   dominance expression.  */
+
+int
+dominant_cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode != CC_DNEmode && mode != CC_DEQmode
+ && mode != CC_DLEmode && mode != CC_DLTmode
+ && mode != CC_DGEmode && mode != CC_DGTmode
+ && mode != CC_DLEUmode && mode != CC_DLTUmode
+ && mode != CC_DGEUmode && mode != CC_DGTUmode)
+ return FALSE;
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
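+/* Map a min/max rtx code to the comparison code under which the first
+   operand is the one selected (e.g. SMAX -> GE, since x >= y means
+   max (x, y) is x).  */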
+enum rtx_code
+minmax_code (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == SMAX)
+ return GE;
+ else if (code == SMIN)
+ return LE;
+ else if (code == UMIN)
+ return LEU;
+ else if (code == UMAX)
+ return GEU;
+
+ abort ();
+}
+
+/* Return 1 if memory locations are adjacent */
+
+int
+adjacent_mem_locations (a, b)
+ rtx a, b;
+{
+ int val0 = 0, val1 = 0;
+ int reg0, reg1;
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = REGNO (XEXP (a, 0));
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = REGNO (XEXP (b, 0));
+ return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+ }
+ return 0;
+}
+
+/* Return 1 if OP is a load multiple operation.  It is known to be a
+   PARALLEL and the first section will be tested.  */
+
+int
+load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i - base
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 if OP is a store multiple operation.  It is known to be a
+   PARALLEL and the first section will be tested.  */
+
+int
+store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i - base
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
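+/* Check whether OPERANDS describe NOPS loads that can be combined into a
+   load multiple.  Return 0 if they cannot, otherwise a code for the
+   addressing mode to use: 1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb,
+   5 = the base address must first be set up with an add or sub.  On
+   success, and if BASE is non-null, REGS receives the sorted register
+   list, *BASE the base register and *LOAD_OFFSET the lowest offset.  */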
+int
+load_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, or if it overwrites the
+ base register but isn't the last insn in the list, then
+ we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
+ || (i != nops - 1 && unsorted_regs[i] == base_reg))
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+	 than once.  */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* ldmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* ldmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* ldmdb */
+
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+ the offset isn't small enough. The reason 2 ldrs are faster is because
+ these ARMs are able to do more than one cache access in a single cycle.
+ The ARM9 and StrongARM have Harvard caches, whilst the ARM8 has a double
+ bandwidth cache. This means that these cores can do both an instruction
+ fetch and a data fetch in a single cycle, so the trick of calculating the
+ address into a scratch register (one of the result regs) and then doing a
+ load multiple actually becomes slower (and no smaller in code size). That
+ is the transformation
+
+ ldr rd1, [rbase + offset]
+ ldr rd2, [rbase + offset + 4]
+
+ to
+
+ add rd1, rbase, offset
+ ldmia rd1, {rd1, rd2}
+
+ produces worse code -- '3 cycles + any stalls on rd2' instead of '2 cycles
+ + any stalls on rd2'. On ARMs with only one cache access per cycle, the
+ first sequence could never complete in less than 6 cycles, whereas the ldm
+ sequence would only take 5 and would make better use of sequential accesses
+ if not hitting the cache.
+
+ We cheat here and test 'arm_ld_sched' which we currently know to only be
+ true for the ARM8, ARM9 and StrongARM. If this ever changes, then the test
+ below needs to be reworked. */
+ if (nops == 2 && arm_ld_sched)
+ return 0;
+
+ /* Can't do it without setting up the offset, only do this if it takes
+ no more than one insn. */
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
+ || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
+}
+
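+/* Output the assembler for a load multiple, as classified by
+   load_multiple_sequence above.  */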
+char *
+emit_ldm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "ldm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "ldm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "ldm%?db\t");
+ break;
+
+ case 5:
+ if (offset >= 0)
+ sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) offset);
+ else
+ sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) -offset);
+ output_asm_insn (buf, operands);
+ base_reg = regs[0];
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole ldm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
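+/* As load_multiple_sequence, but for stores.  Return 0 if the NOPS stores
+   in OPERANDS cannot be combined, otherwise 1 = stmia, 2 = stmib,
+   3 = stmda, 4 = stmdb.  */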
+int
+store_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, then we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+	 than once.  */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* stmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* stmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* stmdb */
+
+ return 0;
+}
+
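+/* Output the assembler for a store multiple, as classified by
+   store_multiple_sequence above.  */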
+char *
+emit_stm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "stm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "stm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "stm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "stm%?db\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole stm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
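+/* Return 1 if OP is a PARALLEL whose first element is a SET from an UNSPEC
+   with index 2, the pattern used for a multi-register push.  */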
+int
+multi_register_push (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != PARALLEL
+ || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
+ || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2))
+ return 0;
+
+ return 1;
+}
+
+
+/* Routines for use with attributes */
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+   naked: don't output any prologue or epilogue code; the user is assumed
+     to do the right thing.  */
+
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+ return 0;
+}
+
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Routines for use in generating RTL */
+
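+/* Build the PARALLEL rtx for a load multiple of COUNT registers starting at
+   BASE_REGNO, reading successive words at address FROM (ascending if UP is
+   true, else descending).  If WRITE_BACK, FROM is also updated and
+   clobbered.  The UNCHANGING_P, IN_STRUCT_P and SCALAR_P flags are copied
+   onto each memory reference generated.  */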
+rtx
+arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx from;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (from), from,
+ plus_constant (from, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (from, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, base_regno + j),
+ mem);
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from);
+
+ return result;
+}
+
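+/* As arm_gen_load_multiple, but build a store multiple writing to address
+   TO.  */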
+rtx
+arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx to;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (to), to,
+ plus_constant (to, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, mem,
+ gen_rtx (REG, SImode, base_regno + j));
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to);
+
+ return result;
+}
+
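+/* Expand a block move.  OPERANDS[0] and OPERANDS[1] are the destination and
+   source, OPERANDS[2] the byte count and OPERANDS[3] the alignment.  Return
+   1 on success; return 0 if the count is not a constant of at most 64 bytes
+   or the alignment is not a multiple of four.  */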
+int
+arm_gen_movstrqi (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
+ int i;
+ rtx src, dst;
+ rtx st_src, st_dst, fin_src, fin_dst;
+ rtx part_bytes_reg = NULL;
+ rtx mem;
+ int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
+ int dst_scalar_p, src_scalar_p;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT
+ || INTVAL (operands[2]) > 64
+ || INTVAL (operands[3]) & 3)
+ return 0;
+
+ st_dst = XEXP (operands[0], 0);
+ st_src = XEXP (operands[1], 0);
+
+ dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
+ src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
+ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
+ fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
+ fin_src = src = copy_to_mode_reg (SImode, st_src);
+
+ in_words_to_go = (INTVAL (operands[2]) + 3) / 4;
+ out_words_to_go = INTVAL (operands[2]) / 4;
+ last_bytes = INTVAL (operands[2]) & 3;
+
+ if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
+ part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3);
+
+ for (i = 0; in_words_to_go >= 2; i+=4)
+ {
+ if (in_words_to_go > 4)
+ emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
+ src_unchanging_p,
+ src_in_struct_p,
+ src_scalar_p));
+ else
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ FALSE, src_unchanging_p,
+ src_in_struct_p, src_scalar_p));
+
+ if (out_words_to_go)
+ {
+ if (out_words_to_go > 4)
+ emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else if (out_words_to_go != 1)
+ emit_insn (arm_gen_store_multiple (0, out_words_to_go,
+ dst, TRUE,
+ (last_bytes == 0
+ ? FALSE : TRUE),
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else
+ {
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (REG, SImode, 0));
+ if (last_bytes != 0)
+ emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
+ }
+ }
+
+ in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
+ out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
+ }
+
+ /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
+ if (out_words_to_go)
+ {
+ rtx sreg;
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
+ emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
+
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, sreg);
+ emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
+ in_words_to_go--;
+
+ if (in_words_to_go) /* Sanity check */
+ abort ();
+ }
+
+ if (in_words_to_go)
+ {
+ if (in_words_to_go < 0)
+ abort ();
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ part_bytes_reg = copy_to_mode_reg (SImode, mem);
+ }
+
+ if (BYTES_BIG_ENDIAN && last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ /* The bytes we want are in the top end of the word */
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
+ GEN_INT (8 * (4 - last_bytes))));
+ part_bytes_reg = tmp;
+
+ while (last_bytes)
+ {
+ mem = gen_rtx (MEM, QImode, plus_constant (dst, last_bytes - 1));
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+
+ }
+ else
+ {
+ while (last_bytes)
+ {
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ mem = gen_rtx (MEM, QImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (dst, dst, const1_rtx));
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+ }
+
+ return 1;
+}
+
+/* Generate a memory reference for a half word, such that it will be loaded
+ into the top 16 bits of the word. We can assume that the address is
+ known to be alignable and of the form reg, or plus (reg, const). */
+rtx
+gen_rotated_half_load (memref)
+ rtx memref;
+{
+ HOST_WIDE_INT offset = 0;
+ rtx base = XEXP (memref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ {
+ offset = INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ }
+
+ /* If we aren't allowed to generate unaligned addresses, then fail. */
+ if (TARGET_SHORT_BY_BYTES
+ && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
+ return NULL;
+
+ base = gen_rtx (MEM, SImode, plus_constant (base, offset & ~2));
+
+ if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
+ return base;
+
+ return gen_rtx (ROTATE, SImode, base, GEN_INT (16));
+}
+
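+/* Select the CC mode for a conditional compare combining two comparisons X
+   and Y (ORed if COND_OR is nonzero, else ANDed).  Return CCmode if the
+   combination cannot be expressed as a dominance comparison.  */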
+static enum machine_mode
+select_dominance_cc_mode (op, x, y, cond_or)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+ HOST_WIDE_INT cond_or;
+{
+ enum rtx_code cond1, cond2;
+ int swapped = 0;
+
+ /* Currently we will probably get the wrong result if the individual
+ comparisons are not simple. This also ensures that it is safe to
+ reverse a comparison if necessary. */
+ if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+ != CCmode)
+ || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
+ != CCmode))
+ return CCmode;
+
+ if (cond_or)
+ cond1 = reverse_condition (cond1);
+
+ /* If the comparisons are not equal, and one doesn't dominate the other,
+ then we can't do this. */
+ if (cond1 != cond2
+ && ! comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
+ return CCmode;
+
+ if (swapped)
+ {
+ enum rtx_code temp = cond1;
+ cond1 = cond2;
+ cond2 = temp;
+ }
+
+ switch (cond1)
+ {
+ case EQ:
+ if (cond2 == EQ || ! cond_or)
+ return CC_DEQmode;
+
+ switch (cond2)
+ {
+ case LE: return CC_DLEmode;
+ case LEU: return CC_DLEUmode;
+ case GE: return CC_DGEmode;
+ case GEU: return CC_DGEUmode;
+ default: break;
+ }
+
+ break;
+
+ case LT:
+ if (cond2 == LT || ! cond_or)
+ return CC_DLTmode;
+ if (cond2 == LE)
+ return CC_DLEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GT:
+ if (cond2 == GT || ! cond_or)
+ return CC_DGTmode;
+ if (cond2 == GE)
+ return CC_DGEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case LTU:
+ if (cond2 == LTU || ! cond_or)
+ return CC_DLTUmode;
+ if (cond2 == LEU)
+ return CC_DLEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GTU:
+ if (cond2 == GTU || ! cond_or)
+ return CC_DGTUmode;
+ if (cond2 == GEU)
+ return CC_DGEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ /* The remaining cases only occur when both comparisons are the
+ same. */
+ case NE:
+ return CC_DNEmode;
+
+ case LE:
+ return CC_DLEmode;
+
+ case GE:
+ return CC_DGEmode;
+
+ case LEU:
+ return CC_DLEUmode;
+
+ case GEU:
+ return CC_DGEUmode;
+
+ default:
+ break;
+ }
+
+ abort ();
+}
+
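+/* Return the CC mode that should be used when comparing X with Y using
+   operator OP.  */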
+enum machine_mode
+arm_select_cc_mode (op, x, y)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+{
+ /* All floating point compares return CCFP if it is an equality
+ comparison, and CCFPE otherwise. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return (op == EQ || op == NE) ? CCFPmode : CCFPEmode;
+
+ /* A compare with a shifted operand. Because of canonicalization, the
+ comparison will have to be swapped when we emit the assembler. */
+ if (GET_MODE (y) == SImode && GET_CODE (y) == REG
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
+ || GET_CODE (x) == ROTATERT))
+ return CC_SWPmode;
+
+ /* This is a special case that is used by combine to allow a
+ comparison of a shifted byte load to be split into a zero-extend
+ followed by a comparison of the shifted integer (only valid for
+ equalities and unsigned inequalities). */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
+ && (op == EQ || op == NE
+ || op == GEU || op == GTU || op == LTU || op == LEU)
+ && GET_CODE (y) == CONST_INT)
+ return CC_Zmode;
+
+  /* An operation that sets the condition codes as a side-effect does not
+     set the V flag correctly, so we can only use comparisons where this
+     doesn't matter.  (For LT and GE we can use "mi" and "pl" instead.)  */
+ if (GET_MODE (x) == SImode
+ && y == const0_rtx
+ && (op == EQ || op == NE || op == LT || op == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == AND || GET_CODE (x) == IOR
+ || GET_CODE (x) == XOR || GET_CODE (x) == MULT
+ || GET_CODE (x) == NOT || GET_CODE (x) == NEG
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
+ return CC_NOOVmode;
+
+ /* A construct for a conditional compare, if the false arm contains
+ 0, then both conditions must be true, otherwise either condition
+ must be true. Not all conditions are possible, so CCmode is
+ returned if it can't be done. */
+ if (GET_CODE (x) == IF_THEN_ELSE
+ && (XEXP (x, 2) == const0_rtx
+ || XEXP (x, 2) == const1_rtx)
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
+ return select_dominance_cc_mode (op, XEXP (x, 0), XEXP (x, 1),
+ INTVAL (XEXP (x, 2)));
+
+ if (GET_MODE (x) == QImode && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
+ && GET_CODE (x) == PLUS
+ && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
+ return CC_Cmode;
+
+ return CCmode;
+}
+
+/* X and Y are two things to compare using CODE.  Emit the compare insn and
+   return the rtx for the condition code register in the proper mode.  FP
+   means this is a floating point compare: I don't think that it is needed
+   on the arm.  */
+
+rtx
+gen_compare_reg (code, x, y, fp)
+ enum rtx_code code;
+ rtx x, y;
+ int fp;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx (REG, mode, 24);
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, mode, x, y)));
+
+ return cc_reg;
+}
+
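+/* Reload a halfword load: fetch the two bytes separately as zero-extended
+   QImode loads and merge them with a shift and IOR, using OPERANDS[2] as a
+   scratch register.  */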
+void
+arm_reload_in_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[1], 0));
+
+ emit_insn (gen_zero_extendqisi2 (operands[2], gen_rtx (MEM, QImode, base)));
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx (REG, SImode, REGNO (operands[0]));
+
+ emit_insn (gen_rtx (SET, VOIDmode, base_plus, base));
+ base = base_plus;
+ }
+
+ emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG, SImode, operands[0], 0),
+ gen_rtx (MEM, QImode,
+ plus_constant (base, 1))));
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ GEN_INT (8)),
+ operands[2])));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ operands[2],
+ GEN_INT (8)),
+ gen_rtx (SUBREG, SImode, operands[0], 0))));
+}
+
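+/* Reload a halfword store: store the value a byte at a time, using
+   OPERANDS[2] as a scratch register to hold the shifted-down high byte.  */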
+void
+arm_reload_out_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[0], 0));
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+ else
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+}
+
+/* CYGNUS LOCAL */
+/* Check to see if a branch is forwards or backwards. Return TRUE if it
+ is backwards. */
+
+int
+arm_backwards_branch (from, to)
+ int from, to;
+{
+ return insn_addresses[to] <= insn_addresses[from];
+}
+
+/* Check to see if a branch is within the distance that can be done using
+ an arithmetic expression. */
+int
+short_branch (from, to)
+ int from, to;
+{
+ int delta = insn_addresses[from] + 8 - insn_addresses[to];
+
+ return abs (delta) < 980; /* A small margin for safety */
+}
+
+/* Check to see that the insn isn't the target of the conditionalizing
+ code */
+int
+arm_insn_not_targeted (insn)
+ rtx insn;
+{
+ return insn != arm_target_insn;
+}
+/* END CYGNUS LOCAL */
+
+/* Routines for manipulation of the constant pool. */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Arm instructions cannot load a large constant directly into a register;
+   constants have to come from a pc relative load.  The constant referenced
+   by a pc relative load instruction must be less than 1k in front of the
+   instruction.  This means that we often have to dump a constant inside a
+   function, and generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+   Scan and find an instruction which needs a pcrel move.  Look forward,
+   find the last barrier which is within MAX_COUNT bytes of the requirement.
+   If there isn't one, make one.  Process all the instructions between
+   the found insn and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its offset within the current
+ pool.
+
+   X is the rtx we want to replace.  MODE is its mode.  On return,
+   *ADDRESS_ONLY will be nonzero if we really wanted the address of the
+   constant, not the constant itself.  */
+static HOST_WIDE_INT
+add_constant (x, mode, address_only)
+ rtx x;
+ enum machine_mode mode;
+ int * address_only;
+{
+ int i;
+ HOST_WIDE_INT offset;
+
+ * address_only = 0;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+ else if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P(x))
+ {
+ *address_only = 1;
+ mode = get_pool_mode (x);
+ x = get_pool_constant (x);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == 3)
+ x = XVECEXP (x, 0, 0);
+#endif
+
+#ifdef AOF_ASSEMBLER
+ /* PIC Symbol references need to be converted into offsets into the
+ based area. */
+ if (flag_pic && GET_CODE (x) == SYMBOL_REF)
+ x = aof_pic_entry (x);
+#endif /* AOF_ASSEMBLER */
+
+ /* First see if we've already got it */
+ for (i = 0; i < pool_size; i++)
+ {
+ if (GET_CODE (x) == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (GET_CODE (x) == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up.  */
+static int
+fixit (src, mode, destreg)
+ rtx src;
+ enum machine_mode mode;
+ int destreg;
+{
+ if (CONSTANT_P (src))
+ {
+ if (GET_CODE (src) == CONST_INT)
+ return (! const_ok_for_arm (INTVAL (src))
+ && ! const_ok_for_arm (~INTVAL (src)));
+ if (GET_CODE (src) == CONST_DOUBLE)
+ return (GET_MODE (src) == VOIDmode
+ || destreg < 16
+ || (! const_double_rtx_ok_for_fpu (src)
+ && ! neg_const_double_rtx_ok_for_fpu (src)));
+ return symbol_mentioned_p (src);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (src) == UNSPEC && XINT (src, 1) == 3)
+ return 1;
+#endif
+ else
+ return (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0)));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+static rtx
+find_barrier (from, max_count)
+ rtx from;
+ int max_count;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx last = from;
+
+ while (from && count < max_count)
+ {
+ rtx tmp;
+
+ if (GET_CODE (from) == BARRIER)
+ found_barrier = from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ count += 8;
+ /* Handle table jumps as a single entity. */
+ else if (GET_CODE (from) == JUMP_INSN
+ && JUMP_LABEL (from) != 0
+ && ((tmp = next_real_insn (JUMP_LABEL (from)))
+ == next_real_insn (from))
+ && tmp != NULL
+ && GET_CODE (tmp) == JUMP_INSN
+ && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
+ || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ {
+ int elt = GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC ? 1 : 0;
+ count += (get_attr_length (from)
+ + GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (tmp), elt));
+ /* Continue after the dispatch table. */
+ last = from;
+ from = NEXT_INSN (tmp);
+ continue;
+ }
+ else
+ count += get_attr_length (from);
+
+ last = from;
+ from = NEXT_INSN (from);
+ }
+
+ if (! found_barrier)
+ {
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one. */
+ rtx label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (last);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump. */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ }
+
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed.  */
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ int destreg;
+ enum machine_mode mode = GET_MODE (dst);
+
+ if (dst == pc_rtx)
+ return 0;
+
+ if (GET_CODE (dst) == REG)
+ destreg = REGNO (dst);
+ else if (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG)
+ destreg = REGNO (SUBREG_REG (dst));
+ else
+ return 0;
+
+ return fixit (src, mode, destreg);
+ }
+ return 0;
+}
+
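+/* The machine dependent reorg pass: scan every insn, and for each move that
+   needs its operand fixed up (see broken_move), find or create a nearby
+   barrier, dump a literal table there, and rewrite the moves as pc-relative
+   loads from the table.  */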
+void
+arm_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ int count_size;
+
+#if 0
+ /* The ldr instruction can work with up to a 4k offset, and most constants
+ will be loaded with one of these instructions; however, the adr
+ instruction and the ldf instructions only work with a 1k offset. This
+ code needs to be rewritten to use the 4k offset when possible, and to
+ adjust when a 1k offset is needed. For now we just use a 1k offset
+ from the start. */
+ count_size = 4000;
+
+ /* Floating point operands can't work further than 1024 bytes from the
+ PC, so to make things simple we restrict all loads for such functions.
+ */
+ if (TARGET_HARD_FLOAT)
+ {
+ int regno;
+
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno])
+ {
+ count_size = 1000;
+ break;
+ }
+ }
+#else
+ count_size = 1000;
+#endif /* 0 */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn, count_size);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn = scan;
+ rtx newsrc;
+ rtx addr;
+ int scratch;
+ int address_only;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode, &address_only);
+ addr = plus_constant (gen_rtx (LABEL_REF, VOIDmode,
+ pool_vector_label),
+ offset);
+
+ /* If we only want the address of the pool entry, or
+ for wide moves to integer regs we need to split
+ the address calculation off into a separate insn.
+ If necessary, the load can then be done with a
+ load-multiple. This is safe, since we have
+ already noted the length of such insns to be 8,
+ and we are immediately over-writing the scratch
+ we have grabbed with the final result. */
+ if ((address_only || GET_MODE_SIZE (mode) > 4)
+ && (scratch = REGNO (dst)) < 16)
+ {
+ rtx reg;
+
+ if (mode == SImode)
+ reg = dst;
+ else
+ reg = gen_rtx (REG, SImode, scratch);
+
+ newinsn = emit_insn_after (gen_movaddr (reg, addr),
+ newinsn);
+ addr = reg;
+ }
+
+ if (! address_only)
+ {
+ newsrc = gen_rtx (MEM, mode, addr);
+
+ /* XXX Fixme -- I think the following is bogus. */
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after
+ (gen_rtx (SET, VOIDmode, dst, newsrc), newinsn);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+ }
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ insn = scan;
+ }
+ }
+
+ after_arm_reorg = 1;
+}
+
+
+/* Routines to output assembly language. */
+
+/* If the rtx X is a valid FPA immediate constant, return the string form of
+   the number.  In this way we can ensure that valid double constants are
+   generated even when cross compiling.  */
+char *
+fp_immediate_constant (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
+static char *
+fp_const_from_val (r)
+ REAL_VALUE_TYPE *r;
+{
+ int i;
+
+ if (! fpa_consts_inited)
+ init_fpa_table ();
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* Output the operands of a LDM/STM instruction to STREAM.
+   MASK is the ARM register set mask of which only bits 0-15 are important.
+   INSTR is the instruction format string, which already names the (possibly
+   suffixed) mnemonic and the base register.  HAT is nonzero if a hat ('^')
+   must follow the register list.  */
+
+void
+print_multi_reg (stream, instr, mask, hat)
+ FILE *stream;
+ char *instr;
+ int mask, hat;
+{
+ int i;
+ int not_first = FALSE;
+
+ fputc ('\t', stream);
+ fprintf (stream, instr, REGISTER_PREFIX);
+ fputs (", {", stream);
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ {
+ if (not_first)
+ fprintf (stream, ", ");
+ fprintf (stream, "%s%s", REGISTER_PREFIX, reg_names[i]);
+ not_first = TRUE;
+ }
+
+ fprintf (stream, "}%s\n", hat ? "^" : "");
+}
+
+/* Output a 'call' insn. */
+
+char *
+output_call (operands)
+ rtx *operands;
+{
+ /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+
+ if (REGNO (operands[0]) == 14)
+ {
+ operands[0] = gen_rtx (REG, SImode, 12);
+ output_asm_insn ("mov%?\t%0, %|lr", operands);
+ }
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ output_asm_insn ("bx%?\t%0", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %0", operands);
+
+ return "";
+}
+
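+/* Replace any occurrence of reg 14 (lr) in *X with reg 12 (ip), returning
+   nonzero if a replacement was made.  */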
+static int
+eliminate_lr2ip (x)
+ rtx *x;
+{
+ int something_changed = 0;
+ rtx x0 = *x;
+ int code = GET_CODE (x0);
+ register int i, j;
+ register char *fmt;
+
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x0) == 14)
+ {
+ *x = gen_rtx (REG, SImode, 12);
+ return 1;
+ }
+ return 0;
+ default:
+ /* Scan through the sub-elements and change any references there */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ something_changed |= eliminate_lr2ip (&XEXP (x0, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x0, i); j++)
+ something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
+ return something_changed;
+ }
+}
+
+/* Output a 'call' insn that is a reference in memory. */
+
+char *
+output_call_mem (operands)
+ rtx *operands;
+{
+ operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
+  /* Handle calls using lr by using ip (which may be clobbered in subr
+     anyway).  */
+ if (eliminate_lr2ip (&operands[0]))
+ output_asm_insn ("mov%?\t%|ip, %|lr", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("bx%?\t%|ip", operands);
+ }
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("ldr%?\t%|pc, %0", operands);
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+   OPERANDS[0] is an fpu register.
+   OPERANDS[1] is the first register of an arm register pair.  */
+
+char *
+output_mov_long_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+   OPERANDS[0] is the first register of an arm register pair.
+   OPERANDS[1] is an fpu register.  */
+
+char *
+output_mov_long_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ return "";
+}
+
+/* Output a move of a long double from arm registers to arm registers.
+   OPERANDS[0] is the destination.
+   OPERANDS[1] is the source.  */
+char *
+output_mov_long_double_arm_from_arm (operands)
+ rtx *operands;
+{
+ /* We have to be careful here because the two might overlap */
+ int dest_start = REGNO (operands[0]);
+ int src_start = REGNO (operands[1]);
+ rtx ops[2];
+ int i;
+
+ if (dest_start < src_start)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ else
+ {
+ for (i = 2; i >= 0; i--)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+   OPERANDS[0] is an fpu register.
+   OPERANDS[1] is the first register of an arm register pair.  */
+
+char *
+output_mov_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
+ output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+   OPERANDS[0] is the first register of an arm register pair.
+   OPERANDS[1] is an fpu register.  */
+
+char *
+output_mov_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
+ return "";
+}
+
+/* Output a move between double words.
+ It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
+ or MEM<-REG and all MEMs must be offsettable addresses. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum rtx_code code0 = GET_CODE (operands[0]);
+ enum rtx_code code1 = GET_CODE (operands[1]);
+ rtx otherops[3];
+
+ if (code0 == REG)
+ {
+ int reg0 = REGNO (operands[0]);
+
+ otherops[0] = gen_rtx (REG, SImode, 1 + reg0);
+ if (code1 == REG)
+ {
+ int reg1 = REGNO (operands[1]);
+ if (reg1 == 12)
+ abort();
+
+ /* Ensure the second source is not overwritten */
+ if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
+ output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
+ else
+ output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
+ }
+ else if (code1 == CONST_DOUBLE)
+ {
+ if (GET_MODE (operands[1]) == DFmode)
+ {
+ long l[2];
+ union real_extract u;
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[1]), (char *) &u,
+ sizeof (u));
+ REAL_VALUE_TO_TARGET_DOUBLE (u.d, l);
+ otherops[1] = GEN_INT(l[1]);
+ operands[1] = GEN_INT(l[0]);
+ }
+ else if (GET_MODE (operands[1]) != VOIDmode)
+ abort ();
+ else if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ }
+ else
+ {
+ otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ output_mov_immediate (operands);
+ output_mov_immediate (otherops);
+ }
+ else if (code1 == CONST_INT)
+ {
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
+ what the upper word is. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ }
+ else
+ {
+ otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ }
+#else
+ /* Sign extend the intval into the high-order word */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = operands[1];
+ operands[1] = (INTVAL (operands[1]) < 0
+ ? constm1_rtx : const0_rtx);
+ }
+ else
+ otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
+#endif
+ output_mov_immediate (otherops);
+ output_mov_immediate (operands);
+ }
+ else if (code1 == MEM)
+ {
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%?ia\t%0, %M0", operands);
+ break;
+
+ default:
+	  if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1), SImode))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ switch (INTVAL (otherops[2]))
+ {
+ case -8:
+ output_asm_insn ("ldm%?db\t%1, %M0", otherops);
+ return "";
+ case -4:
+ output_asm_insn ("ldm%?da\t%1, %M0", otherops);
+ return "";
+ case 4:
+ output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
+ return "";
+ }
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ return "ldm%?ia\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adj_offsettable_operand (operands[1], 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ }
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+ }
+ else if (code0 == MEM && code1 == REG)
+ {
+ if (REGNO (operands[1]) == 12)
+ abort();
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ output_asm_insn ("stm%?ia\t%m0, %M1", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("stm%?db\t%m0!, %M1", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PLUS:
+ if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
+ {
+ switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
+ {
+ case -8:
+ output_asm_insn ("stm%?db\t%m0, %M1", operands);
+ return "";
+
+ case -4:
+ output_asm_insn ("stm%?da\t%m0, %M1", operands);
+ return "";
+
+ case 4:
+ output_asm_insn ("stm%?ib\t%m0, %M1", operands);
+ return "";
+ }
+ }
+ /* Fall through */
+
+ default:
+ otherops[0] = adj_offsettable_operand (operands[0], 4);
+ otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1]));
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%1, %0", otherops);
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+
+ return "";
+}
+
+
+/* Output an arbitrary MOV reg, #n.
+ OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
+
+char *
+output_mov_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[1]);
+ int n_ones = 0;
+ int i;
+
+ /* Try to use one MOV */
+ if (const_ok_for_arm (n))
+ {
+ output_asm_insn ("mov%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* Try to use one MVN */
+ if (const_ok_for_arm (~n))
+ {
+ operands[1] = GEN_INT (~n);
+ output_asm_insn ("mvn%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* If all else fails, make it out of ORRs or BICs as appropriate. */
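+  /* For example, 0xFF01 is not a valid immediate, and neither is its
+     complement, so it would be built as
+	 mov  rd, #0x01
+	 orr  rd, rd, #0xFF00
+     where rd stands for the destination register.  */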
+
+ for (i=0; i < 32; i++)
+ if (n & 1 << i)
+ n_ones++;
+
+ if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
+ output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
+ ~n);
+ else
+ output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
+ n);
+
+ return "";
+}
+
+
+/* Output an ADD r, s, #n where n may be too big for one instruction.
+   If N is zero and the source and destination registers are the same,
+   output nothing.  */
+
+char *
+output_add_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[2]);
+
+ if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (n < 0)
+ output_multi_immediate (operands,
+ "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
+ -n);
+ else
+ output_multi_immediate (operands,
+ "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
+ n);
+ }
+
+ return "";
+}
+
+/* Output a multiple immediate operation.
+ OPERANDS is the vector of operands referred to in the output patterns.
+ INSTR1 is the output pattern to use for the first constant.
+ INSTR2 is the output pattern to use for subsequent constants.
+ IMMED_OP is the index of the constant slot in OPERANDS.
+ N is the constant value. */
+
+static char *
+output_multi_immediate (operands, instr1, instr2, immed_op, n)
+ rtx *operands;
+ char *instr1, *instr2;
+ int immed_op;
+ HOST_WIDE_INT n;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ n &= 0xffffffff;
+#endif
+
+ if (n == 0)
+ {
+ operands[immed_op] = const0_rtx;
+ output_asm_insn (instr1, operands); /* Quick and easy output */
+ }
+ else
+ {
+ int i;
+ char *instr = instr1;
+
+ /* Note that n is never zero here (which would give no output) */
+ for (i = 0; i < 32; i += 2)
+ {
+ if (n & (3 << i))
+ {
+ operands[immed_op] = GEN_INT (n & (255 << i));
+ output_asm_insn (instr, operands);
+ instr = instr2;
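+	      /* The chunk just emitted covers bits I..I+7; skip past
+		 it (the loop increment supplies the final 2).  */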
+ i += 6;
+ }
+ }
+ }
+ return "";
+}
+
+
+/* Return the appropriate ARM instruction for the operation code.
+ The returned result should not be overwritten. OP is the rtx of the
+ operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
+ was shifted. */
+
+char *
+arithmetic_instr (op, shift_first_arg)
+ rtx op;
+ int shift_first_arg;
+{
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ return "add";
+
+ case MINUS:
+ return shift_first_arg ? "rsb" : "sub";
+
+ case IOR:
+ return "orr";
+
+ case XOR:
+ return "eor";
+
+ case AND:
+ return "and";
+
+ default:
+ abort ();
+ }
+}
+
+
+/* Ensure valid constant shifts and return the appropriate shift mnemonic
+ for the operation code. The returned result should not be overwritten.
+ OP is the rtx code of the shift.
+   On exit, *AMOUNTP will be -1 if the shift is by a register, or the
+   constant amount of the shift otherwise.  */
+
+static char *
+shift_op (op, amountp)
+ rtx op;
+ HOST_WIDE_INT *amountp;
+{
+ char *mnem;
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
+ *amountp = -1;
+ else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
+ *amountp = INTVAL (XEXP (op, 1));
+ else
+ abort ();
+
+ switch (code)
+ {
+ case ASHIFT:
+ mnem = "asl";
+ break;
+
+ case ASHIFTRT:
+ mnem = "asr";
+ break;
+
+ case LSHIFTRT:
+ mnem = "lsr";
+ break;
+
+ case ROTATERT:
+ mnem = "ror";
+ break;
+
+ case MULT:
+ /* We never have to worry about the amount being other than a
+ power of 2, since this case can never be reloaded from a reg. */
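+      /* For example, a multiply by 8 arrives here and is output as
+	 the shift "asl #3".  */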
+ if (*amountp != -1)
+ *amountp = int_log2 (*amountp);
+ else
+ abort ();
+ return "asl";
+
+ default:
+ abort ();
+ }
+
+ if (*amountp != -1)
+ {
+ /* This is not 100% correct, but follows from the desire to merge
+ multiplication by a power of 2 with the recognizer for a
+ shift. >=32 is not a valid shift for "asl", so we must try and
+ output a shift that produces the correct arithmetical result.
+ Using lsr #32 is identical except for the fact that the carry bit
+ is not set correctly if we set the flags; but we never use the
+ carry bit from such an operation, so we can ignore that. */
+ if (code == ROTATERT)
+ *amountp &= 31; /* Rotate is just modulo 32 */
+ else if (*amountp != (*amountp & 31))
+ {
+ if (code == ASHIFT)
+ mnem = "lsr";
+ *amountp = 32;
+ }
+
+ /* Shifts of 0 are no-ops. */
+ if (*amountp == 0)
+ return NULL;
+ }
+
+ return mnem;
+}
+
+
+/* Return the base-2 logarithm of POWER, which must be a power of two.  */
+
+static HOST_WIDE_INT
+int_log2 (power)
+ HOST_WIDE_INT power;
+{
+ HOST_WIDE_INT shift = 0;
+
+ while (((((HOST_WIDE_INT) 1) << shift) & power) == 0)
+ {
+ if (shift > 31)
+ abort ();
+ shift++;
+ }
+
+ return shift;
+}
+
+/* Output a .ascii pseudo-op, keeping track of lengths. This is because
+ /bin/as is horribly restrictive. */
+
+void
+output_ascii_pseudo_op (stream, p, len)
+ FILE *stream;
+ unsigned char *p;
+ int len;
+{
+ int i;
+ int len_so_far = 1000;
+ int chars_so_far = 0;
+
+ for (i = 0; i < len; i++)
+ {
+ register int c = p[i];
+
+ if (len_so_far > 50)
+ {
+ if (chars_so_far)
+ fputs ("\"\n", stream);
+ fputs ("\t.ascii\t\"", stream);
+ len_so_far = 0;
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+ chars_so_far = 0;
+ }
+
+ if (c == '\"' || c == '\\')
+ {
+ putc('\\', stream);
+ len_so_far++;
+ }
+
+ if (c >= ' ' && c < 0177)
+ {
+ putc (c, stream);
+ len_so_far++;
+ }
+ else
+ {
+ fprintf (stream, "\\%03o", c);
+ len_so_far +=4;
+ }
+
+ chars_so_far++;
+ }
+
+ fputs ("\"\n", stream);
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+}
+
+
+/* Try to determine whether a pattern really clobbers the link register.
+ This information is useful when peepholing, so that lr need not be pushed
+ if we combine a call followed by a return.
+ NOTE: This code does not check for side-effect expressions in a SET_SRC:
+ such a check should not be needed because these only update an existing
+ value within a register; the register must still be set elsewhere within
+ the function. */
+
+static int
+pattern_really_clobbers_lr (x)
+ rtx x;
+{
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case SET:
+ switch (GET_CODE (SET_DEST (x)))
+ {
+ case REG:
+ return REGNO (SET_DEST (x)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG)
+ return REGNO (XEXP (SET_DEST (x), 0)) == 14;
+
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM)
+ return 0;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case PARALLEL:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (pattern_really_clobbers_lr (XVECEXP (x, 0, i)))
+ return 1;
+ return 0;
+
+ case CLOBBER:
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case REG:
+ return REGNO (XEXP (x, 0)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG)
+ return REGNO (XEXP (XEXP (x, 0), 0)) == 14;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case UNSPEC:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int
+function_really_clobbers_lr (first)
+ rtx first;
+{
+ rtx insn, next;
+
+ for (insn = first; insn; insn = next_nonnote_insn (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ case NOTE:
+ case CODE_LABEL:
+ case JUMP_INSN: /* Jump insns only change the PC (and conds) */
+ case INLINE_HEADER:
+ break;
+
+ case INSN:
+ if (pattern_really_clobbers_lr (PATTERN (insn)))
+ return 1;
+ break;
+
+ case CALL_INSN:
+ /* Don't yet know how to handle those calls that are not to a
+ SYMBOL_REF */
+ if (GET_CODE (PATTERN (insn)) != PARALLEL)
+ abort ();
+
+ switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)))
+ {
+ case CALL:
+ if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ case SET:
+ if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn),
+ 0, 0)), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ default: /* Don't recognize it, be safe */
+ return 1;
+ }
+
+	  /* A call can be made (by peepholing) not to clobber lr iff it
+	     is followed by a return.  There may, however, be a USE insn
+	     in between if we are returning the result of the call.
+ If we run off the end of the insn chain, then that means the
+ call was at the end of the function. Unfortunately we don't
+ have a return insn for the peephole to recognize, so we
+ must reject this. (Can this be fixed by adding our own insn?) */
+ if ((next = next_nonnote_insn (insn)) == NULL)
+ return 1;
+
+ /* No need to worry about lr if the call never returns */
+ if (GET_CODE (next) == BARRIER)
+ break;
+
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
+ && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
+ == REGNO (XEXP (PATTERN (next), 0))))
+ if ((next = next_nonnote_insn (next)) == NULL)
+ return 1;
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ break;
+ return 1;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* We have reached the end of the chain so lr was _not_ clobbered */
+ return 0;
+}
+
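+/* Generate the return instruction(s) for a function.  OPERAND is the
+   rtx of the condition governing the return; REVERSE is nonzero if the
+   sense of that condition should be inverted.  If REALLY_RETURN is
+   zero, only restore the saved registers, loading lr rather than pc.  */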
+char *
+output_return_instruction (operand, really_return, reverse)
+ rtx operand;
+ int really_return;
+ int reverse;
+{
+ char instr[100];
+ int reg, live_regs = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ return_used_this_function = 1;
+
+ if (volatile_func)
+ {
+ rtx ops[2];
+ /* If this function was declared non-returning, and we have found a tail
+ call, then we have to trust that the called function won't return. */
+ if (! really_return)
+ return "";
+
+ /* Otherwise, trap an attempted return by aborting. */
+ ops[0] = operand;
+ ops[1] = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (ops[1]);
+ output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
+ return "";
+ }
+
+ if (current_function_calls_alloca && ! really_return)
+ abort();
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs++;
+
+ if (live_regs || (regs_ever_live[14] && ! lr_save_eliminated))
+ live_regs++;
+
+ if (frame_pointer_needed)
+ live_regs += 4;
+
+ if (live_regs)
+ {
+ if (lr_save_eliminated || ! regs_ever_live[14])
+ live_regs++;
+
+ if (frame_pointer_needed)
+ strcpy (instr,
+ reverse ? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
+ else
+ strcpy (instr,
+ reverse ? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[reg]);
+ if (--live_regs)
+ strcat (instr, ", ");
+ }
+
+ if (frame_pointer_needed)
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[11]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, reg_names[13]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, TARGET_THUMB_INTERWORK || (! really_return)
+ ? reg_names[14] : reg_names[15] );
+ }
+ else
+ {
+ strcat (instr, "%|");
+ if (TARGET_THUMB_INTERWORK && really_return)
+ strcat (instr, reg_names[12]);
+ else
+ strcat (instr, really_return ? reg_names[15] : reg_names[14]);
+ }
+ strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
+ output_asm_insn (instr, &operand);
+
+ if (TARGET_THUMB_INTERWORK && really_return)
+ {
+ strcpy (instr, "bx%?");
+ strcat (instr, reverse ? "%D0" : "%d0");
+ strcat (instr, "\t%|");
+ strcat (instr, frame_pointer_needed ? "lr" : "ip");
+
+ output_asm_insn (instr, & operand);
+ }
+ }
+ else if (really_return)
+ {
+ /* CYGNUS LOCAL unknown */
+ if (operand && GET_MODE_CLASS (GET_MODE (XEXP (operand, 0))) != MODE_CC)
+ output_asm_insn ("ldr%?\t%|ip, %0", & operand);
+ /* END CYGNUS LOCAL */
+
+ if (TARGET_THUMB_INTERWORK)
+ sprintf (instr, "bx%%?%%%s0\t%%|lr", reverse ? "D" : "d");
+ else
+ sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
+ reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");
+
+ output_asm_insn (instr, & operand);
+ }
+
+ return "";
+}
+
+/* Return nonzero if optimizing and the current function is volatile.
+ Such functions never return, and many memory cycles can be saved
+ by not storing register values that will never be needed again.
+ This optimization was added to speed up context switching in a
+ kernel application. */
+
+int
+arm_volatile_func ()
+{
+ return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* CYGNUS LOCAL unknown */
+/* Return the size of the prologue. It's not too bad if we slightly
+ over-estimate. */
+
+static int
+get_prologue_size ()
+{
+ return profile_flag ? 12 : 0;
+}
+/* END CYGNUS LOCAL */
+
+/* The amount of stack adjustment that happens here, in output_return and in
+ output_epilogue must be exactly the same as was calculated during reload,
+ or things will point to the wrong place. The only time we can safely
+ ignore this constraint is when a function has no arguments on the stack,
+   no stack frame requirement and no live registers except for `lr'.  If we
+ can guarantee that by making all function calls into tail calls and that
+ lr is not clobbered in any other way, then there is no need to push lr
+ onto the stack. */
+
+void
+output_func_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Nonzero if we must stuff some register arguments onto the stack as if
+ they were passed there. */
+ int store_arg_regs = 0;
+
+ if (arm_ccfsm_state || arm_target_insn)
+ abort (); /* Sanity check */
+
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ return_used_this_function = 0;
+ lr_save_eliminated = 0;
+
+ fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n",
+ ASM_COMMENT_START, current_function_args_size,
+ current_function_pretend_args_size, frame_size);
+ fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
+ ASM_COMMENT_START, frame_pointer_needed,
+ current_function_anonymous_args);
+
+ if (volatile_func)
+ fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START);
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= (1 << reg);
+
+ if (frame_pointer_needed)
+ live_regs_mask |= 0xD800;
+ else if (regs_ever_live[14])
+ {
+ if (! current_function_args_size
+ && ! function_really_clobbers_lr (get_insns ()))
+ lr_save_eliminated = 1;
+ else
+ live_regs_mask |= 0x4000;
+ }
+
+ if (live_regs_mask)
+ {
+      /* If a DImode load/store multiple is used, and the base register
+	 is r3, then r4 can become an ever-live register without lr
+	 doing so; in that case we need to push lr as well, or we will
+	 fail to get a proper return.  */
+      live_regs_mask |= 0x4000;
+      lr_save_eliminated = 0;
+    }
+
+ if (lr_save_eliminated)
+ fprintf (f,"\t%s I don't think this function clobbers lr\n",
+ ASM_COMMENT_START);
+
+#ifdef AOF_ASSEMBLER
+ if (flag_pic)
+ fprintf (f, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX, REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+#endif
+}
+
+
+void
+output_func_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ /* CYGNUS LOCAL unknown */
+ int code_size = 0;
+ /* END CYGNUS LOCAL */
+ /* If we need this then it will always be at least this much */
+ int floats_offset = 12;
+ rtx operands[3];
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ if (use_return_insn (FALSE) && return_used_this_function)
+ {
+ if ((frame_size + current_function_outgoing_args_size) != 0
+ /* CYGNUS LOCAL bug fix */
+ && !(frame_pointer_needed && TARGET_APCS))
+ /* END CYGNUS LOCAL */
+ abort ();
+ goto epilogue_done;
+ }
+
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ goto epilogue_done;
+
+ /* A volatile function should never return. Call abort. */
+ if (TARGET_ABORT_NORETURN && volatile_func)
+ {
+ rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (op);
+ output_asm_insn ("bl\t%a0", &op);
+ /* CYGNUS LOCAL unknown */
+ code_size = 4;
+ /* END CYGNUS LOCAL */
+ goto epilogue_done;
+ }
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ live_regs_mask |= (1 << reg);
+ floats_offset += 4;
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX, floats_offset);
+ }
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ /* We can't unstack more than four registers at once */
+ if (start_reg - reg == 3)
+ {
+ fprintf (f, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg],
+ REGISTER_PREFIX, floats_offset);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ start_reg = reg - 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ live_regs_mask |= 0x6800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask, FALSE);
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ /* CYGNUS LOCAL unknown */
+ code_size += 8;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ live_regs_mask |= 0xA800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+ else
+ {
+ /* Restore stack pointer if necessary. */
+ if (frame_size + current_function_outgoing_args_size != 0)
+ {
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (frame_size
+ + current_function_outgoing_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 16; reg < 24; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX);
+ }
+ }
+ else
+ {
+ int start_reg = 16;
+
+ for (reg = 16; reg < 24; reg++)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (reg - start_reg == 3)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ REGISTER_PREFIX);
+ start_reg = reg + 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+
+ start_reg = reg + 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+ }
+
+ if (current_function_pretend_args_size == 0 && regs_ever_live[14])
+ {
+ if (TARGET_THUMB_INTERWORK)
+ {
+ /* CYGNUS LOCAL */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ }
+ else if (lr_save_eliminated)
+	fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
+		     : "\tmovs\t%spc, %slr\n"),
+		 REGISTER_PREFIX, REGISTER_PREFIX);
+ else
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ if (live_regs_mask || regs_ever_live[14])
+ {
+ /* Restore the integer regs, and the return address into lr */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ /* And finally, go home */
+ if (TARGET_THUMB_INTERWORK)
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ else if (TARGET_APCS_32)
+ fprintf (f, "\tmov\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ else
+ fprintf (f, "\tmovs\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX );
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+
+epilogue_done:
+
+ /* CYGNUS LOCAL unknown */
+ if (optimize > 0)
+ arm_increase_location (code_size
+ + insn_addresses[INSN_UID (get_last_insn ())]
+ + get_prologue_size ());
+ /* END CYGNUS LOCAL */
+
+ current_function_anonymous_args = 0;
+ after_arm_reorg = 0;
+}
+
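+/* Emit an insn pushing the registers whose bits are set in MASK (bit N
+   corresponding to register N); for example, a MASK of 0x4010 describes
+   a push of {r4, lr}.  */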
+static void
+emit_multi_reg_push (mask)
+ int mask;
+{
+ int num_regs = 0;
+ int i, j;
+ rtx par;
+
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ num_regs++;
+
+ if (num_regs == 0 || num_regs > 16)
+ abort ();
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs));
+
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, 0)
+ = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, SImode, i)),
+ 2));
+ break;
+ }
+ }
+
+ for (j = 1, i++; j < num_regs; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, j)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i));
+ j++;
+ }
+ }
+
+ emit_insn (par);
+}
+
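+/* Emit an insn storing COUNT floating point registers, starting at
+   BASE_REG, onto the stack.  */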
+static void
+emit_sfm (base_reg, count)
+ int base_reg;
+ int count;
+{
+ rtx par;
+ int i;
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (par, 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, XFmode,
+ base_reg++)),
+ 2));
+ for (i = 1; i < count; i++)
+ XVECEXP (par, 0, i) = gen_rtx (USE, VOIDmode,
+ gen_rtx (REG, XFmode, base_reg++));
+
+ emit_insn (par);
+}
+
+void
+arm_expand_prologue ()
+{
+ int reg;
+ rtx amount = GEN_INT (-(get_frame_size ()
+ + current_function_outgoing_args_size));
+ int live_regs_mask = 0;
+ int store_arg_regs = 0;
+ /* CYGNUS LOCAL unknown */
+ int sp_overflow_check = 0;
+ /* END CYGNUS LOCAL */
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (! volatile_func)
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= 1 << reg;
+
+ if (! volatile_func && regs_ever_live[14])
+ live_regs_mask |= 0x4000;
+
+ if (frame_pointer_needed)
+ {
+ live_regs_mask |= 0xD800;
+ emit_insn (gen_movsi (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx));
+ }
+
+ if (current_function_pretend_args_size)
+ {
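+      /* The mask selects the highest-numbered argument registers; for
+	 example, 8 bytes of pretend args gives (0xf0 >> 2) & 0xf = 0xc,
+	 a push of {r2, r3}.  */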
+ if (store_arg_regs)
+ emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
+ & 0xf);
+ else
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-current_function_pretend_args_size)));
+ }
+
+ if (live_regs_mask)
+ {
+ /* If we have to push any regs, then we must push lr as well, or
+ we won't get a proper return. */
+ live_regs_mask |= 0x4000;
+ emit_multi_reg_push (live_regs_mask);
+ }
+
+ /* For now the integer regs are still pushed in output_func_epilogue (). */
+
+ if (! volatile_func)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, XFmode,
+ gen_rtx (PRE_DEC, XFmode,
+ stack_pointer_rtx)),
+ gen_rtx (REG, XFmode, reg)));
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (start_reg - reg == 3)
+ {
+ emit_sfm (reg, 4);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ start_reg = reg - 1;
+ }
+ }
+
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ }
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12),
+ (GEN_INT
+ (-(4 + current_function_pretend_args_size)))));
+
+ /* CYGNUS LOCAL */
+  /* The ARM VxWorks group wants the instructions that set up the frame
+     to be neither scheduled nor broken apart.  */
+ if (TARGET_NO_SCHED_PRO)
+ emit_insn (gen_blockage ());
+
+ /* Checking whether the frame amount is zero is not a good enough
+ marker for deciding whether we need to check for stack overflow.
+     We are interested in whether anything has been or is being stored
+     on the stack.  Since GCC always creates the frame structure at the
+ moment, this is always true. When we add a machine specific flag
+ to allow leaf functions to avoid creating an entry frame we will
+ need to make this conditional (NOTE: This will probably not be a
+ standard feature, since the debugging world may assume that EVERY
+ function has a frame, whereas it is not actually a requirement of
+ the APCS). */
+ if (TARGET_APCS_STACK)
+ {
+ int bound = get_frame_size ();
+
+ /* The software stack overflow handler has two forms. The first
+	 is for small stack frames, where 256 bytes or less of stack is
+ required:
+ __rt_stkovf_split_small
+
+	 The second is for bigger stack frames of more than 256 bytes:
+ __rt_stkovf_split_big
+
+ The run-time *MUST* provide these routines when software
+ stack checking is enabled. After calling one of the above
+	 routines the fp/r11 and sp/r13 registers do not necessarily
+ point into the same stack chunk. This means that arguments
+ passed on the stack *MUST* be addressed by offsets from
+ fp/r11 and *NOT* from sp/r13. The sl/r10 register should
+ always be at the bottom of the current stack chunk, with at
+	 least 256 bytes of stack available beneath it (this allows for
+	 leaf functions that use less than 256 bytes of stack to avoid
+	 the stack limit check, as well as giving the overflow
+	 functions some workspace).
+
+ NOTE: The stack-checking APCS does *NOT* cope with alloca(),
+ since the amount of stack required is not known until
+ run-time. Similarly the use of run-time sized vectors causes
+ the same problem. This means that the handler routines
+ should only be used for raising aborts at the moment, and not
+ for providing stack chunk extension.
+
+ TODO: Check code generated for late stack pointer
+ modifications. The APCS allows for these, but a similar
+ stack overflow check and call must be inserted. */
+
+ if (bound < 256)
+ {
+	  /* Leaf functions that use less than 256 bytes of stack do
+	     not need to perform a check: */
+ if (frame_pointer_needed)
+ {
+ /* Stop the prologue being re-ordered: */
+ emit_insn (gen_blockage ());
+ emit_insn (gen_cond_call (stack_pointer_rtx,
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_small"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ else
+ {
+ rtx bamount;
+
+ if (!frame_pointer_needed)
+ abort ();
+
+ if (!const_ok_for_arm ((HOST_WIDE_INT) bound))
+ {
+ /* Find the closest 8bit rotated (by even amount) value
+ above bound: */
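+	      /* For example, a bound of 0x1234 rounds up to 0x1240
+		 (0x49 shifted left by 6), which the ARM can encode.  */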
+ int count;
+	      for (count = 0; ((bound >> count) & ~0xFF); count += 2)
+		;
+ bound = (bound & (0xFF << count)) + (1 << count);
+ }
+ bamount = GEN_INT (- bound);
+
+ emit_insn (gen_blockage ()); /* stop prologue being re-ordered */
+ emit_insn (gen_addsi3 (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx, bamount));
+ emit_insn (gen_cond_call (gen_rtx (REG, SImode, 12),
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_big"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ if (amount != const0_rtx)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
+ emit_insn (gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (MEM, BLKmode, stack_pointer_rtx)));
+ }
+
+ /* CYGNUS LOCAL */
+  /* If we are profiling, make sure no instructions are scheduled before
+     the call to mcount.  Similarly, do not allow instructions to be
+     moved before the stack overflow check, or when the user has
+     requested no scheduling in the prologue.  */
+ if (profile_flag || profile_block_flag || sp_overflow_check)
+ emit_insn (gen_blockage ());
+ /* END CYGNUS LOCAL */
+}
+
+
+/* If CODE is 'd', then X is a condition operand and the instruction
+   should only be executed if the condition is true.
+   If CODE is 'D', then X is a condition operand and the instruction
+   should only be executed if the condition is false: however, if the mode
+ of the comparison is CCFPEmode, then always execute the instruction -- we
+ do this because in these circumstances !GE does not necessarily imply LT;
+ in these cases the instruction pattern will take care to make sure that
+ an instruction containing %d will follow, thereby undoing the effects of
+ doing this instruction unconditionally.
+ If CODE is 'N' then X is a floating point operand that must be negated
+ before output.
+ If CODE is 'B' then output a bitwise inverted value of X (a const int).
+ If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
+
+void
+arm_print_operand (stream, x, code)
+ FILE *stream;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, stream);
+ return;
+
+ case '|':
+ fputs (REGISTER_PREFIX, stream);
+ return;
+
+ case '?':
+ if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
+ fputs (arm_condition_codes[arm_current_cc], stream);
+ return;
+
+ case 'N':
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ fprintf (stream, "%s", fp_const_from_val (&r));
+ }
+ return;
+
+ case 'B':
+ if (GET_CODE (x) == CONST_INT)
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ ARM_SIGN_EXTEND (~ INTVAL (x)));
+ else
+ {
+ putc ('~', stream);
+ output_addr_const (stream, x);
+ }
+ return;
+
+ case 'i':
+ fprintf (stream, "%s", arithmetic_instr (x, 1));
+ return;
+
+ case 'I':
+ fprintf (stream, "%s", arithmetic_instr (x, 0));
+ return;
+
+ case 'S':
+ {
+ HOST_WIDE_INT val;
+ char *shift = shift_op (x, &val);
+
+ if (shift)
+ {
+ fprintf (stream, ", %s ", shift_op (x, &val));
+ if (val == -1)
+ arm_print_operand (stream, XEXP (x, 1), 0);
+ else
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "#%d",
+#else
+ "#%ld",
+#endif
+ val);
+ }
+ }
+ return;
+
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
+ return;
+
+ case 'm':
+ fputs (REGISTER_PREFIX, stream);
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ fputs (reg_names[REGNO (XEXP (x, 0))], stream);
+ else
+ fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
+ return;
+
+ case 'M':
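+      /* For example, a DImode value in r0 prints as "{r0-r1}".  */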
+ fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)],
+ REGISTER_PREFIX, reg_names[REGNO (x) - 1
+ + ((GET_MODE_SIZE (GET_MODE (x))
+ + GET_MODE_SIZE (SImode) - 1)
+ / GET_MODE_SIZE (SImode))]);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (arm_condition_codes[get_arm_condition_code (x)],
+ stream);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
+ (get_arm_condition_code (x))],
+ stream);
+ return;
+
+ default:
+ if (x == 0)
+ abort ();
+
+ if (GET_CODE (x) == REG)
+ {
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x)], stream);
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ output_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ else if (GET_CODE (x) == NEG)
+ abort (); /* This should never happen now. */
+ else
+ {
+ fputc ('#', stream);
+ output_addr_const (stream, x);
+ }
+ }
+}
+
+/* CYGNUS LOCAL unknown */
+/* Increase the `arm_text_location' by AMOUNT if we're in the text
+ segment. */
+
+void
+arm_increase_location (amount)
+ int amount;
+{
+ if (in_text_section ())
+ arm_text_location += amount;
+}
+
+
+/* Output a label definition. If this label is within the .text segment, it
+ is stored in OFFSET_TABLE, to be used when building `llc' instructions.
+   GCC may keep names not starting with a `*' around for a long time,
+   but such names are a minority anyway, so we just make a copy.  Do not
+   store the leading `*' if the name starts with one.  */
+
+void
+arm_asm_output_label (stream, name)
+ FILE * stream;
+ char * name;
+{
+ char * real_name;
+ char * s;
+ struct label_offset *cur;
+ int hash = 0;
+
+ assemble_name (stream, name);
+ fputs (":\n", stream);
+
+ if (! in_text_section ())
+ return;
+
+ if (name[0] == '*')
+ {
+ real_name = xmalloc (1 + strlen (&name[1]));
+ strcpy (real_name, &name[1]);
+ }
+ else
+ {
+ real_name = xmalloc (2 + strlen (name));
+ strcpy (real_name, user_label_prefix);
+ strcat (real_name, name);
+ }
+ for (s = real_name; *s; s++)
+ hash += *s;
+
+ hash = hash % LABEL_HASH_SIZE;
+ cur = (struct label_offset *) xmalloc (sizeof (struct label_offset));
+ cur->name = real_name;
+ cur->offset = arm_text_location;
+ cur->cdr = offset_table[hash];
+ offset_table[hash] = cur;
+}
+/* END CYGNUS LOCAL */
+
+/* A finite state machine takes care of noticing whether or not instructions
+ can be conditionally executed, and thus decrease execution time and code
+ size by deleting branch instructions. The fsm is controlled by
+ final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
+
+/* The states of the fsm controlling condition codes are:
+ 0: normal, do nothing special
+ 1: make ASM_OUTPUT_OPCODE not output this instruction
+ 2: make ASM_OUTPUT_OPCODE not output this instruction
+ 3: make instructions conditional
+ 4: make instructions conditional
+
+ State transitions (state->state by whom under condition):
+ 0 -> 1 final_prescan_insn if the `target' is a label
+ 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
+ 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
+ (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
+ 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
+ (the target insn is arm_target_insn).
+
+ If the jump clobbers the conditions then we use states 2 and 4.
+
+ A similar thing can be done with conditional return insns.
+
+ XXX In case the `target' is an unconditional branch, this conditionalising
+ of the instructions always reduces code size, but not always execution
+ time. But then, I want to reduce the code size to somewhere near what
+ /bin/cc produces. */
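+
+/* For example, given
+
+	cmp	r0, #0
+	beq	.L1
+	add	r1, r1, #1
+   .L1:
+
+   the fsm deletes the branch and makes the skipped insn conditional:
+
+	cmp	r0, #0
+	addne	r1, r1, #1  */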
+
+/* Returns the index of the ARM condition code string in
+ `arm_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+
+static enum arm_cond_code
+get_arm_condition_code (comparison)
+ rtx comparison;
+{
+ enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
+ register int code;
+ register enum rtx_code comp_code = GET_CODE (comparison);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
+ XEXP (comparison, 1));
+
+ switch (mode)
+ {
+ case CC_DNEmode: code = ARM_NE; goto dominance;
+ case CC_DEQmode: code = ARM_EQ; goto dominance;
+ case CC_DGEmode: code = ARM_GE; goto dominance;
+ case CC_DGTmode: code = ARM_GT; goto dominance;
+ case CC_DLEmode: code = ARM_LE; goto dominance;
+ case CC_DLTmode: code = ARM_LT; goto dominance;
+ case CC_DGEUmode: code = ARM_CS; goto dominance;
+ case CC_DGTUmode: code = ARM_HI; goto dominance;
+ case CC_DLEUmode: code = ARM_LS; goto dominance;
+ case CC_DLTUmode: code = ARM_CC;
+
+ dominance:
+ if (comp_code != EQ && comp_code != NE)
+ abort ();
+
+ if (comp_code == EQ)
+ return ARM_INVERSE_CONDITION_CODE (code);
+ return code;
+
+ case CC_NOOVmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_PL;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_Zmode:
+ case CCFPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ default: abort ();
+ }
+
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LS;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_SWPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_LE;
+ case GT: return ARM_LT;
+ case LE: return ARM_GE;
+ case LT: return ARM_GT;
+ case GEU: return ARM_LS;
+ case GTU: return ARM_CC;
+ case LEU: return ARM_CS;
+ case LTU: return ARM_HI;
+ default: abort ();
+ }
+
+ case CC_Cmode:
+ switch (comp_code)
+ {
+ case LTU: return ARM_CS;
+ case GEU: return ARM_CC;
+ default: abort ();
+ }
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LE;
+ case LT: return ARM_LT;
+ case GEU: return ARM_CS;
+ case GTU: return ARM_HI;
+ case LEU: return ARM_LS;
+ case LTU: return ARM_CC;
+ default: abort ();
+ }
+
+ default: abort ();
+ }
+
+ abort ();
+}
+
+
+void
+final_prescan_insn (insn, opvec, noperands)
+ rtx insn;
+ rtx *opvec;
+ int noperands;
+{
+ /* BODY will hold the body of INSN. */
+ register rtx body = PATTERN (insn);
+
+ /* This will be 1 if trying to repeat the trick, and things need to be
+ reversed if it appears to fail. */
+ int reverse = 0;
+
+  /* If JUMP_CLOBBERS is nonzero, the condition codes are clobbered when
+     the branch is taken, even if the rtl suggests otherwise.  It also
+ means that we have to grub around within the jump expression to find
+ out what the conditions are when the jump isn't taken. */
+ int jump_clobbers = 0;
+
+ /* If we start with a return insn, we only succeed if we find another one. */
+ int seeking_return = 0;
+
+ /* START_INSN will hold the insn from where we start looking. This is the
+ first insn after the following code_label if REVERSE is true. */
+ rtx start_insn = insn;
+
+ /* If in state 4, check if the target branch is reached, in order to
+ change back to state 0. */
+ if (arm_ccfsm_state == 4)
+ {
+ if (insn == arm_target_insn)
+ {
+ arm_target_insn = NULL;
+ arm_ccfsm_state = 0;
+ }
+ return;
+ }
+
+ /* If in state 3, it is possible to repeat the trick, if this insn is an
+ unconditional branch to a label, and immediately following this branch
+ is the previous target label which is only used once, and the label this
+ branch jumps to is not too far off. */
+ if (arm_ccfsm_state == 3)
+ {
+ if (simplejump_p (insn))
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ {
+ /* XXX Isn't this always a barrier? */
+ start_insn = next_nonnote_insn (start_insn);
+ }
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ reverse = TRUE;
+ else
+ return;
+ }
+ else if (GET_CODE (body) == RETURN)
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ {
+ reverse = TRUE;
+ seeking_return = 1;
+ }
+ else
+ return;
+ }
+ else
+ return;
+ }
+
+ if (arm_ccfsm_state != 0 && !reverse)
+ abort ();
+ if (GET_CODE (insn) != JUMP_INSN)
+ return;
+
+  /* This jump might be paralleled with a clobber of the condition codes;
+     the jump should always come first.  */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+#if 0
+ /* If this is a conditional return then we don't want to know */
+ if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
+ || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
+ return;
+#endif
+
+ if (reverse
+ || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
+ {
+ int insns_skipped;
+ int fail = FALSE, succeed = FALSE;
+ /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
+ int then_not_else = TRUE;
+ rtx this_insn = start_insn, label = 0;
+
+ if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
+ {
+ /* The code below is wrong for these, and I haven't time to
+ fix it now. So we just do the safe thing and return. This
+ whole function needs re-writing anyway. */
+ jump_clobbers = 1;
+ return;
+ }
+
+ /* Register the insn jumped to. */
+ if (reverse)
+ {
+ if (!seeking_return)
+ label = XEXP (SET_SRC (body), 0);
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
+ label = XEXP (XEXP (SET_SRC (body), 1), 0);
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
+ {
+ label = XEXP (XEXP (SET_SRC (body), 2), 0);
+ then_not_else = FALSE;
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
+ seeking_return = 1;
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
+ {
+ seeking_return = 1;
+ then_not_else = FALSE;
+ }
+ else
+ abort ();
+
+ /* See how many insns this branch skips, and what kind of insns. If all
+ insns are okay, and the label or unconditional branch to the same
+ label is not too far away, succeed. */
+ for (insns_skipped = 0;
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
+ {
+ rtx scanbody;
+
+ this_insn = next_nonnote_insn (this_insn);
+ if (!this_insn)
+ break;
+
+ switch (GET_CODE (this_insn))
+ {
+ case CODE_LABEL:
+ /* Succeed if it is the target label, otherwise fail since
+ control falls in from somewhere else. */
+ if (this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case BARRIER:
+ /* Succeed if the following insn is the target label.
+ Otherwise fail.
+ If return insns are used then the last insn in a function
+ will be a barrier. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case CALL_INSN:
+ /* If using 32-bit addresses the cc is not preserved over
+ calls */
+ if (TARGET_APCS_32)
+ {
+ /* Succeed if the following insn is the target label,
+ or if the following two insns are a barrier and
+ the target label. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && GET_CODE (this_insn) == BARRIER)
+ this_insn = next_nonnote_insn (this_insn);
+
+ if (this_insn && this_insn == label
+ && insns_skipped < max_insns_skipped)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ }
+ break;
+
+ case JUMP_INSN:
+ /* If this is an unconditional branch to the same label, succeed.
+ If it is to another label, do nothing. If it is conditional,
+ fail. */
+ /* XXX Probably, the tests for SET and the PC are unnecessary. */
+
+ scanbody = PATTERN (this_insn);
+ if (GET_CODE (scanbody) == SET
+ && GET_CODE (SET_DEST (scanbody)) == PC)
+ {
+ if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
+ && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
+ fail = TRUE;
+ }
+	      /* Fail if a conditional return is undesirable (e.g. on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
+ else if (GET_CODE (scanbody) == RETURN
+ && seeking_return)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (scanbody) == PARALLEL)
+ {
+ switch (get_attr_conds (this_insn))
+ {
+ case CONDS_NOCOND:
+ break;
+ default:
+ fail = TRUE;
+ break;
+ }
+ }
+ break;
+
+ case INSN:
+ /* Instructions using or affecting the condition codes make it
+ fail. */
+ scanbody = PATTERN (this_insn);
+ if (! (GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
+ || get_attr_conds (this_insn) != CONDS_NOCOND)
+ fail = TRUE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (succeed)
+ {
+ if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
+ arm_target_label = CODE_LABEL_NUMBER (label);
+ else if (seeking_return || arm_ccfsm_state == 2)
+ {
+ while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
+ {
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && (GET_CODE (this_insn) == BARRIER
+ || GET_CODE (this_insn) == CODE_LABEL))
+ abort ();
+ }
+ if (!this_insn)
+ {
+		  /* Oh dear!  We ran off the end... give up.  */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ return;
+ }
+ arm_target_insn = this_insn;
+ }
+ else
+ abort ();
+ if (jump_clobbers)
+ {
+ if (reverse)
+ abort ();
+ arm_current_cc =
+ get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
+ 0), 0), 1));
+ if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ else
+ {
+ /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
+ what it was. */
+ if (!reverse)
+ arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
+ 0));
+ }
+
+ if (reverse || then_not_else)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+      /* Restore recog_operand (getting the attributes of other insns can
+	 destroy this array, but final.c assumes that it remains intact
+	 across this call; since the insn has been recognized already we
+	 call recog directly).  */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ }
+}
+
+#ifdef AOF_ASSEMBLER
+/* Special functions only needed when producing AOF syntax assembler. */
+
+rtx aof_pic_label = NULL_RTX;
+struct pic_chain
+{
+ struct pic_chain *next;
+ char *symname;
+};
+
+static struct pic_chain *aof_pic_chain = NULL;
+
+rtx
+aof_pic_entry (x)
+ rtx x;
+{
+ struct pic_chain **chainp;
+ int offset;
+
+ if (aof_pic_label == NULL_RTX)
+ {
+ /* This needs to persist throughout the compilation. */
+ end_temporary_allocation ();
+ aof_pic_label = gen_rtx (SYMBOL_REF, Pmode, "x$adcons");
+ resume_temporary_allocation ();
+ }
+
+ for (offset = 0, chainp = &aof_pic_chain; *chainp;
+ offset += 4, chainp = &(*chainp)->next)
+ if ((*chainp)->symname == XSTR (x, 0))
+ return plus_constant (aof_pic_label, offset);
+
+ *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
+ (*chainp)->next = NULL;
+ (*chainp)->symname = XSTR (x, 0);
+ return plus_constant (aof_pic_label, offset);
+}
+
+void
+aof_dump_pic_table (f)
+ FILE *f;
+{
+ struct pic_chain *chain;
+
+ if (aof_pic_chain == NULL)
+ return;
+
+ fprintf (f, "\tAREA |%s$$adcons|, BASED %s%s\n",
+ reg_names[PIC_OFFSET_TABLE_REGNUM], REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+ fputs ("|x$adcons|\n", f);
+
+ for (chain = aof_pic_chain; chain; chain = chain->next)
+ {
+ fputs ("\tDCD\t", f);
+ assemble_name (f, chain->symname);
+ fputs ("\n", f);
+ }
+}
+
+int arm_text_section_count = 1;
+
+char *
+aof_text_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ if (flag_pic)
+ strcat (buf, ", PIC, REENTRANT");
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare
+ a function as imported near the beginning of the file, and then to
+ export it later on. It is, however, possible to delay the decision
+ until all the functions in the file have been compiled. To get
+ around this, we maintain a list of the imports and exports, and
+ delete from it any that are subsequently defined. At the end of
+ compilation we spit the remainder of the list out before the END
+ directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+int arm_main_function = 0;
+
+void
+aof_dump_imports (f)
+ FILE *f;
+{
+ /* The AOF assembler needs this to cause the startup code to be extracted
+     from the library.  Bringing in __main causes the whole thing to work
+ automagically. */
+ if (arm_main_function)
+ {
+ text_section ();
+ fputs ("\tIMPORT __main\n", f);
+ fputs ("\tDCD __main\n", f);
+ }
+
+ /* Now dump the remaining imports. */
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif /* AOF_ASSEMBLER */
+
+/* CYGNUS LOCAL */
+
+/* Return non-zero if X is a symbolic operand (contains a SYMBOL_REF). */
+int
+symbolic_operand (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ case CONST:
+ case MEM:
+ case PLUS:
+ return symbolic_operand (mode, XEXP (x, 0));
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Handle a special case when computing the offset
+ of an argument from the frame pointer. */
+int
+arm_debugger_arg_offset (value, addr)
+ int value;
+ struct rtx_def * addr;
+{
+ rtx insn;
+
+ /* We are only interested if dbxout_parms() failed to compute the offset. */
+ if (value != 0)
+ return 0;
+
+ /* We can only cope with the case where the address is held in a register. */
+ if (GET_CODE (addr) != REG)
+ return 0;
+
+ /* If we are using the frame pointer to point at the argument, then an offset of 0 is correct. */
+ if (REGNO (addr) == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+
+ /* Oh dear. The argument is pointed to by a register rather
+ than being held in a register, or being stored at a known
+ offset from the frame pointer. Since GDB only understands
+ those two kinds of argument we must translate the address
+ held in the register into an offset from the frame pointer.
+ We do this by searching through the insns for the function
+ looking to see where this register gets its value. If the
+ register is initialised from the frame pointer plus an offset
+ then we are in luck and we can continue, otherwise we give up.
+
+ This code is exercised by producing debugging information
+ for a function with arguments like this:
+
+ double func (double a, double b, int c, double d) {return d;}
+
+ Without this code the stab for parameter 'd' will be set to
+ an offset of 0 from the frame pointer, rather than 8. */
+
+ /* The if() statement says:
+
+ If the insn is a normal instruction
+ and if the insn is setting the value in a register
+ and if the register being set is the register holding the address of the argument
+      and if the address is computed by an addition
+ that involves adding to a register
+ which is the frame pointer
+ a constant integer
+
+ then... */
+
+ for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
+ {
+ if ( GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == HARD_FRAME_POINTER_REGNUM
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ )
+ {
+ value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
+
+ break;
+ }
+ }
+
+ if (value == 0)
+ {
+      warning ("Unable to compute real location of stacked parameter");
+ value = 8; /* XXX magic hack */
+ }
+
+ return value;
+}
+
+/* Return nonzero if this insn is a call insn. */
+
+static int
+is_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if this insn, which is known to occur after a call insn,
+ will not stop the call from being interpreted as a tail call. */
+
+static int
+is_safe_after_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == NOTE)
+ return 1;
+
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pattern = PATTERN (insn);
+
+ if (GET_CODE (pattern) == USE)
+ return 1;
+
+ /* Special case: Assignment of the result of the call that
+ has just been made to the return value for this function
+ will result in a move from the result register to itself.
+ Detect this case and rely upon the fact that a later pass
+ will eliminate this redundant move. */
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_SRC (pattern)) == REG
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && REGNO (SET_SRC (pattern)) == REGNO (SET_DEST (pattern)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return nonzero if this function is suitable for a tail call optimisation. */
+
+int
+can_tail_call_optimise ()
+{
+ rtx insn;
+ int found_call = 0;
+
+ /* Functions that need frames cannot have tail call optimisations applied. */
+ if (get_frame_size() > 0
+ || current_function_anonymous_args)
+ return 0;
+
+ /* Functions that perform more than one function call,
+ or that perform some computation after their only
+ function call cannot be optimised either. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ /* Repeat the tests for the insns in the epilogue list. */
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ return found_call;
+}
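+
+/* For illustration, a function such as
+
+     int wrapper (int x) { return worker (x + 1); }
+
+   allocates no frame and makes exactly one call, followed at most by
+   the redundant result copy, so the test above succeeds; a second
+   call, or arithmetic applied to the result, makes it fail.  (worker
+   is a hypothetical callee.)  */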
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL nickc */
+int
+ok_integer_or_other (operand)
+ rtx operand;
+{
+ if (GET_CODE (operand) == CONST_INT)
+ {
+ if (const_ok_for_arm (INTVAL (operand))
+ || const_ok_for_arm (~INTVAL (operand)))
+ return 1;
+ return 0;
+ }
+
+ return 1;
+}
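+
+/* For illustration: ARM data processing immediates are an 8-bit value
+   rotated right by an even amount, so, e.g.,
+
+     ok_integer_or_other (GEN_INT (0x3fc)) == 1   (0xff << 2 fits)
+     ok_integer_or_other (GEN_INT (-2))    == 1   (~(-2) == 1 fits)
+     ok_integer_or_other (GEN_INT (0x101)) == 0   (neither form fits)
+
+   and any operand that is not a CONST_INT is accepted as-is.  */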
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/config/arm/arm.h b/gcc_arm/config/arm/arm.h
new file mode 100755
index 0000000..6429c3d
--- /dev/null
+++ b/gcc_arm/config/arm/arm.h
@@ -0,0 +1,2218 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
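+/* For example, `-march=armv4t' selects -D__ARM_ARCH_4T__ above and
+   overrides any -mcpu= setting, while `-mcpu=arm710' with no -march=
+   falls through to the second block and gets -D__ARM_ARCH_3__.  */
+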
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+ -mapcs-{26,32} is set.
+ ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+ %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC
+ driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture, can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+ "Support calls between THUMB and ARM instructions sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+ "Specify the minumum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode (arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
+
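+/* For example, a QImode char variable is widened to SImode and always
+   zero extended, while an HImode short is widened with the signedness
+   chosen to match how short loads are performed (unsigned when
+   -mshort-load-bytes is in effect).  */
+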
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ autos isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
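+/* For example, HARD_REGNO_NREGS (0, DImode) is 2 (the value occupies
+   r0 and r1), while HARD_REGNO_NREGS (16, DFmode) is 1, since an FPU
+   register holds any floating mode whole.  */
+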
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which register should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
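+/* Bit N of each mask above corresponds to hard register N: 0x0FF0000
+   selects registers 16-23 (the FPU registers f0-f7), and 0x200FFFF
+   selects r0-r15 together with bit 25, the soft frame pointer.  */
+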
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE) >= 0 && (VALUE) <= 32) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
+
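+/* For example, 0x3F0 satisfies `I' (an 8-bit value shifted left 4),
+   0xFFFFFF00 satisfies `K' (its complement 0xFF is valid), -255
+   satisfies `L' (its negation 255 is valid), and 64 satisfies `M'
+   (a power of 2).  */
+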
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
+
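+/* For example, an SImode reference [r4, #4100] is out of range; the
+   code above splits it as high = 4096, low = 4, reloads r4 + 4096
+   into a base register and leaves [base, #4] in the mem, so a single
+   reloaded base can serve several nearby out-of-range offsets.  */
+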
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+ passed on the stack (function_prologue will indeed make it be passed on
+ the stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
+
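+/* For example, for `void f (int a, double b, double c)': a goes in r0
+   (CUM == 0) and b in r1-r2 (CUM == 4), while c (CUM == 12) straddles
+   the boundary: FUNCTION_ARG yields r3 and FUNCTION_ARG_PARTIAL_NREGS
+   yields 1, so the first word of c is in r3 and the second is on the
+   stack.  */
+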
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+ puts this BEFORE the prolog but gcc puts it afterwards. Keeping the
+ ``mov ip,lr'' seems like a good idea, so as to stay with the cc
+ convention. ``prof'' doesn't seem to mind either way. */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf (STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf (STREAM, "\tbl\tmcount\n"); \
+ fprintf (STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <= 23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) ((value) ? (value) : arm_debugger_arg_offset ((value), (addr)))
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: this also limits the offset range of unsigned byte loads. */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
+
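+/* For example, with a word load the index may be another register, a
+   register scaled by a power of two (a MULT by a power of two, or a
+   constant shift of 1..31), or a constant offset in -4095..+4095;
+   FPU loads accept only constant offsets that are multiples of 4 in
+   -1020..+1020.  */
+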
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+ Allow REG-only and AUTOINC-REG if handling TImode or HImode. Other symbol
+ refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
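+
+/* Illustrative summary of the cases accepted above: a plain base REG;
+   POST_INC/PRE_DEC (and, for sizes <= 4, PRE_INC/POST_DEC) on a suitable
+   register; LABEL_REFs once reload has completed; base + legitimate index
+   via GO_IF_LEGITIMATE_INDEX; and non-float SYMBOL_REFs into the constant
+   pool.  DImode and soft-float DFmode accept only base + {4, -4, -8},
+   which keeps both words of the value addressable.  */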
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
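+
+/* Worked example (illustrative): for an SImode reference to [r0 + 0x1234]
+   the code above splits the constant as
+
+     n = 0x1234  ->  low_n = 0x234, n = 0x1000
+
+   emitting "add rT, r0, #0x1000" for a fresh pseudo rT and returning the
+   valid address [rT, #0x234].  */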
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned.
+   Unsigned is probably best, but may break some code.  */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
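+
+/* E.g. QImode loads are always zero-extended (ldrb), and on arm_arch4
+   HImode loads are too (ldrh); pre-v4 big-endian targets know only that
+   halfword loads sign-extend (cf. gen_rotated_half_load), and otherwise
+   nothing is guaranteed (NIL).  */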
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+    return 7;
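+
+/* For instance (illustrative): #255 and #0xff00 are valid rotated 8-bit
+   immediates and so cost 2 in a SET, while #257 cannot be encoded in one
+   instruction and costs 5, encouraging cse to keep it in a register.  */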
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+   of address here in the order (most preferred first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
+
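+/* Evaluating the expression above gives, illustratively: auto-inc/dec
+   addresses cost 0, REG+CONST_INT costs 2, REG plus a shifted register
+   costs 3, REG+REG costs 4, a plain REG costs 6, and MEM, LABEL_REF or
+   SYMBOL_REF addresses cost 10.  */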
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
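+
+/* Example (illustrative): (GT x #0xfff) has a constant that no data
+   processing immediate can encode; arm_canonicalize_comparison can
+   rewrite it as (GE x #0x1000), whose constant is a valid rotated
+   immediate.  */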
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* GCC puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
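+/* With an empty REGISTER_PREFIX these emit, e.g. for r0:
+
+	stmfd	sp!,{r0}
+	ldmfd	sp!,{r0}  */
+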
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
+
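+/* E.g. on a 64-bit host ARM_SIGN_EXTEND (0x80000000) yields
+   0xffffffff80000000, keeping 32-bit target constants canonical in the
+   wider HOST_WIDE_INT.  */
+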
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
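+
+/* Sample renderings from the cases above, assuming an empty
+   REGISTER_PREFIX:
+
+     (reg r0)                          -> [r0, #0]
+     (plus (reg r1) (const_int 4))     -> [r1, #4]
+     (minus (reg r1) (reg r2))         -> [r1, -r2]
+     (pre_dec (reg r3))                -> [r3, #-4]!   (SImode)
+     (post_inc (reg r3))               -> [r3], #4     (SImode)  */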
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
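+
+/* Worked example (illustrative): with DELTA == 0x10004 and a function not
+   returning an aggregate (so `this' is in r0), the loop above emits
+
+	add	r0, r0, #4
+	add	r0, r0, #65536
+	b	function
+
+   peeling one rotated 8-bit chunk off the delta per instruction.  */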
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm.md b/gcc_arm/config/arm/arm.md
new file mode 100755
index 0000000..77f98e3
--- /dev/null
+++ b/gcc_arm/config/arm/arm.md
@@ -0,0 +1,6496 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999, 2002 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;; Note: sin and cos are no longer used.
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit.  If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns.  (Well, for best
+; performance we should try to group them together.)
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor, can run in parallel with other, basic instructions.
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches.
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the conditions are not defined if a branch is taken,
+; but are if the branch wasn't taken; the effect is to limit the branch
+; elimination scanning.
+
+; NOCOND means that the condition codes are neither altered nor affect the
+; output of this insn
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly.  When writing, the two
+; addresses are fully pipelined.  However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word; if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of
+; 1 MCLK cycle to add as well.
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially), the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately since we do not go through the normal arm_ccfsm_state
+;; processing we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register; what we don't want is for something being
+;; written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then the constant
+; will be pre-loaded into a register, taking at least two insns; we might be
+; able to merge it with an add, but it depends on the exact value.
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+  /* This code is similar to the approach followed in movsi, but it must
+     generate exactly two insns.  */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+	  /* We might be able to do this as (larger number - small number).  */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+  /* If we got here, we have found a way of doing it in two instructions.
+     The two constants are in val and temp.  */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent the operands 0 and 1 being the same
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match the MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
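+
+;; The high-part patterns are used, for example, when a division by a
+;; constant is turned into a multiplication by a precomputed reciprocal:
+;; the upper 32 bits of the 64-bit product land directly in operand 0,
+;; while the unwanted low half goes to the scratch register.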
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check for start+length
+;; less than or equal to 8. This is necessary for the bitfield to fit within
+;; a single byte. This pattern was deleted Feb 25, 1999 in egcs, so we
+;; just disable it for 99r1.
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+      /* A trick: since we are setting the bottom bits in the word,
+	 we can shift operand[3] up, operand[0] down, OR them together
+	 and rotate the result back again. This takes 3 insns, and
+	 the third might be mergeable into another op. */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; Constants for operand 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = (AND (IOR A B) (NOT C))
+;; followed by (NOT D), we can sometimes merge the final NOT into one of the
+;; following insns.
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
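+
+;; The identity behind this split is De Morgan's law:
+;; (~A & ~B) | C == ~((A | B) & ~C), giving ORR, BIC, MVN, e.g.:
+;;
+;;	orr	r4, r1, r2	@ A | B
+;;	bic	r4, r4, r3	@ (A | B) & ~C
+;;	mvn	r0, r4		@ the final NOT, sometimes merged later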
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ "GET_CODE (operands[1]) != REG
+   || (REGNO (operands[1]) != FRAME_POINTER_REGNUM
+       && REGNO (operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN, COS, TAN and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+  if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+    /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+  if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+    /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the arm; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+   So that the optimizer does not confuse this with a normal symbol load,
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
+
+;; If copying one reg to another we can set the condition codes according to
+;; its value.  Such a move is common after a return from a subroutine, when
+;; the result is being tested against zero.
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
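+
+;; e.g. "subs r0, r1, #0" both copies r1 into r0 and sets the condition
+;; codes from the value, saving a separate compare against zero.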
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
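+
+;; This relies on the old ARM behaviour that an LDR from a non-word-aligned
+;; address rotates the loaded word (the case when ! TARGET_SHORT_BY_BYTES):
+;; loading at offset 2 from a word-aligned base returns the word rotated by
+;; 16 bits, which is exactly the rotate in the pattern above.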
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid a problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when compiling with -msoft-float.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version. This is essentially the same as movdi.
+;; Do not use `f' as a constraint to prevent reload from ever trying to use
+;; an `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The ARM can load/store any set of registers, provided that they are in
+;; ascending order, but expressing that is beyond GCC, so stick with what
+;; it knows.
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point registers.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
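+
+;; e.g. "ldmia r4!, {r0, r1, r2}" loads three words and leaves r4 advanced
+;; by 12: four bytes per register loaded, which is the write-back amount
+;; the insn condition enforces.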
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point registers.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to smaller blocks, but it clobbers so many
+;; registers that there is then probably a better way.
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file, since (set (reg x) (reg x)) is a no-op
+; that is deleted later on.  The match_dup will match the mode here, so
+; that mode changes of the condition codes aren't lost by this even though
+; we don't specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; Patterns to match conditional branch insns
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+
+; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+  /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+     Otherwise, ensure operand 3 is a valid FP add operand.  */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; The length is the worst case; normally the call is only two
+;; instructions (8 bytes).
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs specially, as they are not valid general
+;; addresses.  The 'a' causes the operand to be treated as an address,
+;; i.e. no '#' output.
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! TARGET_LONG_CALLS && GET_CODE(operands[1]) == SYMBOL_REF"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set
+;; the type attribute to "load".
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
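+
+;; In the non-PIC form the pc read in the LDRLS yields the address of that
+;; instruction plus 8, i.e. the word just past the "b %l3", which is where
+;; the jump table is emitted, so entry N is fetched at [pc, N, asl #2].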
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that. This is a kludge, but there doesn't
+;; seem to be a way around it. Most of the predicates have to be null
+;; because the format can be generated part way through reload, so
+;; if we don't match it as soon as it becomes available, reload doesn't know
+;; how to reload pseudos that haven't got hard registers; the constraints will
+;; sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; We have no idea how long the add_immediate is; it could be up to 4
+; instructions, hence the worst-case length of 20 bytes.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
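+
+;; The special cases above avoid a compare entirely: for LT against zero,
+;; "mov r0, r2, lsr #31" leaves 1 in r0 exactly when the sign bit of r2 is
+;; set, i.e. when r2 < 0; the GE case inverts the value first.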
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
+;; The ARM can support extended pre-inc instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
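+;;
+;; For example (illustrative only), for C source such as
+;;   p += i; *p = c;
+;; combine can match *strqi_preinc and emit a single writeback store,
+;;   strb  rc, [rp, ri]!
+;; where rp, ri and rc stand for whichever registers hold p, i and c.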
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; It can also support extended post-inc expressions, but combine doesn't
+; try these.
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; for this pattern to match.
+; When loading we must watch to see that the base register isn't trampled by
+; the load.  In such cases this isn't a post-inc expression.
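+; For example (illustrative), a word store followed at once by the increment,
+;   str  r2, [r0]
+;   add  r0, r0, r1
+; is collapsed by the str peephole below into the single post-indexed
+;   str  r2, [r0], r1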
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole.
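+; For example (illustrative), the pair
+;   mov  r0, r1
+;   cmp  r1, #0
+; becomes the single flag-setting copy
+;   subs r0, r1, #0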
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples; if the ordering is
+; reversed, check that the memory references aren't volatile.
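+; For example (illustrative), four ascending word loads such as
+;   ldr r4, [r0]
+;   ldr r5, [r0, #4]
+;   ldr r6, [r0, #8]
+;   ldr r7, [r0, #12]
+; are emitted by emit_ldm_seq as the single
+;   ldmia r0, {r4, r5, r6, r7}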
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is if
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so eliminating
+;; it upsets the offset calculations.
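+;;
+;; For example (illustrative), an epilogue sequence such as
+;;   bl    foo
+;;   ldmfd sp!, {r4, pc}
+;; can instead restore lr and tail-call,
+;;   ldmfd sp!, {r4, lr}
+;;   b     foo
+;; so that foo's own return goes straight back to our caller.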
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If we call a subroutine and then jump back to somewhere else, but not
+;; too far away, we can set the link register to the branch address and
+;; jump directly to the subroutine.  On return from the subroutine,
+;; execution continues at the branch; this avoids a prefetch stall.
+;; We use the length attribute (via short_branch ()) to establish whether
+;; or not this is possible; this is the same approach as the SPARC port uses.
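+;;
+;; For example (illustrative, with .Lback standing for the jump-back label),
+;; instead of
+;;   bl   foo
+;;   b    .Lback
+;; we can emit
+;;   mov  lr, pc		@ protect cc
+;;   add  lr, lr, #(.Lback - . - 4)
+;;   b    foo
+;; so that foo returns directly to .Lback.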
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an ARM6 or
+   * above; leaving it out means that the code will still run on an ARM 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
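+;;
+;; For example (illustrative), comparing (byte << 24) with 0x45000000 under
+;; CC_Z is equivalent to comparing the zero-extended byte with 0x45, so the
+;; shifted compare of the loaded byte collapses to a plain compare.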
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+  operands[1] = GEN_INT (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions added to them.  We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work.  However, this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
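+;;
+;; For example (illustrative), the combined insn
+;;   (set r0 (if_then_else (lt r1 r2) r3 r4))	(with a clobber of reg:CC 24)
+;; splits into an explicit comparison
+;;   (set (reg:CC 24) (compare r1 r2))
+;; followed by an if_then_else that only tests the CC register against zero.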
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence.
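+;; For example (illustrative), C source such as
+;;   r = -((x >> 5) & 1);
+;; matches *sign_extract_onebit and is emitted as
+;;   ands  r0, r1, #32
+;;   mvnne r0, #0
+;; leaving 0 or -1 in r0.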
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
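+;; For example, pushing r4, r5 and lr produces: stmfd sp!, {r4, r5, lr}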
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/arm_010110a.h b/gcc_arm/config/arm/arm_010110a.h
new file mode 100755
index 0000000..91f440e
--- /dev/null
+++ b/gcc_arm/config/arm/arm_010110a.h
@@ -0,0 +1,2211 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (i.e. a "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+ -mapcs-{26,32} is set.  ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+ %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] options into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9. */
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC
+ driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture; it can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (i.e. a function). */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bits
+ that assume restoration of the condition flags when returning from a
+ branch and link (i.e. a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+ function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+ "Support calls between THUMB and ARM instructions sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+ "Specify the minumum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
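+/* (A zero extension comes for free from ldrb, whereas sign extending a
+ byte needs an extra pair of shifts on processors without ldrsb.) */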
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is true when compiling for big-endian bytes, unless
+ -mwords-little-endian is given. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor predefines when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ autos isn't known until after register allocation.
+ afp Nor is this; we only need it because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM, core regs are UNITS_PER_WORD bytes wide; FPU regs can hold any
+ FP mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
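+/* Thus a DFmode value occupies two core registers (e.g. r0-r1) but only
+ a single FPU register, since an FPA register holds an FP value whole. */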
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which registers should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
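+/* For `I', the data-processing immediate is an 8-bit value rotated right
+ by an even amount, so e.g. 0xff, 0xff00 and 0xff000000 all satisfy it,
+ while 0x101 does not. */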
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
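+/* For example, an SImode reference to [r4, #4100] is out of range for a
+ single load, so it is split: a reload adds 4096 into a base register
+ and the memory reference keeps the residual offset of 4. */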
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bytes wide, while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
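+/* Thus a double result is returned in f0 under -mhard-float, but in
+ r0-r1 under -msoft-float. */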
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+ passed on the stack (function_prologue will indeed push it onto the
+ stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
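+/* For example, in a call f (a, b, c, d, e) with int arguments, a through
+ d are passed in r0-r3 and e is passed on the stack. */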
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+ puts this BEFORE the prologue, but gcc puts it afterwards. Keeping the
+ ``mov ip,lr'' seems like a good idea, to stick with the cc convention;
+ ``prof'' doesn't seem to mind! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + (get_frame_size () + 3 & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = (get_frame_size () + 3 & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) value ? value : arm_debugger_arg_offset (value, addr)
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
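+/* Reading the pc yields the address of the current instruction plus 8,
+ so each ldr above fetches the .word slot two instructions after it:
+ the static chain value for the first, the function address for the
+ second. */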
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+      /* NASTY: since HImode and QImode share this test, the arch4     \
+         range of 256 also limits the addressing of unsigned byte      \
+         loads, which could otherwise reach 4095.  */                  \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
+
+/* Jump to LABEL if X is a valid address RTX.  This must also take
+   REG_OK_STRICT into account when deciding about valid registers, but it
+   uses the above macros so we are in luck.  Allow REG, REG+REG, REG+INDEX,
+   INDEX+REG, REG-INDEX, and non-floating SYMBOL_REF to the constant pool.
+   Allow REG-only and AUTOINC-REG if handling TImode or HImode.  Other
+   symbol refs must be forced through a static cell to ensure
+   addressability.  */
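+/* Some addresses accepted below, taking SImode as an example:
+     (reg r3)                                       REG
+     (plus (reg r3) (reg r4))                       REG+REG
+     (plus (reg r3) (mult (reg r4) (const_int 4)))  REG+INDEX (lsl #2)
+     (plus (reg r3) (const_int -80))                REG+INT
+     (post_inc (reg r3))                            AUTOINC-REG.  */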
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
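+/* Worked example: for SImode and X == (plus (reg r4) (const_int
+   0x12345)), the code below splits the offset into low_n == 0x345
+   (0x12345 & 0xfff) and n == 0x12000, which is a valid immediate, so
+   in effect we emit "add base, r4, #0x12000" and return the
+   legitimate address (plus base (const_int 0x345)).  */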
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+      else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1))		\
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned;
+   unsigned is probably best, but may break some code.  */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
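+/* For example, on an architecture 4 chip both byte and halfword loads
+   (ldrb, ldrh) zero-extend, so LOAD_EXTEND_OP gives ZERO_EXTEND for
+   QImode and HImode; on earlier chips only ldrb zero-extends, and a
+   big-endian HImode load is typically done with word loads and shifts
+   that leave the value sign-extended, hence SIGN_EXTEND.  */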
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
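+/* Concretely: "mov r0, r1, lsl r2" with r2 == 33 yields zero, since
+   the ARM uses the count modulo 256 and any count of 32 or more
+   shifts everything out, whereas the modulo-32 semantics implied by
+   SHIFT_COUNT_TRUNCATED would give r1 << 1.  */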
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return(7);
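+/* For example, (const_int 0xff) is a valid ARM immediate, so as the
+   source of a SET it costs 2; (const_int 0x101) cannot be encoded
+   (bits 0 and 8 do not fit in one rotated 8-bit field), and neither
+   can its complement or negation, so it costs 5.  */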
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
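+/* Evaluating the expression above: (post_inc (reg)) costs 0,
+   (plus (reg) (const_int 4)) costs 2, a shifted index such as
+   (plus (reg) (mult (reg) (const_int 4))) costs 3, (plus (reg) (reg))
+   costs 4, a plain (reg) costs 6 and a (symbol_ref ...) costs 10.  */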
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
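+/* For example, (GT x (const_int 0xffffff)) involves a constant that no
+   ARM data-processing insn can take, but arm_canonicalize_comparison
+   can rewrite it as (GE x (const_int 0x1000000)), whose constant is a
+   valid immediate (1 rotated into bit 24).  */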
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0)				\
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
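+/* In 26-bit mode r15 also carries the PSR: bits 0 and 1 are the mode
+   bits and bits 26 to 31 hold the condition codes and interrupt masks,
+   so the mask 0x03fffffc keeps only the word-aligned address bits.  */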
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_020422.c b/gcc_arm/config/arm/arm_020422.c
new file mode 100755
index 0000000..65a08dc
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.c
@@ -0,0 +1,7160 @@
+/* Output routines for GCC for ARM.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include <string.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "expr.h"
+#include "toplev.h"
+
+/* The maximum number of insns skipped which will be conditionalised if
+ possible. */
+static int max_insns_skipped = 5;
+
+extern FILE *asm_out_file;
+/* Some function declarations. */
+
+/* CYGNUS LOCAL */
+void arm_increase_location PROTO ((int));
+static int get_prologue_size PROTO ((void));
+/* END CYGNUS LOCAL */
+
+static HOST_WIDE_INT int_log2 PROTO ((HOST_WIDE_INT));
+static char *output_multi_immediate PROTO ((rtx *, char *, char *, int,
+ HOST_WIDE_INT));
+static int arm_gen_constant PROTO ((enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, rtx, rtx, int, int));
+static int arm_naked_function_p PROTO ((tree));
+static void init_fpa_table PROTO ((void));
+static enum machine_mode select_dominance_cc_mode PROTO ((enum rtx_code, rtx,
+ rtx, HOST_WIDE_INT));
+static HOST_WIDE_INT add_constant PROTO ((rtx, enum machine_mode, int *));
+static void dump_table PROTO ((rtx));
+static int fixit PROTO ((rtx, enum machine_mode, int));
+static rtx find_barrier PROTO ((rtx, int));
+static int broken_move PROTO ((rtx));
+static char *fp_const_from_val PROTO ((REAL_VALUE_TYPE *));
+static int eliminate_lr2ip PROTO ((rtx *));
+static char *shift_op PROTO ((rtx, HOST_WIDE_INT *));
+static int pattern_really_clobbers_lr PROTO ((rtx));
+static int function_really_clobbers_lr PROTO ((rtx));
+static void emit_multi_reg_push PROTO ((int));
+static void emit_sfm PROTO ((int, int));
+static enum arm_cond_code get_arm_condition_code PROTO ((rtx));
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. */
+
+rtx arm_compare_op0, arm_compare_op1;
+int arm_compare_fp;
+
+/* CYGNUS LOCAL: Definition of arm_cpu deleted. */
+
+/* What type of floating point are we tuning for? */
+enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available? */
+enum floating_point_type arm_fpu_arch;
+
+/* What program mode is the cpu running in? 26-bit mode or 32-bit mode */
+enum prog_mode_type arm_prgmode;
+
+/* CYGNUS LOCAL: Name changed to fpe. */
+/* Set by the -mfpe=... option */
+char *target_fpe_name = NULL;
+/* END CYGNUS LOCAL */
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+/* Bit values used to identify processor capabilities. */
+#define FL_CO_PROC 0x01 /* Has external co-processor bus */
+#define FL_FAST_MULT 0x02 /* Fast multiply */
+#define FL_MODE26 0x04 /* 26-bit mode support */
+#define FL_MODE32 0x08 /* 32-bit mode support */
+#define FL_ARCH4 0x10 /* Architecture rel 4 */
+#define FL_THUMB 0x20 /* Thumb aware */
+#define FL_LDSCHED 0x40 /* Load scheduling necessary */
+#define FL_STRONG 0x80 /* StrongARM */
+
+/* The bits in this mask specify which instructions we are allowed to generate. */
+static int insn_flags = 0;
+/* The bits in this mask specify which instruction scheduling options should
+ be used. Note - there is an overlap with the FL_FAST_MULT. For some
+ hardware we want to be able to generate the multiply instructions, but to
+ tune as if they were not present in the architecture. */
+static int tune_flags = 0;
+
+/* The following are used in the arm.md file as equivalents to bits
+ in the above two flag variables. */
+
+/* Nonzero if this is an "M" variant of the processor. */
+int arm_fast_multiply = 0;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+int arm_arch4 = 0;
+
+/* Nonzero if this chip can benefit from load scheduling. */
+int arm_ld_sched = 0;
+
+/* Nonzero if this chip is a StrongARM. */
+int arm_is_strong = 0;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+int arm_is_6_or_7 = 0;
+
+/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
+ must report the mode of the memory reference from PRINT_OPERAND to
+ PRINT_OPERAND_ADDRESS. */
+enum machine_mode output_memory_reference_mode;
+
+/* Nonzero if the prologue must set up `fp'. */
+int current_function_anonymous_args;
+
+/* The register number to be used for the PIC offset register. */
+int arm_pic_register = 9;
+
+/* Location counter of .text segment. */
+int arm_text_location = 0;
+
+/* Set to one if we think that lr is only saved because of subroutine calls,
+ but all of these can be `put after' return insns */
+int lr_save_eliminated;
+
+/* Set to 1 when a return insn is output, this means that the epilogue
+ is not needed. */
+
+static int return_used_this_function;
+
+/* Set to 1 after arm_reorg has started.  Reset to zero at the start of
+   the next function. */
+static int after_arm_reorg = 0;
+
+/* The maximum number of insns to be used when loading a constant. */
+static int arm_constant_limit = 3;
+
+/* CYGNUS LOCAL unknown */
+/* A hash table is used to store text segment labels and their associated
+ offset from the start of the text segment. */
+struct label_offset
+{
+ char * name;
+ int offset;
+ struct label_offset * cdr;
+};
+
+#define LABEL_HASH_SIZE 257
+
+static struct label_offset * offset_table [LABEL_HASH_SIZE];
+/* END CYGNUS LOCAL */
+
+/* For an explanation of these variables, see final_prescan_insn below. */
+int arm_ccfsm_state;
+enum arm_cond_code arm_current_cc;
+rtx arm_target_insn;
+int arm_target_label;
+
+/* The condition codes of the ARM, and the inverse function. */
+char *arm_condition_codes[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
+};
+
+static enum arm_cond_code get_arm_condition_code ();
+
+
+/* Initialization code */
+
+struct processors
+{
+ char * name;
+ unsigned int flags;
+};
+
+/* Not all of these give usefully different compilation alternatives,
+ but there is no simple way of generalizing them. */
+static struct processors all_cores[] =
+{
+ /* ARM Cores */
+
+ {"arm2", FL_CO_PROC | FL_MODE26 },
+ {"arm250", FL_CO_PROC | FL_MODE26 },
+ {"arm3", FL_CO_PROC | FL_MODE26 },
+ {"arm6", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm60", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm600", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm610", FL_MODE26 | FL_MODE32 },
+ {"arm620", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm7m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* arm7m doesn't exist on its own, */
+ {"arm7d", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* but only with D, (and I), */
+ {"arm7dm", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT }, /* but those don't alter the code, */
+ {"arm7di", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* so arm7m is sometimes used. */
+ {"arm7dmi", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"arm70", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm700i", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"arm710", FL_MODE26 | FL_MODE32 },
+ {"arm710c", FL_MODE26 | FL_MODE32 },
+ {"arm7100", FL_MODE26 | FL_MODE32 },
+ {"arm7500", FL_MODE26 | FL_MODE32 },
+ {"arm7500fe", FL_CO_PROC | FL_MODE26 | FL_MODE32 }, /* Doesn't really have an external co-proc, but does have embedded fpu. */
+ {"arm7tdmi", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {"arm8", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm810", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm9", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm920", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED },
+ {"arm920t", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"arm9tdmi", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED },
+ {"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+
+ {NULL, 0}
+};
+
+static struct processors all_architectures[] =
+{
+ /* ARM Architectures */
+
+ {"armv2", FL_CO_PROC | FL_MODE26 },
+ {"armv2a", FL_CO_PROC | FL_MODE26 },
+ {"armv3", FL_CO_PROC | FL_MODE26 | FL_MODE32 },
+ {"armv3m", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT },
+ {"armv4", FL_CO_PROC | FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 },
+ /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
+ implementations that support it, so we will leave it out for now. */
+ {"armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
+ {NULL, 0}
+};
+
+/* This is a magic structure.  The 'string' field is magically filled in
+   with a pointer to the value specified by the user on the command line,
+   assuming that the user has specified such a value. */
+
+struct arm_cpu_select arm_select[] =
+{
+ /* string name processors */
+ { NULL, "-mcpu=", all_cores },
+ { NULL, "-march=", all_architectures },
+ { NULL, "-mtune=", all_cores }
+};
+
+/* Return the number of bits set in VALUE.  */
+static unsigned int
+bit_count (value)
+ signed int value;
+{
+ unsigned int count = 0;
+
+ while (value)
+ {
+ value &= ~(value & - value);
+ ++ count;
+ }
+
+ return count;
+}
+
+/* Fix up any incompatible options that the user has specified.
+ This has now turned into a maze. */
+void
+arm_override_options ()
+{
+ unsigned i;
+
+ /* Set up the flags based on the cpu/architecture selected by the user. */
+ for (i = sizeof (arm_select) / sizeof (arm_select[0]); i--;)
+ {
+ struct arm_cpu_select * ptr = arm_select + i;
+
+ if (ptr->string != NULL && ptr->string[0] != '\0')
+ {
+ const struct processors * sel;
+
+ for (sel = ptr->processors; sel->name != NULL; sel ++)
+ if (! strcmp (ptr->string, sel->name))
+ {
+ if (i == 2)
+ tune_flags = sel->flags;
+ else
+ {
+ /* If we have been given an architecture and a processor
+ make sure that they are compatible. We only generate
+ a warning though, and we prefer the CPU over the
+ architecture. */
+ if (insn_flags != 0 && (insn_flags ^ sel->flags))
+ warning ("switch -mcpu=%s conflicts with -march= switch",
+ ptr->string);
+
+ insn_flags = sel->flags;
+ }
+
+ break;
+ }
+
+ if (sel->name == NULL)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* If the user did not specify a processor, choose one for them. */
+ if (insn_flags == 0)
+ {
+ struct processors * sel;
+ unsigned int sought;
+ static struct cpu_default
+ {
+ int cpu;
+ char * name;
+ }
+ cpu_defaults[] =
+ {
+ { TARGET_CPU_arm2, "arm2" },
+ { TARGET_CPU_arm6, "arm6" },
+ { TARGET_CPU_arm610, "arm610" },
+ { TARGET_CPU_arm710, "arm710" },
+ { TARGET_CPU_arm7m, "arm7m" },
+ { TARGET_CPU_arm7500fe, "arm7500fe" },
+ { TARGET_CPU_arm7tdmi, "arm7tdmi" },
+ { TARGET_CPU_arm8, "arm8" },
+ { TARGET_CPU_arm810, "arm810" },
+ { TARGET_CPU_arm9, "arm9" },
+ { TARGET_CPU_strongarm, "strongarm" },
+ { TARGET_CPU_generic, "arm" },
+ { 0, 0 }
+ };
+ struct cpu_default * def;
+
+ /* Find the default. */
+ for (def = cpu_defaults; def->name; def ++)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+
+ /* Make sure we found the default CPU. */
+ if (def->name == NULL)
+ abort ();
+
+ /* Find the default CPU's flags. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if (! strcmp (def->name, sel->name))
+ break;
+
+ if (sel->name == NULL)
+ abort ();
+
+ insn_flags = sel->flags;
+
+      /* Now check to see if the user has specified any command line
+         switches that require certain abilities from the CPU. */
+ sought = 0;
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ sought |= (FL_THUMB | FL_MODE32);
+
+ /* Force apcs-32 to be used for interworking. */
+ target_flags |= ARM_FLAG_APCS_32;
+
+          /* There is no ARM processor that supports both APCS-26 and
+ interworking. Therefore we force FL_MODE26 to be removed
+ from insn_flags here (if it was set), so that the search
+ below will always be able to find a compatible processor. */
+ insn_flags &= ~ FL_MODE26;
+ }
+
+ if (! TARGET_APCS_32)
+ sought |= FL_MODE26;
+
+ if (sought != 0 && ((sought & insn_flags) != sought))
+ {
+ /* Try to locate a CPU type that supports all of the abilities
+ of the default CPU, plus the extra abilities requested by
+ the user. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == (sought | insn_flags))
+ break;
+
+ if (sel->name == NULL)
+ {
+ unsigned int current_bit_count = 0;
+ struct processors * best_fit = NULL;
+
+ /* Ideally we would like to issue an error message here
+ saying that it was not possible to find a CPU compatible
+ with the default CPU, but which also supports the command
+ line options specified by the programmer, and so they
+ ought to use the -mcpu=<name> command line option to
+ override the default CPU type.
+
+ Unfortunately this does not work with multilibing. We
+ need to be able to support multilibs for -mapcs-26 and for
+ -mthumb-interwork and there is no CPU that can support both
+ options. Instead if we cannot find a cpu that has both the
+ characteristics of the default cpu and the given command line
+ options we scan the array again looking for a best match. */
+ for (sel = all_cores; sel->name != NULL; sel ++)
+ if ((sel->flags & sought) == sought)
+ {
+ unsigned int count;
+
+ count = bit_count (sel->flags & insn_flags);
+
+ if (count >= current_bit_count)
+ {
+ best_fit = sel;
+ current_bit_count = count;
+ }
+ }
+
+ if (best_fit == NULL)
+ abort ();
+ else
+ sel = best_fit;
+ }
+
+ insn_flags = sel->flags;
+ }
+ }
+
+ /* If tuning has not been specified, tune for whichever processor or
+ architecture has been selected. */
+ if (tune_flags == 0)
+ tune_flags = insn_flags;
+
+ /* Make sure that the processor choice does not conflict with any of the
+ other command line choices. */
+ if (TARGET_APCS_32 && !(insn_flags & FL_MODE32))
+ {
+ /* If APCS-32 was not the default then it must have been set by the
+ user, so issue a warning message. If the user has specified
+         "-mapcs-32 -mcpu=arm2" then we lose here. */
+ if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
+ warning ("target CPU does not support APCS-32" );
+ target_flags &= ~ ARM_FLAG_APCS_32;
+ }
+ else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
+ {
+ warning ("target CPU does not support APCS-26" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_THUMB_INTERWORK && !(insn_flags & FL_THUMB))
+ {
+ warning ("target CPU does not support interworking" );
+ target_flags &= ~ARM_FLAG_THUMB;
+ }
+
+ /* If interworking is enabled then APCS-32 must be selected as well. */
+ if (TARGET_THUMB_INTERWORK)
+ {
+ if (! TARGET_APCS_32)
+ warning ("interworking forces APCS-32 to be used" );
+ target_flags |= ARM_FLAG_APCS_32;
+ }
+
+ if (TARGET_APCS_STACK && ! TARGET_APCS)
+ {
+ warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
+ target_flags |= ARM_FLAG_APCS_FRAME;
+ }
+
+ if (write_symbols != NO_DEBUG && flag_omit_frame_pointer)
+ warning ("-g with -fomit-frame-pointer may not give sensible debugging");
+
+ if (TARGET_POKE_FUNCTION_NAME)
+ target_flags |= ARM_FLAG_APCS_FRAME;
+
+ if (TARGET_APCS_REENT && flag_pic)
+ fatal ("-fpic and -mapcs-reent are incompatible");
+
+ if (TARGET_APCS_REENT)
+ warning ("APCS reentrant code not supported. Ignored");
+
+ /* If stack checking is disabled, we can use r10 as the PIC register,
+ which keeps r9 available. */
+ if (flag_pic && ! TARGET_APCS_STACK)
+ arm_pic_register = 10;
+
+ /* Well, I'm about to have a go, but pic is NOT going to be compatible
+ with APCS reentrancy, since that requires too much support in the
+ assembler and linker, and the ARMASM assembler seems to lack some
+ required directives. */
+ if (flag_pic)
+ warning ("Position independent code not supported");
+
+ if (TARGET_APCS_FLOAT)
+ warning ("Passing floating point arguments in fp regs not yet supported");
+
+ /* Initialise boolean versions of the flags, for use in the arm.md file. */
+ arm_fast_multiply = insn_flags & FL_FAST_MULT;
+ arm_arch4 = insn_flags & FL_ARCH4;
+
+ arm_ld_sched = tune_flags & FL_LDSCHED;
+ arm_is_strong = tune_flags & FL_STRONG;
+ arm_is_6_or_7 = ((tune_flags & (FL_MODE26 | FL_MODE32))
+ && !(tune_flags & FL_ARCH4));
+
+ /* Default value for floating point code... if no co-processor
+ bus, then schedule for emulated floating point. Otherwise,
+ assume the user has an FPA.
+ Note: this does not prevent use of floating point instructions,
+ -msoft-float does that. */
+ arm_fpu = (tune_flags & FL_CO_PROC) ? FP_HARD : FP_SOFT3;
+
+ if (target_fpe_name)
+ {
+ if (! strcmp (target_fpe_name, "2"))
+ arm_fpu_arch = FP_SOFT2;
+ else if (! strcmp (target_fpe_name, "3"))
+ arm_fpu_arch = FP_SOFT3;
+ else
+ fatal ("Invalid floating point emulation option: -mfpe-%s",
+ target_fpe_name);
+ }
+ else
+ arm_fpu_arch = FP_DEFAULT;
+
+ if (TARGET_FPE && arm_fpu != FP_HARD)
+ arm_fpu = FP_SOFT2;
+
+ /* For arm2/3 there is no need to do any scheduling if there is only
+ a floating point emulator, or we are doing software floating-point. */
+ if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && (tune_flags & FL_MODE32) == 0)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+
+  arm_prgmode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
+
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ /* If optimizing for space, don't synthesize constants.
+ For processors with load scheduling, it never costs more than 2 cycles
+ to load a constant, and the load scheduler may well reduce that to 1. */
+ if (optimize_size || (tune_flags & FL_LDSCHED))
+ arm_constant_limit = 1;
+
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM).
+ Otherwise for the StrongARM, which has early execution of branches,
+ a sequence that is worth skipping is shorter. */
+ if (optimize_size)
+ max_insns_skipped = 6;
+ else if (arm_is_strong)
+ max_insns_skipped = 3;
+}
+
+
+/* Return 1 if it is possible to return using a single instruction */
+
+int
+use_return_insn (iscond)
+ int iscond;
+{
+ int regno;
+
+  if (!reload_completed || current_function_pretend_args_size
+ || current_function_anonymous_args
+ || ((get_frame_size () + current_function_outgoing_args_size != 0)
+ /* CYGNUS LOCAL nickc */
+ && !(TARGET_APCS && frame_pointer_needed)))
+ /* END CYGNUS LOCAL */
+ return 0;
+
+ /* Can't be done if interworking with Thumb, and any registers have been
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_is_strong && frame_pointer_needed)
+ return 0;
+ if ((iscond && arm_is_strong)
+ || TARGET_THUMB_INTERWORK)
+ for (regno = 0; regno < 16; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* Can't be done if any of the FPU regs are pushed, since this also
+ requires an insn */
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return 0;
+
+ return 1;
+}
+
+/* Return TRUE if int I is a valid immediate ARM constant. */
+
+int
+const_ok_for_arm (i)
+ HOST_WIDE_INT i;
+{
+ unsigned HOST_WIDE_INT mask = ~(unsigned HOST_WIDE_INT)0xFF;
+
+ /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
+ be all zero, or all one. */
+ if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
+ && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
+ != ((~(unsigned HOST_WIDE_INT) 0)
+ & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
+ return FALSE;
+
+ /* Fast return for 0 and powers of 2 */
+ if ((i & (i - 1)) == 0)
+ return TRUE;
+
+ do
+ {
+ if ((i & mask & (unsigned HOST_WIDE_INT) 0xffffffff) == 0)
+ return TRUE;
+ mask =
+ (mask << 2) | ((mask & (unsigned HOST_WIDE_INT) 0xffffffff)
+ >> (32 - 2)) | ~((unsigned HOST_WIDE_INT) 0xffffffff);
+ } while (mask != ~(unsigned HOST_WIDE_INT) 0xFF);
+
+ return FALSE;
+}
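+/* For instance, const_ok_for_arm (0xff), const_ok_for_arm (0xff000000)
+   and const_ok_for_arm (0x104) all return TRUE, since each is an 8-bit
+   value rotated right by an even amount, while const_ok_for_arm (0x101)
+   returns FALSE because bits 0 and 8 cannot fit in one such field.  */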
+
+/* Return true if I is a valid constant for the operation CODE. */
+int
+const_ok_for_op (i, code, mode)
+ HOST_WIDE_INT i;
+ enum rtx_code code;
+ enum machine_mode mode;
+{
+ if (const_ok_for_arm (i))
+ return 1;
+
+ switch (code)
+ {
+ case PLUS:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
+
+ case MINUS: /* Should only occur with (MINUS I reg) => rsb */
+ case XOR:
+ case IOR:
+ return 0;
+
+ case AND:
+ return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
+
+ default:
+ abort ();
+ }
+}
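+/* For instance, adding -66 is fine even though 0xffffffbe is not a
+   valid immediate: for PLUS we test the negated value, and since
+   const_ok_for_arm (66) holds the addition can be output as a sub.  */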
+
+/* Emit a sequence of insns to handle a large constant.
+   CODE is the code of the operation required; it can be any of SET, PLUS,
+ IOR, AND, XOR, MINUS;
+ MODE is the mode in which the operation is being performed;
+ VAL is the integer to operate on;
+ SOURCE is the other operand (a register, or a null-pointer for SET);
+ SUBTARGETS means it is safe to create scratch registers if that will
+ either produce a simpler sequence, or we will want to cse the values.
+ Return value is the number of insns emitted. */
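+/* For example, a SET of the unencodable (const_int 0xfff) can be
+   synthesised in two instructions, such as a mov of #0xf00 followed
+   by an orr with #0xff.  */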
+
+int
+arm_split_constant (code, mode, val, target, source, subtargets)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+{
+ if (subtargets || code == SET
+ || (GET_CODE (target) == REG && GET_CODE (source) == REG
+ && REGNO (target) != REGNO (source)))
+ {
+ /* After arm_reorg has been called, we can't fix up expensive
+ constants by pushing them into memory so we must synthesise
+ them in-line, regardless of the cost. This is only likely to
+ be more costly on chips that have load delay slots and we are
+ compiling without running the scheduler (so no splitting
+ occurred before the final instruction emission).
+
+ Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
+ */ /* CYGNUS LOCAL nickc/strongarm */
+ if ((! after_arm_reorg || optimize == 0)
+ /* END CYGNUS LOCAL */
+ && (arm_gen_constant (code, mode, val, target, source, 1, 0)
+ > arm_constant_limit + (code != SET)))
+ {
+ if (code == SET)
+ {
+	      /* Currently SET is the only monadic value for CODE; all
+		 the rest are dyadic.  */
+ emit_insn (gen_rtx (SET, VOIDmode, target, GEN_INT (val)));
+ return 1;
+ }
+ else
+ {
+ rtx temp = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, temp, GEN_INT (val)));
+ /* For MINUS, the value is subtracted from, since we never
+ have subtraction of a constant. */
+ if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, temp, source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, temp)));
+ return 2;
+ }
+ }
+ }
+
+ return arm_gen_constant (code, mode, val, target, source, subtargets, 1);
+}
+
+/* As above, but extra parameter GENERATE which, if clear, suppresses
+ RTL generation. */
+int
+arm_gen_constant (code, mode, val, target, source, subtargets, generate)
+ enum rtx_code code;
+ enum machine_mode mode;
+ HOST_WIDE_INT val;
+ rtx target;
+ rtx source;
+ int subtargets;
+ int generate;
+{
+ int can_invert = 0;
+ int can_negate = 0;
+ int can_negate_initial = 0;
+ int can_shift = 0;
+ int i;
+ int num_bits_set = 0;
+ int set_sign_bit_copies = 0;
+ int clear_sign_bit_copies = 0;
+ int clear_zero_bit_copies = 0;
+ int set_zero_bit_copies = 0;
+ int insns = 0;
+ unsigned HOST_WIDE_INT temp1, temp2;
+ unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
+
+  /* Find out which operations are safe for a given CODE.  Also do a quick
+ check for degenerate cases; these can occur when DImode operations
+ are split. */
+ switch (code)
+ {
+ case SET:
+ can_invert = 1;
+ can_shift = 1;
+ can_negate = 1;
+ break;
+
+ case PLUS:
+ can_negate = 1;
+ can_negate_initial = 1;
+ break;
+
+ case IOR:
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ GEN_INT (ARM_SIGN_EXTEND (val))));
+ return 1;
+ }
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ break;
+
+ case AND:
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, const0_rtx));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ can_invert = 1;
+ break;
+
+ case XOR:
+ if (remainder == 0)
+ {
+ if (reload_completed && rtx_equal_p (target, source))
+ return 0;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target, source));
+ return 1;
+ }
+ if (remainder == 0xffffffff)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, source)));
+ return 1;
+ }
+
+      /* The code below does not yet know how to handle any other XOR
+         cases.  */
+ abort ();
+
+ case MINUS:
+ /* We treat MINUS as (val - source), since (source - val) is always
+ passed as (source + (-val)). */
+ if (remainder == 0)
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NEG, mode, source)));
+ return 1;
+ }
+ if (const_ok_for_arm (val))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (MINUS, mode, GEN_INT (val), source)));
+ return 1;
+ }
+ can_negate = 1;
+
+ break;
+
+ default:
+ abort ();
+ }
+
+  /* If we can do it in one insn, get out quickly.  */
+ if (const_ok_for_arm (val)
+ || (can_negate_initial && const_ok_for_arm (-val))
+ || (can_invert && const_ok_for_arm (~val)))
+ {
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ (source ? gen_rtx (code, mode, source,
+ GEN_INT (val))
+ : GEN_INT (val))));
+ return 1;
+ }
+
+
+ /* Calculate a few attributes that may be useful for specific
+ optimizations. */
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 31; i >= 0; i--)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_sign_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) == 0)
+ clear_zero_bit_copies++;
+ else
+ break;
+ }
+
+ for (i = 0; i <= 31; i++)
+ {
+ if ((remainder & (1 << i)) != 0)
+ set_zero_bit_copies++;
+ else
+ break;
+ }
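+  /* For example, if remainder were 0x0000ff00 the loops above would
+     give clear_sign_bit_copies == 16, set_sign_bit_copies == 0,
+     clear_zero_bit_copies == 8 and set_zero_bit_copies == 0.  */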
+
+ switch (code)
+ {
+ case SET:
+      /* See if we can do this by sign-extending a constant that is known
+         to be negative.  This is a good way of doing it, since the shift
+         may well merge into a subsequent insn.  */
+ if (set_sign_bit_copies > 1)
+ {
+ if (const_ok_for_arm
+ (temp1 = ARM_SIGN_EXTEND (remainder
+ << (set_sign_bit_copies - 1))))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ /* For an inverted constant, we will need to set the low bits,
+ these will be shifted out of harm's way. */
+ temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
+ if (const_ok_for_arm (~temp1))
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, new_src,
+ GEN_INT (temp1)));
+ emit_insn (gen_ashrsi3 (target, new_src,
+ GEN_INT (set_sign_bit_copies - 1)));
+ }
+ return 2;
+ }
+ }
+
+ /* See if we can generate this by setting the bottom (or the top)
+ 16 bits, and then shifting these into the other half of the
+ word. We only look for the simplest cases, to do more would cost
+ too much. Be careful, however, not to generate this when the
+ alternative would take fewer insns. */
+ if (val & 0xffff0000)
+ {
+ temp1 = remainder & 0xffff0000;
+ temp2 = remainder & 0x0000ffff;
+
+ /* Overlaps outside this range are best done using other methods. */
+ for (i = 9; i < 24; i++)
+ {
+ if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
+ && ! const_ok_for_arm (temp2))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp2, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (ASHIFT, mode, source,
+ GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+
+ /* Don't duplicate cases already considered. */
+ for (i = 17; i < 24; i++)
+ {
+ if (((temp1 | (temp1 >> i)) == remainder)
+ && ! const_ok_for_arm (temp1))
+ {
+ rtx new_src = (subtargets
+ ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
+ : target);
+ insns = arm_gen_constant (code, mode, temp1, new_src,
+ source, subtargets, generate);
+ source = new_src;
+ if (generate)
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (IOR, mode,
+ gen_rtx (LSHIFTRT, mode,
+ source, GEN_INT (i)),
+ source)));
+ return insns + 1;
+ }
+ }
+ }
+ break;
+
+ case IOR:
+ case XOR:
+ /* If we have IOR or XOR, and the constant can be loaded in a
+ single instruction, and we can find a temporary to put it in,
+ then this can be done in two instructions instead of 3-4. */
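+      /* For example, (x | 0xfffffff0) can load 0xfffffff0 with a single
+	 MVN (since ~0xfffffff0 == 0xf is a legal immediate) and then ORR
+	 it into the result.  */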
+ if (subtargets
+ /* TARGET can't be NULL if SUBTARGETS is 0 */
+ || (reload_completed && ! reg_mentioned_p (target, source)))
+ {
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub, GEN_INT (val)));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (code, mode, source, sub)));
+ }
+ return 2;
+ }
+ }
+
+ if (code == XOR)
+ break;
+
+ if (set_sign_bit_copies > 8
+ && (val & (-1 << (32 - set_sign_bit_copies))) == val)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_sign_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (set_zero_bit_copies > 8
+ && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (set_zero_bit_copies);
+
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode,
+ gen_rtx (LSHIFTRT, mode, source,
+ shift))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode,
+ gen_rtx (ASHIFT, mode, sub,
+ shift))));
+ }
+ return 2;
+ }
+
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
+ {
+ if (generate)
+ {
+ rtx sub = subtargets ? gen_reg_rtx (mode) : target;
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (NOT, mode, source)));
+ source = sub;
+ if (subtargets)
+ sub = gen_reg_rtx (mode);
+ emit_insn (gen_rtx (SET, VOIDmode, sub,
+ gen_rtx (AND, mode, source,
+ GEN_INT (temp1))));
+ emit_insn (gen_rtx (SET, VOIDmode, target,
+ gen_rtx (NOT, mode, sub)));
+ }
+ return 3;
+ }
+ break;
+
+ case AND:
+      /* See if two shifts will do 2 or more insns' worth of work.  */
+ if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = ((0xffffffff
+ << (32 - clear_sign_bit_copies))
+ & 0xffffffff);
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_sign_bit_copies);
+
+ emit_insn (gen_ashlsi3 (new_src, source, shift));
+ emit_insn (gen_lshrsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
+ {
+ HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
+
+ if ((remainder | shift_mask) != 0xffffffff)
+ {
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ new_src, source, subtargets, 1);
+ source = new_src;
+ }
+ else
+ {
+ rtx targ = subtargets ? NULL_RTX : target;
+
+ insns = arm_gen_constant (AND, mode, remainder | shift_mask,
+ targ, source, subtargets, 0);
+ }
+ }
+
+ if (generate)
+ {
+ rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
+ rtx shift = GEN_INT (clear_zero_bit_copies);
+
+ emit_insn (gen_lshrsi3 (new_src, source, shift));
+ emit_insn (gen_ashlsi3 (target, new_src, shift));
+ }
+
+ return insns + 2;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = 0; i < 32; i++)
+ if (remainder & (1 << i))
+ num_bits_set++;
+
+ if (code == AND || (can_invert && num_bits_set > 16))
+ remainder = (~remainder) & 0xffffffff;
+ else if (code == PLUS && num_bits_set > 16)
+ remainder = (-remainder) & 0xffffffff;
+ else
+ {
+ can_invert = 0;
+ can_negate = 0;
+ }
+
+  /* Now try and find a way of doing the job in either two or three
+     instructions.
+     We start by looking for the largest block of zeros that are aligned on
+     a 2-bit boundary; we then fill up the temps, wrapping around to the
+     top of the word when we drop off the bottom.
+     In the worst case this code should produce no more than four insns.  */
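+  /* For example, 0x12345678 has no useful run of zeros, so it takes the
+     full four insns: a MOV of the top eight-bit chunk followed by three
+     ORRs of the remaining chunks.  */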
+ {
+ int best_start = 0;
+ int best_consecutive_zeros = 0;
+
+ for (i = 0; i < 32; i += 2)
+ {
+ int consecutive_zeros = 0;
+
+ if (! (remainder & (3 << i)))
+ {
+ while ((i < 32) && ! (remainder & (3 << i)))
+ {
+ consecutive_zeros += 2;
+ i += 2;
+ }
+ if (consecutive_zeros > best_consecutive_zeros)
+ {
+ best_consecutive_zeros = consecutive_zeros;
+ best_start = i - consecutive_zeros;
+ }
+ i -= 2;
+ }
+ }
+
+ /* Now start emitting the insns, starting with the one with the highest
+ bit set: we do this so that the smallest number will be emitted last;
+ this is more likely to be combinable with addressing insns. */
+ i = best_start;
+ do
+ {
+ int end;
+
+ if (i <= 0)
+ i += 32;
+ if (remainder & (3 << (i - 2)))
+ {
+ end = i - 8;
+ if (end < 0)
+ end += 32;
+ temp1 = remainder & ((0x0ff << end)
+ | ((i < end) ? (0xff >> (32 - end)) : 0));
+ remainder &= ~temp1;
+
+ if (generate)
+ {
+ rtx new_src;
+
+ if (code == SET)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ GEN_INT (can_invert ? ~temp1 : temp1)));
+ else if (code == MINUS)
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (subtargets
+ ? gen_reg_rtx (mode)
+ : target),
+ gen_rtx (code, mode, GEN_INT (temp1),
+ source)));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode,
+ new_src = (remainder
+ ? (subtargets
+ ? gen_reg_rtx (mode)
+ : target)
+ : target),
+ gen_rtx (code, mode, source,
+ GEN_INT (can_invert ? ~temp1
+ : (can_negate
+ ? -temp1
+ : temp1)))));
+ source = new_src;
+ }
+
+ if (code == SET)
+ {
+ can_invert = 0;
+ code = PLUS;
+ }
+ else if (code == MINUS)
+ code = PLUS;
+
+ insns++;
+ i -= 6;
+ }
+ i -= 2;
+ } while (remainder);
+ }
+ return insns;
+}
+
+/* Canonicalize a comparison so that we are more likely to recognize it.
+ This can be done for a few constant compares, where we can make the
+ immediate value easier to load. */
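+/* For example, (x > 4095) cannot be tested directly, since 4095 is not a
+   valid ARM immediate, but it can be rewritten as (x >= 4096), and 4096
+   is a legal immediate.  */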
+enum rtx_code
+arm_canonicalize_comparison (code, op1)
+ enum rtx_code code;
+ rtx *op1;
+{
+ unsigned HOST_WIDE_INT i = INTVAL (*op1);
+
+ switch (code)
+ {
+ case EQ:
+ case NE:
+ return code;
+
+ case GT:
+ case LE:
+ if (i != ((((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ - 1)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i+1);
+ return code == GT ? GE : LT;
+ }
+ break;
+
+ case GE:
+ case LT:
+ if (i != (((unsigned HOST_WIDE_INT) 1) << (HOST_BITS_PER_WIDE_INT - 1))
+ && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
+ {
+ *op1 = GEN_INT (i-1);
+ return code == GE ? GT : LE;
+ }
+ break;
+
+ case GTU:
+ case LEU:
+ if (i != ~((unsigned HOST_WIDE_INT) 0)
+ && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ {
+ *op1 = GEN_INT (i + 1);
+ return code == GTU ? GEU : LTU;
+ }
+ break;
+
+ case GEU:
+ case LTU:
+ if (i != 0
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
+ {
+ *op1 = GEN_INT (i - 1);
+ return code == GEU ? GTU : LEU;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ return code;
+}
+
+/* CYGNUS LOCAL */
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+arm_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+      /* For a struct the APCS says that we must return in a register if
+	 every addressable element has an offset of zero.  For practical
+	 purposes this means that the structure can have at most one
+	 non-bit-field element and that this element must be the first one
+	 in the structure.  */
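+
+      /* For example, "struct { int i; }" is returned in a register, but
+	 "struct { char a; char b; }" must go in memory: its second element
+	 is an addressable non-bit-field with a non-zero offset.  */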
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+/* END CYGNUS LOCAL */
+
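+/* Return 1 if X is a legitimate operand when generating PIC.  SYMBOL_REFs,
+   and constant offsets from them, must first be converted by
+   legitimize_pic_address, so 0 is returned for those.  */
+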
+int
+legitimate_pic_operand_p (x)
+ rtx x;
+{
+ if (CONSTANT_P (x) && flag_pic
+ && (GET_CODE (x) == SYMBOL_REF
+ || (GET_CODE (x) == CONST
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)))
+ return 0;
+
+ return 1;
+}
+
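+/* Convert ORIG, which must be a SYMBOL_REF, LABEL_REF or CONST, into a
+   legitimate PIC address, loading symbolic addresses through the global
+   offset table.  REG, if non-null, is a register to load into.  */
+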
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+ int subregs = 0;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+
+ subregs = 1;
+ }
+
+#ifdef AOF_ASSEMBLER
+  /* The AOF assembler can generate relocations for these directly, and
+     understands that the PIC register has to be added into the offset.  */
+ insn = emit_insn (gen_pic_load_addr_based (reg, orig));
+#else
+ if (subregs)
+ address = gen_reg_rtx (Pmode);
+ else
+ address = reg;
+
+ emit_insn (gen_pic_load_addr (address, orig));
+
+ pic_ref = gen_rtx (MEM, Pmode,
+ gen_rtx (PLUS, Pmode, pic_offset_table_rtx, address));
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+#endif
+ current_function_uses_pic_offset_table = 1;
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx (EXPR_LIST, REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ /* The base register doesn't really matter, we only want to
+ test the index for the appropriate mode. */
+ GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
+
+ if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ abort ();
+
+ win:
+ if (GET_CODE (offset) == CONST_INT)
+ return plus_constant_for_output (base, INTVAL (offset));
+ }
+
+ if (GET_MODE_SIZE (mode) > 4
+ && (GET_MODE_CLASS (mode) == MODE_INT
+ || TARGET_SOFT_FLOAT))
+ {
+ emit_insn (gen_addsi3 (reg, base, offset));
+ return reg;
+ }
+
+ return gen_rtx (PLUS, Pmode, base, offset);
+ }
+ else if (GET_CODE (orig) == LABEL_REF)
+ current_function_uses_pic_offset_table = 1;
+
+ return orig;
+}
+
+static rtx pic_rtx;
+
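+/* Return 1 if X is the GOT-relative expression built by arm_finalize_pic
+   below.  */
+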
+int
+is_pic (x)
+ rtx x;
+{
+ if (x == pic_rtx)
+ return 1;
+ return 0;
+}
+
+void
+arm_finalize_pic ()
+{
+#ifndef AOF_ASSEMBLER
+ rtx l1, pic_tmp, pic_tmp2, seq;
+ rtx global_offset_table;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ start_sequence ();
+ l1 = gen_label_rtx ();
+
+ global_offset_table = gen_rtx (SYMBOL_REF, Pmode, "_GLOBAL_OFFSET_TABLE_");
+ /* The PC contains 'dot'+8, but the label L1 is on the next
+ instruction, so the offset is only 'dot'+4. */
+ pic_tmp = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ gen_rtx (LABEL_REF, VOIDmode, l1),
+ GEN_INT (4)));
+ pic_tmp2 = gen_rtx (CONST, VOIDmode,
+ gen_rtx (PLUS, Pmode,
+ global_offset_table,
+ pc_rtx));
+
+ pic_rtx = gen_rtx (CONST, Pmode,
+ gen_rtx (MINUS, Pmode, pic_tmp2, pic_tmp));
+
+ emit_insn (gen_pic_load_addr (pic_offset_table_rtx, pic_rtx));
+ emit_jump_insn (gen_pic_add_dot_plus_eight(l1, pic_offset_table_rtx));
+ emit_label (l1);
+
+ seq = gen_sequence ();
+ end_sequence ();
+ emit_insn_after (seq, get_insns ());
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up. */
+ emit_insn (gen_rtx (USE, VOIDmode, pic_offset_table_rtx));
+#endif /* AOF_ASSEMBLER */
+}
+
+#define REG_OR_SUBREG_REG(X) \
+ (GET_CODE (X) == REG \
+ || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
+
+#define REG_OR_SUBREG_RTX(X) \
+ (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
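+/* Return a rough static estimate of the cost of computing X, whose rtx
+   code is CODE, in the context of an enclosing expression with code
+   OUTER_CODE.  */
+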
+int
+arm_rtx_costs (x, code, outer_code)
+ rtx x;
+ enum rtx_code code, outer_code;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code subcode;
+ int extra_cost;
+
+ switch (code)
+ {
+ case MEM:
+ /* Memory costs quite a lot for the first word, but subsequent words
+ load at the equivalent of a single insn each. */
+ return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
+ + (CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
+
+ case DIV:
+ case MOD:
+ return 100;
+
+ case ROTATE:
+ if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
+ return 4;
+ /* Fall through */
+ case ROTATERT:
+ if (mode != SImode)
+ return 8;
+ /* Fall through */
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT:
+ if (mode == DImode)
+ return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
+ + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 8));
+ return (1 + ((GET_CODE (XEXP (x, 0)) == REG
+ || (GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
+ ? 0 : 4)
+ + ((GET_CODE (XEXP (x, 1)) == REG
+ || (GET_CODE (XEXP (x, 1)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT))
+ ? 0 : 4));
+
+ case MINUS:
+ if (mode == DImode)
+ return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
+ ? 0 : 8));
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 0))))
+ ? 0 : 8));
+
+ if (((GET_CODE (XEXP (x, 0)) == CONST_INT
+ && const_ok_for_arm (INTVAL (XEXP (x, 0)))
+ && REG_OR_SUBREG_REG (XEXP (x, 1))))
+ || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
+ || subcode == ASHIFTRT || subcode == LSHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
+ (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
+ && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
+ || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
+ && REG_OR_SUBREG_REG (XEXP (x, 0))))
+ return 1;
+ /* Fall through */
+
+ case PLUS:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
+ && const_double_rtx_ok_for_fpu (XEXP (x, 1))))
+ ? 0 : 8));
+
+ /* Fall through */
+ case AND: case XOR: case IOR:
+ extra_cost = 0;
+
+      /* Normally the frame registers will be split into reg+const during
+	 reload, so it is a bad idea to combine them with other instructions,
+	 since then they might not be moved outside of loops.  As a compromise
+	 we allow integration with ops that have a constant as their second
+	 operand.  */
+ if ((REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
+ && GET_CODE (XEXP (x, 1)) != CONST_INT)
+ || (REG_OR_SUBREG_REG (XEXP (x, 0))
+ && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
+ extra_cost = 4;
+
+ if (mode == DImode)
+ return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 8));
+
+ if (REG_OR_SUBREG_REG (XEXP (x, 0)))
+ return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
+ + ((REG_OR_SUBREG_REG (XEXP (x, 1))
+ || (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && const_ok_for_op (INTVAL (XEXP (x, 1)), code, mode)))
+ ? 0 : 4));
+
+ else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
+ return (1 + extra_cost
+ + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
+ || subcode == LSHIFTRT || subcode == ASHIFTRT
+ || subcode == ROTATE || subcode == ROTATERT
+ || (subcode == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
+ (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
+ && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
+ && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
+ ? 0 : 4));
+
+ return 8;
+
+ case MULT:
+ /* There is no point basing this on the tuning, since it is always the
+ fast variant if it exists at all */
+ if (arm_fast_multiply && mode == DImode
+ && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
+ && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
+ return 8;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT
+ || mode == DImode)
+ return 30;
+
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
+ & (unsigned HOST_WIDE_INT) 0xffffffff);
+ int add_cost = const_ok_for_arm (i) ? 4 : 8;
+ int j;
+ /* Tune as appropriate */
+ int booth_unit_size = ((tune_flags & FL_FAST_MULT) ? 8 : 2);
+
+ for (j = 0; i && j < 32; j += booth_unit_size)
+ {
+ i >>= booth_unit_size;
+ add_cost += 2;
+ }
+
+ return add_cost;
+ }
+
+ return (((tune_flags & FL_FAST_MULT) ? 8 : 30)
+ + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
+ + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4));
+
+ case TRUNCATE:
+ if (arm_fast_multiply && mode == SImode
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
+ == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
+ && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
+ || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
+ return 8;
+ return 99;
+
+ case NEG:
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
+ /* Fall through */
+ case NOT:
+ if (mode == DImode)
+ return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
+
+ case IF_THEN_ELSE:
+ if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
+ return 14;
+ return 2;
+
+ case COMPARE:
+ return 1;
+
+ case ABS:
+ return 4 + (mode == DImode ? 4 : 0);
+
+ case SIGN_EXTEND:
+ if (GET_MODE (XEXP (x, 0)) == QImode)
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+ /* Fall through */
+ case ZERO_EXTEND:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ return (1 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case HImode:
+ return (4 + (mode == DImode ? 4 : 0)
+ + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ case SImode:
+ return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
+
+ default:
+ break;
+ }
+ abort ();
+
+ default:
+ return 99;
+ }
+}
+
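+/* Adjust COST, the scheduling cost of the dependence of insn INSN on
+   insn DEP via dependence LINK.  A load immediately after a store is
+   given a cost of 1 when the load is assumed to hit the cache.  */
+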
+int
+arm_adjust_cost (insn, link, dep, cost)
+ rtx insn;
+ rtx link;
+ rtx dep;
+ int cost;
+{
+ rtx i_pat, d_pat;
+
+ if ((i_pat = single_set (insn)) != NULL
+ && GET_CODE (SET_SRC (i_pat)) == MEM
+ && (d_pat = single_set (dep)) != NULL
+ && GET_CODE (SET_DEST (d_pat)) == MEM)
+ {
+ /* This is a load after a store, there is no conflict if the load reads
+ from a cached area. Assume that loads from the stack, and from the
+ constant pool are cached, and that others will miss. This is a
+ hack. */
+
+/* debug_rtx (insn);
+ debug_rtx (dep);
+ debug_rtx (link);
+ fprintf (stderr, "costs %d\n", cost); */
+
+ if (CONSTANT_POOL_ADDRESS_P (XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (stack_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (frame_pointer_rtx, XEXP (SET_SRC (i_pat), 0))
+ || reg_mentioned_p (hard_frame_pointer_rtx,
+ XEXP (SET_SRC (i_pat), 0)))
+ {
+/* fprintf (stderr, "***** Now 1\n"); */
+ return 1;
+ }
+ }
+
+ return cost;
+}
+
+/* This code has been fixed for cross compilation. */
+
+static int fpa_consts_inited = 0;
+
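+/* The set of floating point values that can be used as immediate operands
+   of FPA instructions.  */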
+char *strings_fpa[8] = {
+ "0", "1", "2", "3",
+ "4", "5", "0.5", "10"
+};
+
+static REAL_VALUE_TYPE values_fpa[8];
+
+static void
+init_fpa_table ()
+{
+ int i;
+ REAL_VALUE_TYPE r;
+
+ for (i = 0; i < 8; i++)
+ {
+ r = REAL_VALUE_ATOF (strings_fpa[i], DFmode);
+ values_fpa[i] = r;
+ }
+
+ fpa_consts_inited = 1;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant. */
+
+int
+const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Return TRUE if rtx X is a valid immediate FPU constant when negated.  */
+
+int
+neg_const_double_rtx_ok_for_fpu (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ if (REAL_VALUE_MINUS_ZERO (r))
+ return 0;
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return 1;
+
+ return 0;
+}
+
+/* Predicates for `match_operand' and `match_operator'. */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Only accept reg, subreg(reg), const_int. */
+
+int
+reg_or_int_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
+
+/* Return 1 if OP is an item in memory, given that we are in reload. */
+
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return 1 if OP is a valid memory address, but not valid for a signed byte
+ memory access (architecture V4) */
+int
+bad_signed_byte_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! memory_operand (op, mode) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ /* A sum of anything more complex than reg + reg or reg + const is bad */
+ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
+ && (! s_register_operand (XEXP (op, 0), VOIDmode)
+ || (! s_register_operand (XEXP (op, 1), VOIDmode)
+ && GET_CODE (XEXP (op, 1)) != CONST_INT)))
+ return 1;
+
+ /* Big constants are also bad */
+ if (GET_CODE (op) == PLUS && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (INTVAL (XEXP (op, 1)) > 0xff
+ || -INTVAL (XEXP (op, 1)) > 0xff))
+ return 1;
+
+  /* Everything else is good, or will automatically be made so.  */
+ return 0;
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction. */
+
+int
+arm_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op))));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   load.  */
+
+int
+arm_rhsm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT && const_ok_for_arm (INTVAL (op)))
+ || memory_operand (op, mode));
+}
+
+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   constant that is valid when negated.  */
+
+int
+arm_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (-INTVAL (op)))));
+}
+
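+/* Return TRUE for valid operands for the rhs of an ARM instruction, or a
+   constant that is valid when inverted.  */
+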
+int
+arm_not_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand (op, mode)
+ || (GET_CODE (op) == CONST_INT
+ && (const_ok_for_arm (INTVAL (op))
+ || const_ok_for_arm (~INTVAL (op)))));
+}
+
+/* Return TRUE if the operand is a memory reference which contains an
+ offsettable address. */
+int
+offsettable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ return (mode == GET_MODE (op)
+ && GET_CODE (op) == MEM
+ && offsettable_address_p (reload_completed | reload_in_progress,
+ mode, XEXP (op, 0)));
+}
+
+/* Return TRUE if the operand is a memory reference which is, or can be
+ made word aligned by adjusting the offset. */
+int
+alignable_memory_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ rtx reg;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+
+ if (mode != GET_MODE (op) || GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+
+ return ((GET_CODE (reg = op) == REG
+ || (GET_CODE (op) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (op)) == REG)
+ || (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (GET_CODE (reg = XEXP (op, 0)) == REG
+ || (GET_CODE (XEXP (op, 0)) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (XEXP (op, 0))) == REG))))
+ && REGNO_POINTER_ALIGN (REGNO (reg)) >= 4);
+}
+
+/* Similar to s_register_operand, but does not allow hard integer
+ registers. */
+int
+f_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) == FPU_REGS));
+}
+
+/* Return TRUE for valid operands for the rhs of an FPU instruction. */
+
+int
+fpu_rhs_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+int
+fpu_add_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ return (const_double_rtx_ok_for_fpu (op)
+ || neg_const_double_rtx_ok_for_fpu (op));
+
+ return FALSE;
+}
+
+/* Return nonzero if OP is a constant power of two. */
+
+int
+power_of_two_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+ HOST_WIDE_INT value = INTVAL(op);
+ return value != 0 && (value & (value - 1)) == 0;
+ }
+ return FALSE;
+}
+
+/* Return TRUE for a valid operand of a DImode operation.
+   Either: REG, SUBREG, CONST_INT, CONST_DOUBLE or MEM(DImode_address).
+   Note that this disallows MEM(REG+REG), but allows
+   MEM(PRE/POST_INC/DEC(REG)).  */
+
+int
+di_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ case CONST_INT:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DImode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for a valid operand of a DFmode operation when -msoft-float.
+   Either: REG, SUBREG, CONST_DOUBLE or MEM(DFmode_address).
+   Note that this disallows MEM(REG+REG), but allows
+   MEM(PRE/POST_INC/DEC(REG)).  */
+
+int
+soft_df_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (s_register_operand (op, mode))
+ return TRUE;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_DOUBLE:
+ return TRUE;
+
+ case MEM:
+ return memory_address_p (DFmode, XEXP (op, 0));
+
+ default:
+ return FALSE;
+ }
+}
+
+/* Return TRUE for valid index operands. */
+
+int
+index_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (s_register_operand(op, mode)
+ || (immediate_operand (op, mode)
+ && INTVAL (op) < 4096 && INTVAL (op) > -4096));
+}
+
+/* Return TRUE for valid shifts by a constant. This also accepts any
+ power of two on the (somewhat overly relaxed) assumption that the
+ shift operator in this case was a mult. */
+
+int
+const_shift_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (power_of_two_operand (op, mode)
+ || (immediate_operand (op, mode)
+ && (INTVAL (op) < 32 && INTVAL (op) > 0)));
+}
+
+/* Return TRUE for arithmetic operators which can be combined with a multiply
+ (shift). */
+
+int
+shiftable_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ return (code == PLUS || code == MINUS
+ || code == IOR || code == XOR || code == AND);
+ }
+}
+
+/* Return TRUE for shift operators. */
+
+int
+shift_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (GET_MODE (x) != mode)
+ return FALSE;
+ else
+ {
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == MULT)
+	return power_of_two_operand (XEXP (x, 1), mode);
+
+ return (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT
+ || code == ROTATERT);
+ }
+}
+
+/* Return TRUE if X is EQ or NE.  */
+
+int
+equality_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ return GET_CODE (x) == EQ || GET_CODE (x) == NE;
+}
+
+/* Return TRUE for SMIN SMAX UMIN UMAX operators. */
+
+int
+minmax_operator (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (GET_MODE (x) != mode)
+ return FALSE;
+
+ return code == SMIN || code == SMAX || code == UMIN || code == UMAX;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+   a mode, accept any class CCmode register.  */
+
+int
+cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if this is the condition code register; if we aren't given
+   a mode, accept any class CCmode register which indicates a dominance
+   expression.  */
+
+int
+dominant_cc_register (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode)
+ {
+ mode = GET_MODE (x);
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ return FALSE;
+ }
+
+ if (mode != CC_DNEmode && mode != CC_DEQmode
+ && mode != CC_DLEmode && mode != CC_DLTmode
+ && mode != CC_DGEmode && mode != CC_DGTmode
+ && mode != CC_DLEUmode && mode != CC_DLTUmode
+ && mode != CC_DGEUmode && mode != CC_DGTUmode)
+ return FALSE;
+
+ if (mode == GET_MODE (x) && GET_CODE (x) == REG && REGNO (x) == 24)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Return TRUE if X references a SYMBOL_REF. */
+int
+symbol_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (symbol_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return TRUE if X references a LABEL_REF. */
+int
+label_mentioned_p (x)
+ rtx x;
+{
+ register char *fmt;
+ register int i;
+
+ if (GET_CODE (x) == LABEL_REF)
+ return 1;
+
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ register int j;
+
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ if (label_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+enum rtx_code
+minmax_code (x)
+ rtx x;
+{
+ enum rtx_code code = GET_CODE (x);
+
+ if (code == SMAX)
+ return GE;
+ else if (code == SMIN)
+ return LE;
+ else if (code == UMIN)
+ return LEU;
+ else if (code == UMAX)
+ return GEU;
+
+ abort ();
+}
+
+/* Return 1 if memory locations A and B address adjacent words, ie use the
+   same base register with offsets exactly 4 bytes apart.  */
+
+int
+adjacent_mem_locations (a, b)
+ rtx a, b;
+{
+ int val0 = 0, val1 = 0;
+ int reg0, reg1;
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = REGNO (XEXP (XEXP (a, 0), 0));
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = REGNO (XEXP (a, 0));
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = REGNO (XEXP (XEXP (b, 0), 0));
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = REGNO (XEXP (b, 0));
+ return (reg0 == reg1) && ((val1 - val0) == 4 || (val0 - val1) == 4);
+ }
+ return 0;
+}
+
+/* Return 1 if OP is a load multiple operation.  It is known to be a
+   PARALLEL and the first section will be tested.  */
+
+int
+load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, i - 1)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i - base
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 if OP is a store multiple operation.  It is known to be a
+   PARALLEL and the first section will be tested.  */
+
+int
+store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ HOST_WIDE_INT i = 1, base = 0;
+ rtx elt;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ return 0;
+
+ /* Check to see if this might be a write-back */
+ if (GET_CODE (SET_SRC (elt = XVECEXP (op, 0, 0))) == PLUS)
+ {
+ i++;
+ base = 1;
+
+ /* Now check it more carefully */
+ if (GET_CODE (SET_DEST (elt)) != REG
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != REG
+ || REGNO (XEXP (SET_SRC (elt), 0)) != REGNO (SET_DEST (elt))
+ || GET_CODE (XEXP (SET_SRC (elt), 1)) != CONST_INT
+ || INTVAL (XEXP (SET_SRC (elt), 1)) != (count - 2) * 4
+ || GET_CODE (XVECEXP (op, 0, count - 1)) != CLOBBER
+ || GET_CODE (XEXP (XVECEXP (op, 0, count - 1), 0)) != REG
+ || REGNO (XEXP (XVECEXP (op, 0, count - 1), 0))
+ != REGNO (SET_DEST (elt)))
+ return 0;
+
+ count--;
+ }
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= i
+ || GET_CODE (XVECEXP (op, 0, i - 1)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, i - 1))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, i - 1))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, i - 1)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, i - 1)), 0);
+
+ for (; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i - base
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
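+/* Work out whether OPERANDS (NOPS destination registers followed by their
+   NOPS memory references) can be combined into a single load multiple.
+   On success fill in REGS, BASE and LOAD_OFFSET, returning 1 for ldmia,
+   2 for ldmib, 3 for ldmda, 4 for ldmdb, or 5 if the base address must
+   first be set up with an add or sub; return 0 if no load multiple is
+   suitable.  */
+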
+int
+load_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, or if it overwrites the
+ base register but isn't the last insn in the list, then
+ we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
+ || (i != nops - 1 && unsorted_regs[i] == base_reg))
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+	 than once.  */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* ldmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* ldmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* ldmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* ldmdb */
+
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+ the offset isn't small enough. The reason 2 ldrs are faster is because
+ these ARMs are able to do more than one cache access in a single cycle.
+ The ARM9 and StrongARM have Harvard caches, whilst the ARM8 has a double
+ bandwidth cache. This means that these cores can do both an instruction
+ fetch and a data fetch in a single cycle, so the trick of calculating the
+ address into a scratch register (one of the result regs) and then doing a
+ load multiple actually becomes slower (and no smaller in code size). That
+ is the transformation
+
+ ldr rd1, [rbase + offset]
+ ldr rd2, [rbase + offset + 4]
+
+ to
+
+ add rd1, rbase, offset
+ ldmia rd1, {rd1, rd2}
+
+ produces worse code -- '3 cycles + any stalls on rd2' instead of '2 cycles
+ + any stalls on rd2'. On ARMs with only one cache access per cycle, the
+ first sequence could never complete in less than 6 cycles, whereas the ldm
+ sequence would only take 5 and would make better use of sequential accesses
+ if not hitting the cache.
+
+ We cheat here and test 'arm_ld_sched' which we currently know to only be
+ true for the ARM8, ARM9 and StrongARM. If this ever changes, then the test
+ below needs to be reworked. */
+ if (nops == 2 && arm_ld_sched)
+ return 0;
+
+ /* Can't do it without setting up the offset, only do this if it takes
+ no more than one insn. */
+ return (const_ok_for_arm (unsorted_offsets[order[0]])
+ || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
+}
+
+char *
+emit_ldm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "ldm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "ldm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "ldm%?db\t");
+ break;
+
+ case 5:
+ if (offset >= 0)
+ sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) offset);
+ else
+ sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
+ reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
+ (long) -offset);
+ output_asm_insn (buf, operands);
+ base_reg = regs[0];
+ strcpy (buf, "ldm%?ia\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole ldm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
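+/* As load_multiple_sequence, but for stores: returns 1 for stmia, 2 for
+   stmib, 3 for stmda, 4 for stmdb, or 0 if no store multiple is
+   suitable.  */
+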
+int
+store_multiple_sequence (operands, nops, regs, base, load_offset)
+ rtx *operands;
+ int nops;
+ int *regs;
+ int *base;
+ HOST_WIDE_INT *load_offset;
+{
+ int unsorted_regs[4];
+ HOST_WIDE_INT unsorted_offsets[4];
+ int order[4];
+ int base_reg = -1;
+ int i;
+
+ /* Can only handle 2, 3, or 4 insns at present, though could be easily
+ extended if required. */
+ if (nops < 2 || nops > 4)
+ abort ();
+
+ /* Loop over the operands and check that the memory references are
+ suitable (ie immediate offsets from the same base register). At
+ the same time, extract the target register, and the memory
+ offsets. */
+ for (i = 0; i < nops; i++)
+ {
+ rtx reg;
+ rtx offset;
+
+ /* Convert a subreg of a mem into the mem itself. */
+ if (GET_CODE (operands[nops + i]) == SUBREG)
+ operands[nops + i] = alter_subreg(operands[nops + i]);
+
+ if (GET_CODE (operands[nops + i]) != MEM)
+ abort ();
+
+ /* Don't reorder volatile memory references; it doesn't seem worth
+ looking for the case where the order is ok anyway. */
+ if (MEM_VOLATILE_P (operands[nops + i]))
+ return 0;
+
+ offset = const0_rtx;
+
+ if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
+ && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
+ == REG)
+ || (GET_CODE (reg) == SUBREG
+ && GET_CODE (reg = SUBREG_REG (reg)) == REG))
+ && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
+ == CONST_INT)))
+ {
+ if (i == 0)
+ {
+ base_reg = REGNO(reg);
+ unsorted_regs[0] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ order[0] = 0;
+ }
+ else
+ {
+ if (base_reg != REGNO (reg))
+ /* Not addressed from the same base register. */
+ return 0;
+
+ unsorted_regs[i] = (GET_CODE (operands[i]) == REG
+ ? REGNO (operands[i])
+ : REGNO (SUBREG_REG (operands[i])));
+ if (unsorted_regs[i] < unsorted_regs[order[0]])
+ order[0] = i;
+ }
+
+ /* If it isn't an integer register, then we can't do this. */
+ if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
+ return 0;
+
+ unsorted_offsets[i] = INTVAL (offset);
+ }
+ else
+ /* Not a suitable memory address. */
+ return 0;
+ }
+
+ /* All the useful information has now been extracted from the
+ operands into unsorted_regs and unsorted_offsets; additionally,
+ order[0] has been set to the lowest numbered register in the
+ list. Sort the registers into order, and check that the memory
+ offsets are ascending and adjacent. */
+
+ for (i = 1; i < nops; i++)
+ {
+ int j;
+
+ order[i] = order[i - 1];
+ for (j = 0; j < nops; j++)
+ if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
+ && (order[i] == order[i - 1]
+ || unsorted_regs[j] < unsorted_regs[order[i]]))
+ order[i] = j;
+
+      /* Have we found a suitable register?  If not, one must be used more
+	 than once.  */
+ if (order[i] == order[i - 1])
+ return 0;
+
+ /* Is the memory address adjacent and ascending? */
+ if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
+ return 0;
+ }
+
+ if (base)
+ {
+ *base = base_reg;
+
+ for (i = 0; i < nops; i++)
+ regs[i] = unsorted_regs[order[i]];
+
+ *load_offset = unsorted_offsets[order[0]];
+ }
+
+ if (unsorted_offsets[order[0]] == 0)
+ return 1; /* stmia */
+
+ if (unsorted_offsets[order[0]] == 4)
+ return 2; /* stmib */
+
+ if (unsorted_offsets[order[nops - 1]] == 0)
+ return 3; /* stmda */
+
+ if (unsorted_offsets[order[nops - 1]] == -4)
+ return 4; /* stmdb */
+
+ return 0;
+}
+
+char *
+emit_stm_seq (operands, nops)
+ rtx *operands;
+ int nops;
+{
+ int regs[4];
+ int base_reg;
+ HOST_WIDE_INT offset;
+ char buf[100];
+ int i;
+
+ switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
+ {
+ case 1:
+ strcpy (buf, "stm%?ia\t");
+ break;
+
+ case 2:
+ strcpy (buf, "stm%?ib\t");
+ break;
+
+ case 3:
+ strcpy (buf, "stm%?da\t");
+ break;
+
+ case 4:
+ strcpy (buf, "stm%?db\t");
+ break;
+
+ default:
+ abort ();
+ }
+
+ sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
+ reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
+
+ for (i = 1; i < nops; i++)
+ sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
+ reg_names[regs[i]]);
+
+ strcat (buf, "}\t%@ phole stm");
+
+ output_asm_insn (buf, operands);
+ return "";
+}
+
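+/* Return 1 if OP is the PARALLEL pattern used for pushing multiple
+   registers (its first element must be a SET whose source is UNSPEC
+   number 2).  */
+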
+int
+multi_register_push (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != PARALLEL
+ || (GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ || (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC)
+ || (XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != 2))
+ return 0;
+
+ return 1;
+}
+
+
+/* Routines for use with attributes */
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+   naked: don't output any prologue or epilogue code; the user is assumed
+   to do the right thing.  */
+
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+ return 0;
+}
+
+/* Return nonzero if ATTR is a valid attribute for TYPE.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ short_call: assume the offset from the caller to the callee is small.
+
+ long_call: don't assume the offset is small. */
+
+int
+arm_valid_machine_type_attribute (type, attributes, attr, args)
+ tree type;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("long_call", attr))
+ return 1;
+
+ if (is_attribute_p ("short_call", attr))
+ return 1;
+
+ return 0;
+}
+
+/* Encode long_call or short_call attribute by prefixing
+ symbol name in DECL with a special character FLAG. */
+
+void
+arm_encode_call_attribute (decl, flag)
+ tree decl;
+ int flag;
+{
+ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ int len = strlen (str);
+ char * newstr;
+
+ /* Do not allow weak functions to be treated as short call. */
+ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
+ return;
+
+ if (ENCODED_SHORT_CALL_ATTR_P (str)
+ || ENCODED_LONG_CALL_ATTR_P (str))
+ return;
+
+ newstr = malloc (len + 2);
+ newstr[0] = flag;
+ strcpy (newstr + 1, str);
+
+ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
+}
+
+/* Return the length of a function name prefix
+ that starts with the character 'c'. */
+
+static int
+arm_get_strip_length (char c)
+{
+ switch (c)
+ {
+ ARM_NAME_ENCODING_LENGTHS
+ default: return 0;
+ }
+}
+
+/* Return a pointer to a function's name with any
+ and all prefix encodings stripped from it. */
+
+char *
+arm_strip_name_encoding (char * name)
+{
+ int skip;
+
+ while ((skip = arm_get_strip_length (* name)))
+ name += skip;
+
+ return name;
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be
+   defined within the current compilation unit.  If this cannot be
+   determined, then 0 is returned.  */
+
+static int
+current_file_function_operand (sym_ref)
+ rtx sym_ref;
+{
+ /* This is a bit of a fib. A function will have a short call flag
+ applied to its name if it has the short call attribute, or it has
+ already been defined within the current compilation unit. */
+ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
+ return 1;
+
+  /* The current function is always defined within the current compilation
+     unit.  If it is a weak definition, however, then this may not be the
+     real definition of the function, and so we have to say no.  */
+ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
+ && !DECL_WEAK (current_function_decl))
+ return 1;
+
+ /* We cannot make the determination - default to returning 0. */
+ return 0;
+}
+
+/* Return non-zero if a 32 bit "long_call" should be generated for
+ this call. We generate a long_call if the function:
+
+   a. has an __attribute__ ((long_call))
+ or b. the -mlong-calls command line switch has been specified
+
+ However we do not generate a long call if the function:
+
+ c. has an __attribute__ ((short_call))
+ or d. has an __attribute__ ((section))
+ or e. is defined within the current compilation unit.
+
+ This function will be called by C fragments contained in the machine
+ description file. CALL_REF and CALL_COOKIE correspond to the matched
+ rtl operands. CALL_SYMBOL is used to distinguish between
+ two different callers of the function. It is set to 1 in the
+ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
+ and "call_value" patterns. This is because of the difference in the
+ SYM_REFs passed by these patterns. */
+
+int
+arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
+ rtx sym_ref;
+ int call_cookie;
+ int call_symbol;
+{
+ if (!call_symbol)
+ {
+ if (GET_CODE (sym_ref) != MEM)
+ return 0;
+
+ sym_ref = XEXP (sym_ref, 0);
+ }
+
+ if (GET_CODE (sym_ref) != SYMBOL_REF)
+ return 0;
+
+ if (call_cookie & CALL_SHORT)
+ return 0;
+
+ if (TARGET_LONG_CALLS && flag_function_sections)
+ return 1;
+
+ if (current_file_function_operand (sym_ref))
+ return 0;
+
+ return (call_cookie & CALL_LONG)
+ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
+ || TARGET_LONG_CALLS;
+}
+
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+
+/* Routines for use in generating RTL */
+
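+/* Build the PARALLEL for a load multiple of COUNT registers, starting at
+   register BASE_REGNO, from consecutive words beginning at address FROM.
+   If WRITE_BACK, also update FROM past the words loaded.  The remaining
+   arguments set the corresponding flags on each MEM created.  */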
+rtx
+arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx from;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (from), from,
+ plus_constant (from, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (from, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, base_regno + j),
+ mem);
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, from);
+
+ return result;
+}
+
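+/* As arm_gen_load_multiple, but build a store multiple that writes COUNT
+   registers, starting at BASE_REGNO, to consecutive words at address TO.  */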
+rtx
+arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
+ in_struct_p, scalar_p)
+ int base_regno;
+ int count;
+ rtx to;
+ int up;
+ int write_back;
+ int unchanging_p;
+ int in_struct_p;
+ int scalar_p;
+{
+ int i = 0, j;
+ rtx result;
+ int sign = up ? 1 : -1;
+ rtx mem;
+
+ result = gen_rtx (PARALLEL, VOIDmode,
+ rtvec_alloc (count + (write_back ? 2 : 0)));
+ if (write_back)
+ {
+ XVECEXP (result, 0, 0)
+ = gen_rtx (SET, GET_MODE (to), to,
+ plus_constant (to, count * 4 * sign));
+ i = 1;
+ count++;
+ }
+
+ for (j = 0; i < count; i++, j++)
+ {
+ mem = gen_rtx (MEM, SImode, plus_constant (to, j * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+
+ XVECEXP (result, 0, i) = gen_rtx (SET, VOIDmode, mem,
+ gen_rtx (REG, SImode, base_regno + j));
+ }
+
+ if (write_back)
+ XVECEXP (result, 0, i) = gen_rtx (CLOBBER, SImode, to);
+
+ return result;
+}
+
+int
+arm_gen_movstrqi (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
+ int i;
+ rtx src, dst;
+ rtx st_src, st_dst, fin_src, fin_dst;
+ rtx part_bytes_reg = NULL;
+ rtx mem;
+ int dst_unchanging_p, dst_in_struct_p, src_unchanging_p, src_in_struct_p;
+ int dst_scalar_p, src_scalar_p;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || GET_CODE (operands[3]) != CONST_INT
+ || INTVAL (operands[2]) > 64
+ || INTVAL (operands[3]) & 3)
+ return 0;
+
+ st_dst = XEXP (operands[0], 0);
+ st_src = XEXP (operands[1], 0);
+
+ dst_unchanging_p = RTX_UNCHANGING_P (operands[0]);
+ dst_in_struct_p = MEM_IN_STRUCT_P (operands[0]);
+ dst_scalar_p = MEM_SCALAR_P (operands[0]);
+ src_unchanging_p = RTX_UNCHANGING_P (operands[1]);
+ src_in_struct_p = MEM_IN_STRUCT_P (operands[1]);
+ src_scalar_p = MEM_SCALAR_P (operands[1]);
+
+ fin_dst = dst = copy_to_mode_reg (SImode, st_dst);
+ fin_src = src = copy_to_mode_reg (SImode, st_src);
+
+ in_words_to_go = (INTVAL (operands[2]) + 3) / 4;
+ out_words_to_go = INTVAL (operands[2]) / 4;
+ last_bytes = INTVAL (operands[2]) & 3;
+
+ if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
+ part_bytes_reg = gen_rtx (REG, SImode, (in_words_to_go - 1) & 3);
+
+ for (i = 0; in_words_to_go >= 2; i+=4)
+ {
+ if (in_words_to_go > 4)
+ emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
+ src_unchanging_p,
+ src_in_struct_p,
+ src_scalar_p));
+ else
+ emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
+ FALSE, src_unchanging_p,
+ src_in_struct_p, src_scalar_p));
+
+ if (out_words_to_go)
+ {
+ if (out_words_to_go > 4)
+ emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else if (out_words_to_go != 1)
+ emit_insn (arm_gen_store_multiple (0, out_words_to_go,
+ dst, TRUE,
+ (last_bytes == 0
+ ? FALSE : TRUE),
+ dst_unchanging_p,
+ dst_in_struct_p,
+ dst_scalar_p));
+ else
+ {
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (REG, SImode, 0));
+ if (last_bytes != 0)
+ emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
+ }
+ }
+
+ in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
+ out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
+ }
+
+ /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
+ if (out_words_to_go)
+ {
+ rtx sreg;
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ emit_move_insn (sreg = gen_reg_rtx (SImode), mem);
+ emit_move_insn (fin_src = gen_reg_rtx (SImode), plus_constant (src, 4));
+
+ mem = gen_rtx (MEM, SImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, sreg);
+ emit_move_insn (fin_dst = gen_reg_rtx (SImode), plus_constant (dst, 4));
+ in_words_to_go--;
+
+ if (in_words_to_go) /* Sanity check */
+ abort ();
+ }
+
+ if (in_words_to_go)
+ {
+ if (in_words_to_go < 0)
+ abort ();
+
+ mem = gen_rtx (MEM, SImode, src);
+ RTX_UNCHANGING_P (mem) = src_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = src_in_struct_p;
+ MEM_SCALAR_P (mem) = src_scalar_p;
+ part_bytes_reg = copy_to_mode_reg (SImode, mem);
+ }
+
+ if (BYTES_BIG_ENDIAN && last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ /* The bytes we want are in the top end of the word */
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
+ GEN_INT (8 * (4 - last_bytes))));
+ part_bytes_reg = tmp;
+
+ while (last_bytes)
+ {
+ mem = gen_rtx (MEM, QImode, plus_constant (dst, last_bytes - 1));
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+ }
+ else
+ {
+ while (last_bytes)
+ {
+ if (part_bytes_reg == NULL)
+ abort ();
+
+ mem = gen_rtx (MEM, QImode, dst);
+ RTX_UNCHANGING_P (mem) = dst_unchanging_p;
+ MEM_IN_STRUCT_P (mem) = dst_in_struct_p;
+ MEM_SCALAR_P (mem) = dst_scalar_p;
+ emit_move_insn (mem, gen_rtx (SUBREG, QImode, part_bytes_reg, 0));
+ if (--last_bytes)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_addsi3 (dst, dst, const1_rtx));
+ emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
+ part_bytes_reg = tmp;
+ }
+ }
+ }
+
+ return 1;
+}
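+
+/* As a rough worked example of the expansion above: a word-aligned copy
+ of 10 bytes gives in_words_to_go = 3, out_words_to_go = 2 and
+ last_bytes = 2, so we load three words into r0-r2, store the first two
+ with a write-back store-multiple, and then dribble the last two bytes
+ of r2 out with strb/shift pairs. */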
+
+/* Generate a memory reference for a half word, such that it will be loaded
+ into the top 16 bits of the word. We can assume that the address is
+ known to be alignable and of the form reg, or plus (reg, const). */
+rtx
+gen_rotated_half_load (memref)
+ rtx memref;
+{
+ HOST_WIDE_INT offset = 0;
+ rtx base = XEXP (memref, 0);
+
+ if (GET_CODE (base) == PLUS)
+ {
+ offset = INTVAL (XEXP (base, 1));
+ base = XEXP (base, 0);
+ }
+
+ /* If we aren't allowed to generate unaligned addresses, then fail. */
+ if (TARGET_SHORT_BY_BYTES
+ && ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 0)))
+ return NULL;
+
+ base = gen_rtx (MEM, SImode, plus_constant (base, offset & ~2));
+
+ if ((BYTES_BIG_ENDIAN ? 1 : 0) ^ ((offset & 2) == 2))
+ return base;
+
+ return gen_rtx (ROTATE, SImode, base, GEN_INT (16));
+}
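+
+/* For example, on a little-endian target a halfword at offset 2 already
+ occupies the top 16 bits of the word at offset 0, so the word load is
+ returned unrotated; a halfword at offset 0 lies in the bottom 16 bits,
+ so the load is wrapped in a (rotate ... 16) to move it to the top. */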
+
+static enum machine_mode
+select_dominance_cc_mode (op, x, y, cond_or)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+ HOST_WIDE_INT cond_or;
+{
+ enum rtx_code cond1, cond2;
+ int swapped = 0;
+
+ /* Currently we will probably get the wrong result if the individual
+ comparisons are not simple. This also ensures that it is safe to
+ reverse a comparison if necessary. */
+ if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
+ != CCmode)
+ || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
+ != CCmode))
+ return CCmode;
+
+ if (cond_or)
+ cond1 = reverse_condition (cond1);
+
+ /* If the comparisons are not equal, and one doesn't dominate the other,
+ then we can't do this. */
+ if (cond1 != cond2
+ && ! comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
+ return CCmode;
+
+ if (swapped)
+ {
+ enum rtx_code temp = cond1;
+ cond1 = cond2;
+ cond2 = temp;
+ }
+
+ switch (cond1)
+ {
+ case EQ:
+ if (cond2 == EQ || ! cond_or)
+ return CC_DEQmode;
+
+ switch (cond2)
+ {
+ case LE: return CC_DLEmode;
+ case LEU: return CC_DLEUmode;
+ case GE: return CC_DGEmode;
+ case GEU: return CC_DGEUmode;
+ default: break;
+ }
+
+ break;
+
+ case LT:
+ if (cond2 == LT || ! cond_or)
+ return CC_DLTmode;
+ if (cond2 == LE)
+ return CC_DLEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GT:
+ if (cond2 == GT || ! cond_or)
+ return CC_DGTmode;
+ if (cond2 == GE)
+ return CC_DGEmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case LTU:
+ if (cond2 == LTU || ! cond_or)
+ return CC_DLTUmode;
+ if (cond2 == LEU)
+ return CC_DLEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ case GTU:
+ if (cond2 == GTU || ! cond_or)
+ return CC_DGTUmode;
+ if (cond2 == GEU)
+ return CC_DGEUmode;
+ if (cond2 == NE)
+ return CC_DNEmode;
+ break;
+
+ /* The remaining cases only occur when both comparisons are the
+ same. */
+ case NE:
+ return CC_DNEmode;
+
+ case LE:
+ return CC_DLEmode;
+
+ case GE:
+ return CC_DGEmode;
+
+ case LEU:
+ return CC_DLEUmode;
+
+ case GEU:
+ return CC_DGEUmode;
+
+ default:
+ break;
+ }
+
+ abort ();
+}
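+
+/* The dominance modes exist so that two comparisons can share a single
+ conditionally executed compare. For instance, a condition such as
+ (x == 0 && y == 0) can come out as
+
+ cmp x, #0
+ cmpeq y, #0
+
+ followed by one branch on the combined result. */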
+
+enum machine_mode
+arm_select_cc_mode (op, x, y)
+ enum rtx_code op;
+ rtx x;
+ rtx y;
+{
+ /* All floating point compares return CCFP if it is an equality
+ comparison, and CCFPE otherwise. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return (op == EQ || op == NE) ? CCFPmode : CCFPEmode;
+
+ /* A compare with a shifted operand. Because of canonicalization, the
+ comparison will have to be swapped when we emit the assembler. */
+ if (GET_MODE (y) == SImode && GET_CODE (y) == REG
+ && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
+ || GET_CODE (x) == ROTATERT))
+ return CC_SWPmode;
+
+ /* This is a special case that is used by combine to allow a
+ comparison of a shifted byte load to be split into a zero-extend
+ followed by a comparison of the shifted integer (only valid for
+ equalities and unsigned inequalities). */
+ if (GET_MODE (x) == SImode
+ && GET_CODE (x) == ASHIFT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
+ && GET_CODE (XEXP (x, 0)) == SUBREG
+ && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
+ && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
+ && (op == EQ || op == NE
+ || op == GEU || op == GTU || op == LTU || op == LEU)
+ && GET_CODE (y) == CONST_INT)
+ return CC_Zmode;
+
+ /* For an operation that sets the condition codes as a side-effect, the
+ V flag is not set correctly, so we can only use comparisons where
+ this doesn't matter. (For LT and GE we can use "mi" and "pl"
+ instead.) */
+ if (GET_MODE (x) == SImode
+ && y == const0_rtx
+ && (op == EQ || op == NE || op == LT || op == GE)
+ && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
+ || GET_CODE (x) == AND || GET_CODE (x) == IOR
+ || GET_CODE (x) == XOR || GET_CODE (x) == MULT
+ || GET_CODE (x) == NOT || GET_CODE (x) == NEG
+ || GET_CODE (x) == LSHIFTRT
+ || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
+ || GET_CODE (x) == ROTATERT || GET_CODE (x) == ZERO_EXTRACT))
+ return CC_NOOVmode;
+
+ /* A construct for a conditional compare, if the false arm contains
+ 0, then both conditions must be true, otherwise either condition
+ must be true. Not all conditions are possible, so CCmode is
+ returned if it can't be done. */
+ if (GET_CODE (x) == IF_THEN_ELSE
+ && (XEXP (x, 2) == const0_rtx
+ || XEXP (x, 2) == const1_rtx)
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == '<'
+ && GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == '<')
+ return select_dominance_cc_mode (op, XEXP (x, 0), XEXP (x, 1),
+ INTVAL (XEXP (x, 2)));
+
+ if (GET_MODE (x) == QImode && (op == EQ || op == NE))
+ return CC_Zmode;
+
+ if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
+ && GET_CODE (x) == PLUS
+ && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
+ return CC_Cmode;
+
+ return CCmode;
+}
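+
+/* As an example of the CC_NOOV case above: a test like (a & b) < 0 can
+ be compiled as "ands" followed by a "mi" branch, since only the N flag
+ is consulted and the (invalid) V flag never matters. */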
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for register 0 in the proper mode. FP means this is a
+ floating point compare: I don't think that it is needed on the arm. */
+
+rtx
+gen_compare_reg (code, x, y, fp)
+ enum rtx_code code;
+ rtx x, y;
+ int fp;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg = gen_rtx (REG, mode, 24);
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg,
+ gen_rtx (COMPARE, mode, x, y)));
+
+ return cc_reg;
+}
+
+void
+arm_reload_in_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[1], 0));
+
+ emit_insn (gen_zero_extendqisi2 (operands[2], gen_rtx (MEM, QImode, base)));
+ /* Handle the case where the address is too complex to be offset by 1. */
+ if (GET_CODE (base) == MINUS
+ || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
+ {
+ rtx base_plus = gen_rtx (REG, SImode, REGNO (operands[0]));
+
+ emit_insn (gen_rtx (SET, VOIDmode, base_plus, base));
+ base = base_plus;
+ }
+
+ emit_insn (gen_zero_extendqisi2 (gen_rtx (SUBREG, SImode, operands[0], 0),
+ gen_rtx (MEM, QImode,
+ plus_constant (base, 1))));
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ GEN_INT (8)),
+ operands[2])));
+ else
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (SUBREG, SImode,
+ operands[0], 0),
+ gen_rtx (IOR, SImode,
+ gen_rtx (ASHIFT, SImode,
+ operands[2],
+ GEN_INT (8)),
+ gen_rtx (SUBREG, SImode, operands[0], 0))));
+}
+
+void
+arm_reload_out_hi (operands)
+ rtx *operands;
+{
+ rtx base = find_replacement (&XEXP (operands[0], 0));
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+ else
+ {
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, base),
+ gen_rtx (SUBREG, QImode, operands[1], 0)));
+ emit_insn (gen_lshrsi3 (operands[2],
+ gen_rtx (SUBREG, SImode, operands[1], 0),
+ GEN_INT (8)));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (base, 1)),
+ gen_rtx (SUBREG, QImode, operands[2], 0)));
+ }
+}
+
+/* CYGNUS LOCAL */
+/* Check to see if a branch is forwards or backwards. Return TRUE if it
+ is backwards. */
+
+int
+arm_backwards_branch (from, to)
+ int from, to;
+{
+ return insn_addresses[to] <= insn_addresses[from];
+}
+
+/* Check to see if a branch is within the distance that can be done using
+ an arithmetic expression. */
+int
+short_branch (from, to)
+ int from, to;
+{
+ int delta = insn_addresses[from] + 8 - insn_addresses[to];
+
+ return abs (delta) < 980; /* A small margin for safety */
+}
+
+/* Check to see that the insn isn't the target of the conditionalizing
+ code */
+int
+arm_insn_not_targeted (insn)
+ rtx insn;
+{
+ return insn != arm_target_insn;
+}
+/* END CYGNUS LOCAL */
+
+/* Routines for manipulation of the constant pool. */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Arm instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+ relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ Scan to find an instruction which needs a pcrel move. Look forward,
+ find the last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its offset within the current
+ pool.
+
+ X is the rtx we want to replace. MODE is its mode. On return,
+ ADDRESS_ONLY will be non-zero if we really want the address of such
+ a constant, not the constant itself. */
+static HOST_WIDE_INT
+add_constant (x, mode, address_only)
+ rtx x;
+ enum machine_mode mode;
+ int * address_only;
+{
+ int i;
+ HOST_WIDE_INT offset;
+
+ * address_only = 0;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+ else if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P(x))
+ {
+ *address_only = 1;
+ mode = get_pool_mode (x);
+ x = get_pool_constant (x);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == 3)
+ x = XVECEXP (x, 0, 0);
+#endif
+
+#ifdef AOF_ASSEMBLER
+ /* PIC Symbol references need to be converted into offsets into the
+ based area. */
+ if (flag_pic && GET_CODE (x) == SYMBOL_REF)
+ x = aof_pic_entry (x);
+#endif /* AOF_ASSEMBLER */
+
+ /* First see if we've already got it */
+ for (i = 0; i < pool_size; i++)
+ {
+ if (GET_CODE (x) == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (GET_CODE (x) == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non-zero if the src operand needs to be fixed up. */
+static int
+fixit (src, mode, destreg)
+ rtx src;
+ enum machine_mode mode;
+ int destreg;
+{
+ if (CONSTANT_P (src))
+ {
+ if (GET_CODE (src) == CONST_INT)
+ return (! const_ok_for_arm (INTVAL (src))
+ && ! const_ok_for_arm (~INTVAL (src)));
+ if (GET_CODE (src) == CONST_DOUBLE)
+ return (GET_MODE (src) == VOIDmode
+ || destreg < 16
+ || (! const_double_rtx_ok_for_fpu (src)
+ && ! neg_const_double_rtx_ok_for_fpu (src)));
+ return symbol_mentioned_p (src);
+ }
+#ifndef AOF_ASSEMBLER
+ else if (GET_CODE (src) == UNSPEC && XINT (src, 1) == 3)
+ return 1;
+#endif
+ else
+ return (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0)));
+}
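+
+/* By way of example: an ARM data-processing immediate is an 8-bit value
+ rotated right by an even amount, so 0xff and 0xff000000 can be moved
+ directly, 0xffffff00 can be built with MVN, but something like
+ 0x12345678 fails both tests above and must come from the literal pool. */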
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+static rtx
+find_barrier (from, max_count)
+ rtx from;
+ int max_count;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx last = from;
+
+ while (from && count < max_count)
+ {
+ rtx tmp;
+
+ if (GET_CODE (from) == BARRIER)
+ found_barrier = from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ count += 8;
+ /* Handle table jumps as a single entity. */
+ else if (GET_CODE (from) == JUMP_INSN
+ && JUMP_LABEL (from) != 0
+ && ((tmp = next_real_insn (JUMP_LABEL (from)))
+ == next_real_insn (from))
+ && tmp != NULL
+ && GET_CODE (tmp) == JUMP_INSN
+ && (GET_CODE (PATTERN (tmp)) == ADDR_VEC
+ || GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC))
+ {
+ int elt = GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC ? 1 : 0;
+ count += (get_attr_length (from)
+ + GET_MODE_SIZE (SImode) * XVECLEN (PATTERN (tmp), elt));
+ /* Continue after the dispatch table. */
+ last = from;
+ from = NEXT_INSN (tmp);
+ continue;
+ }
+ else
+ count += get_attr_length (from);
+
+ last = from;
+ from = NEXT_INSN (from);
+ }
+
+ if (! found_barrier)
+ {
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one. */
+ rtx label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (last);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump. */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ }
+
+ return found_barrier;
+}
+
+/* Non-zero if the insn is a move instruction which needs to be fixed. */
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ int destreg;
+ enum machine_mode mode = GET_MODE (dst);
+
+ if (dst == pc_rtx)
+ return 0;
+
+ if (GET_CODE (dst) == REG)
+ destreg = REGNO (dst);
+ else if (GET_CODE (dst) == SUBREG && GET_CODE (SUBREG_REG (dst)) == REG)
+ destreg = REGNO (SUBREG_REG (dst));
+ else
+ return 0;
+
+ return fixit (src, mode, destreg);
+ }
+ return 0;
+}
+
+void
+arm_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ int count_size;
+
+#if 0
+ /* The ldr instruction can work with up to a 4k offset, and most constants
+ will be loaded with one of these instructions; however, the adr
+ instruction and the ldf instructions only work with a 1k offset. This
+ code needs to be rewritten to use the 4k offset when possible, and to
+ adjust when a 1k offset is needed. For now we just use a 1k offset
+ from the start. */
+ count_size = 4000;
+
+ /* Floating point operands can't work further than 1024 bytes from the
+ PC, so to make things simple we restrict all loads for such functions. */
+ if (TARGET_HARD_FLOAT)
+ {
+ int regno;
+
+ for (regno = 16; regno < 24; regno++)
+ if (regs_ever_live[regno])
+ {
+ count_size = 1000;
+ break;
+ }
+ }
+#else
+ count_size = 1000;
+#endif /* 0 */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn, count_size);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn = scan;
+ rtx newsrc;
+ rtx addr;
+ int scratch;
+ int address_only;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode, &address_only);
+ addr = plus_constant (gen_rtx (LABEL_REF, VOIDmode,
+ pool_vector_label),
+ offset);
+
+ /* If we only want the address of the pool entry, or
+ for wide moves to integer regs we need to split
+ the address calculation off into a separate insn.
+ If necessary, the load can then be done with a
+ load-multiple. This is safe, since we have
+ already noted the length of such insns to be 8,
+ and we are immediately over-writing the scratch
+ we have grabbed with the final result. */
+ if ((address_only || GET_MODE_SIZE (mode) > 4)
+ && (scratch = REGNO (dst)) < 16)
+ {
+ rtx reg;
+
+ if (mode == SImode)
+ reg = dst;
+ else
+ reg = gen_rtx (REG, SImode, scratch);
+
+ newinsn = emit_insn_after (gen_movaddr (reg, addr),
+ newinsn);
+ addr = reg;
+ }
+
+ if (! address_only)
+ {
+ newsrc = gen_rtx (MEM, mode, addr);
+
+ /* XXX Fixme -- I think the following is bogus. */
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after
+ (gen_rtx (SET, VOIDmode, dst, newsrc), newinsn);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+ }
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ insn = scan;
+ }
+ }
+
+ after_arm_reorg = 1;
+}
+
+
+/* Routines to output assembly language. */
+
+/* If the rtx is the correct value then return the string of the number.
+ In this way we can ensure that valid double constants are generated even
+ when cross compiling. */
+char *
+fp_immediate_constant (x)
+ rtx x;
+{
+ REAL_VALUE_TYPE r;
+ int i;
+
+ if (!fpa_consts_inited)
+ init_fpa_table ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* As for fp_immediate_constant, but value is passed directly, not in rtx. */
+static char *
+fp_const_from_val (r)
+ REAL_VALUE_TYPE *r;
+{
+ int i;
+
+ if (! fpa_consts_inited)
+ init_fpa_table ();
+
+ for (i = 0; i < 8; i++)
+ if (REAL_VALUES_EQUAL (*r, values_fpa[i]))
+ return strings_fpa[i];
+
+ abort ();
+}
+
+/* Output the operands of a LDM/STM instruction to STREAM.
+ MASK is the ARM register set mask of which only bits 0-15 are important.
+ INSTR is the assembler template for the instruction, naming the possibly
+ suffixed base register. HAT is non-zero if a hat ('^') must follow the
+ register list. */
+
+void
+print_multi_reg (stream, instr, mask, hat)
+ FILE *stream;
+ char *instr;
+ int mask, hat;
+{
+ int i;
+ int not_first = FALSE;
+
+ fputc ('\t', stream);
+ fprintf (stream, instr, REGISTER_PREFIX);
+ fputs (", {", stream);
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ {
+ if (not_first)
+ fprintf (stream, ", ");
+ fprintf (stream, "%s%s", REGISTER_PREFIX, reg_names[i]);
+ not_first = TRUE;
+ }
+
+ fprintf (stream, "}%s\n", hat ? "^" : "");
+}
+
+/* Output a 'call' insn. */
+
+char *
+output_call (operands)
+ rtx *operands;
+{
+ /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
+
+ if (REGNO (operands[0]) == 14)
+ {
+ operands[0] = gen_rtx (REG, SImode, 12);
+ output_asm_insn ("mov%?\t%0, %|lr", operands);
+ }
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ output_asm_insn ("bx%?\t%0", operands);
+ else
+ output_asm_insn ("mov%?\t%|pc, %0", operands);
+
+ return "";
+}
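+
+/* For instance, a call through r2 is emitted as
+
+ mov lr, pc
+ mov pc, r2
+
+ (or "bx r2" when interworking). Reading the pc yields the address of
+ the second following instruction, so lr receives the correct return
+ address. */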
+
+static int
+eliminate_lr2ip (x)
+ rtx *x;
+{
+ int something_changed = 0;
+ rtx x0 = *x;
+ int code = GET_CODE (x0);
+ register int i, j;
+ register char *fmt;
+
+ switch (code)
+ {
+ case REG:
+ if (REGNO (x0) == 14)
+ {
+ *x = gen_rtx (REG, SImode, 12);
+ return 1;
+ }
+ return 0;
+ default:
+ /* Scan through the sub-elements and change any references there */
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ if (fmt[i] == 'e')
+ something_changed |= eliminate_lr2ip (&XEXP (x0, i));
+ else if (fmt[i] == 'E')
+ for (j = 0; j < XVECLEN (x0, i); j++)
+ something_changed |= eliminate_lr2ip (&XVECEXP (x0, i, j));
+ return something_changed;
+ }
+}
+
+/* Output a 'call' insn that is a reference in memory. */
+
+char *
+output_call_mem (operands)
+ rtx *operands;
+{
+ operands[0] = copy_rtx (operands[0]); /* Be ultra careful */
+ /* Handle calls using lr by using ip (which may be clobbered in the
+ subroutine anyway). */
+ if (eliminate_lr2ip (&operands[0]))
+ output_asm_insn ("mov%?\t%|ip, %|lr", operands);
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ output_asm_insn ("ldr%?\t%|ip, %0", operands);
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("bx%?\t%|ip", operands);
+ }
+ else
+ {
+ output_asm_insn ("mov%?\t%|lr, %|pc", operands);
+ output_asm_insn ("ldr%?\t%|pc, %0", operands);
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+ OPERANDS[0] is an fpu register.
+ OPERANDS[1] is the first register of an arm register pair. */
+
+char *
+output_mov_long_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+ OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_long_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[3];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ ops[2] = gen_rtx (REG, SImode, 2 + arm_reg0);
+
+ output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);
+ return "";
+}
+
+/* Output a move from arm registers to arm registers of a long double.
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source. */
+char *
+output_mov_long_double_arm_from_arm (operands)
+ rtx *operands;
+{
+ /* We have to be careful here because the two might overlap */
+ int dest_start = REGNO (operands[0]);
+ int src_start = REGNO (operands[1]);
+ rtx ops[2];
+ int i;
+
+ if (dest_start < src_start)
+ {
+ for (i = 0; i < 3; i++)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+ else
+ {
+ for (i = 2; i >= 0; i--)
+ {
+ ops[0] = gen_rtx (REG, SImode, dest_start + i);
+ ops[1] = gen_rtx (REG, SImode, src_start + i);
+ output_asm_insn ("mov%?\t%0, %1", ops);
+ }
+ }
+
+ return "";
+}
+
+
+/* Output a move from arm registers to an fpu register.
+ OPERANDS[0] is an fpu register.
+ OPERANDS[1] is the first register of an arm register pair. */
+
+char *
+output_mov_double_fpu_from_arm (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[1]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
+ output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
+ return "";
+}
+
+/* Output a move from an fpu register to arm registers.
+ OPERANDS[0] is the first register of an arm register pair.
+ OPERANDS[1] is an fpu register. */
+
+char *
+output_mov_double_arm_from_fpu (operands)
+ rtx *operands;
+{
+ int arm_reg0 = REGNO (operands[0]);
+ rtx ops[2];
+
+ if (arm_reg0 == 12)
+ abort();
+
+ ops[0] = gen_rtx (REG, SImode, arm_reg0);
+ ops[1] = gen_rtx (REG, SImode, 1 + arm_reg0);
+ output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
+ output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);
+ return "";
+}
+
+/* Output a move between double words.
+ It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
+ or MEM<-REG and all MEMs must be offsettable addresses. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ enum rtx_code code0 = GET_CODE (operands[0]);
+ enum rtx_code code1 = GET_CODE (operands[1]);
+ rtx otherops[3];
+
+ if (code0 == REG)
+ {
+ int reg0 = REGNO (operands[0]);
+
+ otherops[0] = gen_rtx (REG, SImode, 1 + reg0);
+ if (code1 == REG)
+ {
+ int reg1 = REGNO (operands[1]);
+ if (reg1 == 12)
+ abort();
+
+ /* Ensure the second source is not overwritten */
+ if (reg1 == reg0 + (WORDS_BIG_ENDIAN ? -1 : 1))
+ output_asm_insn("mov%?\t%Q0, %Q1\n\tmov%?\t%R0, %R1", operands);
+ else
+ output_asm_insn("mov%?\t%R0, %R1\n\tmov%?\t%Q0, %Q1", operands);
+ }
+ else if (code1 == CONST_DOUBLE)
+ {
+ if (GET_MODE (operands[1]) == DFmode)
+ {
+ long l[2];
+ union real_extract u;
+
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[1]), (char *) &u,
+ sizeof (u));
+ REAL_VALUE_TO_TARGET_DOUBLE (u.d, l);
+ otherops[1] = GEN_INT(l[1]);
+ operands[1] = GEN_INT(l[0]);
+ }
+ else if (GET_MODE (operands[1]) != VOIDmode)
+ abort ();
+ else if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ }
+ else
+ {
+ otherops[1] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ output_mov_immediate (operands);
+ output_mov_immediate (otherops);
+ }
+ else if (code1 == CONST_INT)
+ {
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* If HOST_WIDE_INT is more than 32 bits, the intval tells us
+ what the upper word is. */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ operands[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ }
+ else
+ {
+ otherops[1] = GEN_INT (INTVAL (operands[1]) >> 32);
+ operands[1] = GEN_INT (ARM_SIGN_EXTEND (INTVAL (operands[1])));
+ }
+#else
+ /* Sign extend the intval into the high-order word */
+ if (WORDS_BIG_ENDIAN)
+ {
+ otherops[1] = operands[1];
+ operands[1] = (INTVAL (operands[1]) < 0
+ ? constm1_rtx : const0_rtx);
+ }
+ else
+ otherops[1] = INTVAL (operands[1]) < 0 ? constm1_rtx : const0_rtx;
+#endif
+ output_mov_immediate (otherops);
+ output_mov_immediate (operands);
+ }
+ else if (code1 == MEM)
+ {
+ switch (GET_CODE (XEXP (operands[1], 0)))
+ {
+ case REG:
+ output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case LABEL_REF:
+ case CONST:
+ output_asm_insn ("adr%?\t%0, %1", operands);
+ output_asm_insn ("ldm%?ia\t%0, %M0", operands);
+ break;
+
+ default:
+ if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1)))
+ {
+ otherops[0] = operands[0];
+ otherops[1] = XEXP (XEXP (operands[1], 0), 0);
+ otherops[2] = XEXP (XEXP (operands[1], 0), 1);
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ switch (INTVAL (otherops[2]))
+ {
+ case -8:
+ output_asm_insn ("ldm%?db\t%1, %M0", otherops);
+ return "";
+ case -4:
+ output_asm_insn ("ldm%?da\t%1, %M0", otherops);
+ return "";
+ case 4:
+ output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
+ return "";
+ }
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+ else
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ return "ldm%?ia\t%0, %M0";
+ }
+ else
+ {
+ otherops[1] = adj_offsettable_operand (operands[1], 4);
+ /* Take care of overlapping base/data reg. */
+ if (reg_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ }
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+ }
+ else if (code0 == MEM && code1 == REG)
+ {
+ if (REGNO (operands[1]) == 12)
+ abort();
+
+ switch (GET_CODE (XEXP (operands[0], 0)))
+ {
+ case REG:
+ output_asm_insn ("stm%?ia\t%m0, %M1", operands);
+ break;
+
+ case PRE_INC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PRE_DEC:
+ output_asm_insn ("stm%?db\t%m0!, %M1", operands);
+ break;
+
+ case POST_INC:
+ output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
+ break;
+
+ case POST_DEC:
+ abort (); /* Should never happen now */
+ break;
+
+ case PLUS:
+ if (GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT)
+ {
+ switch (INTVAL (XEXP (XEXP (operands[0], 0), 1)))
+ {
+ case -8:
+ output_asm_insn ("stm%?db\t%m0, %M1", operands);
+ return "";
+
+ case -4:
+ output_asm_insn ("stm%?da\t%m0, %M1", operands);
+ return "";
+
+ case 4:
+ output_asm_insn ("stm%?ib\t%m0, %M1", operands);
+ return "";
+ }
+ }
+ /* Fall through */
+
+ default:
+ otherops[0] = adj_offsettable_operand (operands[0], 4);
+ otherops[1] = gen_rtx (REG, SImode, 1 + REGNO (operands[1]));
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%1, %0", otherops);
+ }
+ }
+ else
+ abort(); /* Constraints should prevent this */
+
+ return "";
+}
+
+
+/* Output an arbitrary MOV reg, #n.
+ OPERANDS[0] is a register. OPERANDS[1] is a const_int. */
+
+char *
+output_mov_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[1]);
+ int n_ones = 0;
+ int i;
+
+ /* Try to use one MOV */
+ if (const_ok_for_arm (n))
+ {
+ output_asm_insn ("mov%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* Try to use one MVN */
+ if (const_ok_for_arm (~n))
+ {
+ operands[1] = GEN_INT (~n);
+ output_asm_insn ("mvn%?\t%0, %1", operands);
+ return "";
+ }
+
+ /* If all else fails, make it out of ORRs or BICs as appropriate. */
+
+ for (i=0; i < 32; i++)
+ if (n & 1 << i)
+ n_ones++;
+
+ if (n_ones > 16) /* Shorter to use MVN with BIC in this case. */
+ output_multi_immediate(operands, "mvn%?\t%0, %1", "bic%?\t%0, %0, %1", 1,
+ ~n);
+ else
+ output_multi_immediate(operands, "mov%?\t%0, %1", "orr%?\t%0, %0, %1", 1,
+ n);
+
+ return "";
+}
+
+
+/* Output an ADD r, s, #n where n may be too big for one instruction. If
+ adding zero to one register, output nothing. */
+
+char *
+output_add_immediate (operands)
+ rtx *operands;
+{
+ HOST_WIDE_INT n = INTVAL (operands[2]);
+
+ if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
+ {
+ if (n < 0)
+ output_multi_immediate (operands,
+ "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
+ -n);
+ else
+ output_multi_immediate (operands,
+ "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
+ n);
+ }
+
+ return "";
+}
+
+/* Output a multiple immediate operation.
+ OPERANDS is the vector of operands referred to in the output patterns.
+ INSTR1 is the output pattern to use for the first constant.
+ INSTR2 is the output pattern to use for subsequent constants.
+ IMMED_OP is the index of the constant slot in OPERANDS.
+ N is the constant value. */
+
+static char *
+output_multi_immediate (operands, instr1, instr2, immed_op, n)
+ rtx *operands;
+ char *instr1, *instr2;
+ int immed_op;
+ HOST_WIDE_INT n;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ n &= 0xffffffff;
+#endif
+
+ if (n == 0)
+ {
+ operands[immed_op] = const0_rtx;
+ output_asm_insn (instr1, operands); /* Quick and easy output */
+ }
+ else
+ {
+ int i;
+ char *instr = instr1;
+
+ /* Note that n is never zero here (which would give no output) */
+ for (i = 0; i < 32; i += 2)
+ {
+ if (n & (3 << i))
+ {
+ operands[immed_op] = GEN_INT (n & (255 << i));
+ output_asm_insn (instr, operands);
+ instr = instr2;
+ i += 6;
+ }
+ }
+ }
+ return "";
+}
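+
+/* A worked example of the splitting above: for N = 0x12300 the scan
+ finds the chunks 0x2300 and 0x10000, each a valid rotated 8-bit
+ immediate, and so emits
+
+ mov r0, #0x2300
+ orr r0, r0, #0x10000 */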
+
+
+/* Return the appropriate ARM instruction for the operation code.
+ The returned result should not be overwritten. OP is the rtx of the
+ operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
+ was shifted. */
+
+char *
+arithmetic_instr (op, shift_first_arg)
+ rtx op;
+ int shift_first_arg;
+{
+ switch (GET_CODE (op))
+ {
+ case PLUS:
+ return "add";
+
+ case MINUS:
+ return shift_first_arg ? "rsb" : "sub";
+
+ case IOR:
+ return "orr";
+
+ case XOR:
+ return "eor";
+
+ case AND:
+ return "and";
+
+ default:
+ abort ();
+ }
+}
+
+
+/* Ensure valid constant shifts and return the appropriate shift mnemonic
+ for the operation code. The returned result should not be overwritten.
+ OP is the rtx of the shift.
+ On exit, *AMOUNTP will be -1 if the shift is by a register, or the
+ constant amount of the shift otherwise. */
+
+static char *
+shift_op (op, amountp)
+ rtx op;
+ HOST_WIDE_INT *amountp;
+{
+ char *mnem;
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_CODE (XEXP (op, 1)) == REG || GET_CODE (XEXP (op, 1)) == SUBREG)
+ *amountp = -1;
+ else if (GET_CODE (XEXP (op, 1)) == CONST_INT)
+ *amountp = INTVAL (XEXP (op, 1));
+ else
+ abort ();
+
+ switch (code)
+ {
+ case ASHIFT:
+ mnem = "asl";
+ break;
+
+ case ASHIFTRT:
+ mnem = "asr";
+ break;
+
+ case LSHIFTRT:
+ mnem = "lsr";
+ break;
+
+ case ROTATERT:
+ mnem = "ror";
+ break;
+
+ case MULT:
+ /* We never have to worry about the amount being other than a
+ power of 2, since this case can never be reloaded from a reg. */
+ if (*amountp != -1)
+ *amountp = int_log2 (*amountp);
+ else
+ abort ();
+ return "asl";
+
+ default:
+ abort ();
+ }
+
+ if (*amountp != -1)
+ {
+ /* This is not 100% correct, but follows from the desire to merge
+ multiplication by a power of 2 with the recognizer for a
+ shift. >=32 is not a valid shift for "asl", so we must try and
+ output a shift that produces the correct arithmetical result.
+ Using lsr #32 is identical except for the fact that the carry bit
+ is not set correctly if we set the flags; but we never use the
+ carry bit from such an operation, so we can ignore that. */
+ if (code == ROTATERT)
+ *amountp &= 31; /* Rotate is just modulo 32 */
+ else if (*amountp != (*amountp & 31))
+ {
+ if (code == ASHIFT)
+ mnem = "lsr";
+ *amountp = 32;
+ }
+
+ /* Shifts of 0 are no-ops. */
+ if (*amountp == 0)
+ return NULL;
+ }
+
+ return mnem;
+}
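+
+/* For example, a MULT by 8 comes back as "asl" with *AMOUNTP set to 3,
+ while an ASHIFT by an out-of-range amount such as 40 is rewritten as
+ "lsr" with *AMOUNTP forced to 32, which yields the same all-zero
+ result. */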
+
+
+/* Obtain the shift count from POWER, which must be an exact power of two. */
+
+static HOST_WIDE_INT
+int_log2 (power)
+ HOST_WIDE_INT power;
+{
+ HOST_WIDE_INT shift = 0;
+
+ while (((((HOST_WIDE_INT) 1) << shift) & power) == 0)
+ {
+ if (shift > 31)
+ abort ();
+ shift++;
+ }
+
+ return shift;
+}
+
+/* Output a .ascii pseudo-op, keeping track of lengths. This is because
+ /bin/as is horribly restrictive. */
+
+void
+output_ascii_pseudo_op (stream, p, len)
+ FILE *stream;
+ unsigned char *p;
+ int len;
+{
+ int i;
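+ /* Start LEN_SO_FAR above the 50 character limit so that the first
+ character emitted forces a fresh .ascii directive to be opened. */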
+ int len_so_far = 1000;
+ int chars_so_far = 0;
+
+ for (i = 0; i < len; i++)
+ {
+ register int c = p[i];
+
+ if (len_so_far > 50)
+ {
+ if (chars_so_far)
+ fputs ("\"\n", stream);
+ fputs ("\t.ascii\t\"", stream);
+ len_so_far = 0;
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+ chars_so_far = 0;
+ }
+
+ if (c == '\"' || c == '\\')
+ {
+ putc('\\', stream);
+ len_so_far++;
+ }
+
+ if (c >= ' ' && c < 0177)
+ {
+ putc (c, stream);
+ len_so_far++;
+ }
+ else
+ {
+ fprintf (stream, "\\%03o", c);
+ len_so_far +=4;
+ }
+
+ chars_so_far++;
+ }
+
+ fputs ("\"\n", stream);
+ /* CYGNUS LOCAL */
+ arm_increase_location (chars_so_far);
+ /* END CYGNUS LOCAL */
+}
+
+
+/* Try to determine whether a pattern really clobbers the link register.
+ This information is useful when peepholing, so that lr need not be pushed
+ if we combine a call followed by a return.
+ NOTE: This code does not check for side-effect expressions in a SET_SRC:
+ such a check should not be needed because these only update an existing
+ value within a register; the register must still be set elsewhere within
+ the function. */
+
+static int
+pattern_really_clobbers_lr (x)
+ rtx x;
+{
+ int i;
+
+ switch (GET_CODE (x))
+ {
+ case SET:
+ switch (GET_CODE (SET_DEST (x)))
+ {
+ case REG:
+ return REGNO (SET_DEST (x)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == REG)
+ return REGNO (XEXP (SET_DEST (x), 0)) == 14;
+
+ if (GET_CODE (XEXP (SET_DEST (x), 0)) == MEM)
+ return 0;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case PARALLEL:
+ for (i = 0; i < XVECLEN (x, 0); i++)
+ if (pattern_really_clobbers_lr (XVECEXP (x, 0, i)))
+ return 1;
+ return 0;
+
+ case CLOBBER:
+ switch (GET_CODE (XEXP (x, 0)))
+ {
+ case REG:
+ return REGNO (XEXP (x, 0)) == 14;
+
+ case SUBREG:
+ if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG)
+ return REGNO (XEXP (XEXP (x, 0), 0)) == 14;
+ abort ();
+
+ default:
+ return 0;
+ }
+
+ case UNSPEC:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int
+function_really_clobbers_lr (first)
+ rtx first;
+{
+ rtx insn, next;
+
+ for (insn = first; insn; insn = next_nonnote_insn (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case BARRIER:
+ case NOTE:
+ case CODE_LABEL:
+ case JUMP_INSN: /* Jump insns only change the PC (and conds) */
+ case INLINE_HEADER:
+ break;
+
+ case INSN:
+ if (pattern_really_clobbers_lr (PATTERN (insn)))
+ return 1;
+ break;
+
+ case CALL_INSN:
+ /* Don't yet know how to handle those calls that are not to a
+ SYMBOL_REF */
+ if (GET_CODE (PATTERN (insn)) != PARALLEL)
+ abort ();
+
+ switch (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)))
+ {
+ case CALL:
+ if (GET_CODE (XEXP (XEXP (XVECEXP (PATTERN (insn), 0, 0), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ case SET:
+ if (GET_CODE (XEXP (XEXP (SET_SRC (XVECEXP (PATTERN (insn),
+ 0, 0)), 0), 0))
+ != SYMBOL_REF)
+ return 1;
+ break;
+
+ default: /* Don't recognize it, be safe */
+ return 1;
+ }
+
+ /* A call can be made (by peepholing) not to clobber lr iff it is
+ followed by a return. There may, however, be a use insn if
+ we are returning the result of the call.
+ If we run off the end of the insn chain, then that means the
+ call was at the end of the function. Unfortunately we don't
+ have a return insn for the peephole to recognize, so we
+ must reject this. (Can this be fixed by adding our own insn?) */
+ if ((next = next_nonnote_insn (insn)) == NULL)
+ return 1;
+
+ /* No need to worry about lr if the call never returns */
+ if (GET_CODE (next) == BARRIER)
+ break;
+
+ if (GET_CODE (next) == INSN && GET_CODE (PATTERN (next)) == USE
+ && (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
+ && (REGNO (SET_DEST (XVECEXP (PATTERN (insn), 0, 0)))
+ == REGNO (XEXP (PATTERN (next), 0))))
+ if ((next = next_nonnote_insn (next)) == NULL)
+ return 1;
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == RETURN)
+ break;
+ return 1;
+
+ default:
+ abort ();
+ }
+ }
+
+ /* We have reached the end of the chain so lr was _not_ clobbered */
+ return 0;
+}
+
+char *
+output_return_instruction (operand, really_return, reverse)
+ rtx operand;
+ int really_return;
+ int reverse;
+{
+ char instr[100];
+ int reg, live_regs = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ return_used_this_function = 1;
+
+ if (volatile_func)
+ {
+ rtx ops[2];
+ /* If this function was declared non-returning, and we have found a tail
+ call, then we have to trust that the called function won't return. */
+ if (! really_return)
+ return "";
+
+ /* Otherwise, trap an attempted return by aborting. */
+ ops[0] = operand;
+ ops[1] = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (ops[1]);
+ output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
+ return "";
+ }
+
+ if (current_function_calls_alloca && ! really_return)
+ abort();
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs++;
+
+ if (live_regs || (regs_ever_live[14] && ! lr_save_eliminated))
+ live_regs++;
+
+ if (frame_pointer_needed)
+ live_regs += 4;
+
+ if (live_regs)
+ {
+ if (lr_save_eliminated || ! regs_ever_live[14])
+ live_regs++;
+
+ if (frame_pointer_needed)
+ strcpy (instr,
+ reverse ? "ldm%?%D0ea\t%|fp, {" : "ldm%?%d0ea\t%|fp, {");
+ else
+ strcpy (instr,
+ reverse ? "ldm%?%D0fd\t%|sp!, {" : "ldm%?%d0fd\t%|sp!, {");
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[reg]);
+ if (--live_regs)
+ strcat (instr, ", ");
+ }
+
+ if (frame_pointer_needed)
+ {
+ strcat (instr, "%|");
+ strcat (instr, reg_names[11]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, reg_names[13]);
+ strcat (instr, ", ");
+ strcat (instr, "%|");
+ strcat (instr, TARGET_THUMB_INTERWORK || (! really_return)
+ ? reg_names[14] : reg_names[15] );
+ }
+ else
+ {
+ strcat (instr, "%|");
+ if (TARGET_THUMB_INTERWORK && really_return)
+ strcat (instr, reg_names[12]);
+ else
+ strcat (instr, really_return ? reg_names[15] : reg_names[14]);
+ }
+ strcat (instr, (TARGET_APCS_32 || !really_return) ? "}" : "}^");
+ output_asm_insn (instr, &operand);
+
+ if (TARGET_THUMB_INTERWORK && really_return)
+ {
+ strcpy (instr, "bx%?");
+ strcat (instr, reverse ? "%D0" : "%d0");
+ strcat (instr, "\t%|");
+ strcat (instr, frame_pointer_needed ? "lr" : "ip");
+
+ output_asm_insn (instr, & operand);
+ }
+ }
+ else if (really_return)
+ {
+ /* CYGNUS LOCAL unknown */
+ if (operand && GET_MODE_CLASS (GET_MODE (XEXP (operand, 0))) != MODE_CC)
+ output_asm_insn ("ldr%?\t%|ip, %0", & operand);
+ /* END CYGNUS LOCAL */
+
+ if (TARGET_THUMB_INTERWORK)
+ sprintf (instr, "bx%%?%%%s0\t%%|lr", reverse ? "D" : "d");
+ else
+ sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
+ reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");
+
+ output_asm_insn (instr, & operand);
+ }
+
+ return "";
+}
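+
+/* Typical output from the above for a function that saved r4 and lr is
+ the single instruction
+
+ ldmfd sp!, {r4, pc}
+
+ which restores the callee-saved register and returns in one go. */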
+
+/* Return nonzero if optimizing and the current function is volatile.
+ Such functions never return, and many memory cycles can be saved
+ by not storing register values that will never be needed again.
+ This optimization was added to speed up context switching in a
+ kernel application. */
+
+int
+arm_volatile_func ()
+{
+ return (optimize > 0 && TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* CYGNUS LOCAL unknown */
+/* Return the size of the prologue. It's not too bad if we slightly
+ over-estimate. */
+
+static int
+get_prologue_size ()
+{
+ return profile_flag ? 12 : 0;
+}
+/* END CYGNUS LOCAL */
+
+/* The amount of stack adjustment that happens here, in output_return and in
+ output_epilogue must be exactly the same as was calculated during reload,
+ or things will point to the wrong place. The only time we can safely
+ ignore this constraint is when a function has no arguments on the stack,
+ no stack frame requirement and no live registers except for `lr'. If we
+ can guarantee that by making all function calls into tail calls and that
+ lr is not clobbered in any other way, then there is no need to push lr
+ onto the stack. */
+
+void
+output_func_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Nonzero if we must stuff some register arguments onto the stack as if
+ they were passed there. */
+ int store_arg_regs = 0;
+
+ if (arm_ccfsm_state || arm_target_insn)
+ abort (); /* Sanity check */
+
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ return_used_this_function = 0;
+ lr_save_eliminated = 0;
+
+ fprintf (f, "\t%s args = %d, pretend = %d, frame = %d\n",
+ ASM_COMMENT_START, current_function_args_size,
+ current_function_pretend_args_size, frame_size);
+ fprintf (f, "\t%s frame_needed = %d, current_function_anonymous_args = %d\n",
+ ASM_COMMENT_START, frame_pointer_needed,
+ current_function_anonymous_args);
+
+ if (volatile_func)
+ fprintf (f, "\t%s Volatile function.\n", ASM_COMMENT_START);
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= (1 << reg);
+
+ if (frame_pointer_needed)
+ live_regs_mask |= 0xD800;
+ else if (regs_ever_live[14])
+ {
+ if (! current_function_args_size
+ && ! function_really_clobbers_lr (get_insns ()))
+ lr_save_eliminated = 1;
+ else
+ live_regs_mask |= 0x4000;
+ }
+
+ if (live_regs_mask)
+ {
+ /* If a DImode load/store multiple is used, and the base register
+ is r3, then r4 can become an ever-live register without lr
+ doing so; in this case we need to push lr as well, or we
+ will fail to get a proper return. */
+
+ live_regs_mask |= 0x4000;
+ lr_save_eliminated = 0;
+
+ }
+
+ if (lr_save_eliminated)
+ fprintf (f,"\t%s I don't think this function clobbers lr\n",
+ ASM_COMMENT_START);
+
+#ifdef AOF_ASSEMBLER
+ if (flag_pic)
+ fprintf (f, "\tmov\t%sip, %s%s\n", REGISTER_PREFIX, REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+#endif
+}
+
+
+void
+output_func_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int reg, live_regs_mask = 0;
+ /* CYGNUS LOCAL unknown */
+ int code_size = 0;
+ /* END CYGNUS LOCAL */
+ /* If we need this then it will always be at least this much */
+ int floats_offset = 12;
+ rtx operands[3];
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ if (use_return_insn (FALSE) && return_used_this_function)
+ {
+ if ((frame_size + current_function_outgoing_args_size) != 0
+ /* CYGNUS LOCAL bug fix */
+ && !(frame_pointer_needed && TARGET_APCS))
+ /* END CYGNUS LOCAL */
+ abort ();
+ goto epilogue_done;
+ }
+
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ goto epilogue_done;
+
+ /* A volatile function should never return. Call abort. */
+ if (TARGET_ABORT_NORETURN && volatile_func)
+ {
+ rtx op = gen_rtx (SYMBOL_REF, Pmode, "abort");
+ assemble_external_libcall (op);
+ output_asm_insn ("bl\t%a0", &op);
+ /* CYGNUS LOCAL unknown */
+ code_size = 4;
+ /* END CYGNUS LOCAL */
+ goto epilogue_done;
+ }
+
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ live_regs_mask |= (1 << reg);
+ floats_offset += 4;
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%sfp, #-%d]\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX, floats_offset);
+ }
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ floats_offset += 12;
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ /* We can't unstack more than four registers at once */
+ if (start_reg - reg == 3)
+ {
+ fprintf (f, "\tlfm\t%s%s, 4, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg],
+ REGISTER_PREFIX, floats_offset);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ start_reg = reg - 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ fprintf (f, "\tlfm\t%s%s, %d, [%sfp, #-%d]\n",
+ REGISTER_PREFIX, reg_names[reg + 1],
+ start_reg - reg, REGISTER_PREFIX, floats_offset);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (TARGET_THUMB_INTERWORK)
+ {
+ live_regs_mask |= 0x6800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask, FALSE);
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ /* CYGNUS LOCAL unknown */
+ code_size += 8;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ live_regs_mask |= 0xA800;
+ print_multi_reg (f, "ldmea\t%sfp", live_regs_mask,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+ else
+ {
+ /* Restore stack pointer if necessary. */
+ if (frame_size + current_function_outgoing_args_size != 0)
+ {
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (frame_size
+ + current_function_outgoing_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 16; reg < 24; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tldfe\t%s%s, [%ssp], #12\n", REGISTER_PREFIX,
+ reg_names[reg], REGISTER_PREFIX);
+ }
+ }
+ else
+ {
+ int start_reg = 16;
+
+ for (reg = 16; reg < 24; reg++)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (reg - start_reg == 3)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, 4, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ REGISTER_PREFIX);
+ start_reg = reg + 1;
+ }
+ }
+ else
+ {
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+
+ start_reg = reg + 1;
+ }
+ }
+
+ /* Just in case the last register checked also needs unstacking. */
+ if (reg != start_reg)
+ {
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ fprintf (f, "\tlfmfd\t%s%s, %d, [%ssp]!\n",
+ REGISTER_PREFIX, reg_names[start_reg],
+ reg - start_reg, REGISTER_PREFIX);
+ }
+ }
+
+ if (current_function_pretend_args_size == 0 && regs_ever_live[14])
+ {
+ if (TARGET_THUMB_INTERWORK)
+ {
+ /* CYGNUS LOCAL */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ }
+ else if (lr_save_eliminated)
+	fprintf (f, (TARGET_APCS_32 ? "\tmov\t%spc, %slr\n"
+		     : "\tmovs\t%spc, %slr\n"),
+		 REGISTER_PREFIX, REGISTER_PREFIX);
+ else
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask | 0x8000,
+ TARGET_APCS_32 ? FALSE : TRUE);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ else
+ {
+ if (live_regs_mask || regs_ever_live[14])
+ {
+ /* Restore the integer regs, and the return address into lr */
+ if (! lr_save_eliminated)
+ live_regs_mask |= 0x4000;
+
+ if (live_regs_mask != 0)
+ /* CYGNUS LOCAL unknown */
+ {
+ code_size += 4;
+ print_multi_reg (f, "ldmfd\t%ssp!", live_regs_mask, FALSE);
+ }
+ /* END CYGNUS LOCAL */
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ /* Unwind the pre-pushed regs */
+ operands[0] = operands[1] = stack_pointer_rtx;
+ operands[2] = GEN_INT (current_function_pretend_args_size);
+ output_add_immediate (operands);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ /* And finally, go home */
+ if (TARGET_THUMB_INTERWORK)
+ fprintf (f, "\tbx\t%slr\n", REGISTER_PREFIX);
+ else if (TARGET_APCS_32)
+	fprintf (f, "\tmov\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX);
+      else
+	fprintf (f, "\tmovs\t%spc, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX);
+ /* CYGNUS LOCAL unknown */
+ code_size += 4;
+ /* END CYGNUS LOCAL */
+ }
+ }
+
+epilogue_done:
+
+ /* CYGNUS LOCAL unknown */
+ if (optimize > 0)
+ arm_increase_location (code_size
+ + insn_addresses[INSN_UID (get_last_insn ())]
+ + get_prologue_size ());
+ /* END CYGNUS LOCAL */
+
+ current_function_anonymous_args = 0;
+ after_arm_reorg = 0;
+}
+
+static void
+emit_multi_reg_push (mask)
+ int mask;
+{
+ int num_regs = 0;
+ int i, j;
+ rtx par;
+
+ for (i = 0; i < 16; i++)
+ if (mask & (1 << i))
+ num_regs++;
+
+ if (num_regs == 0 || num_regs > 16)
+ abort ();
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (num_regs));
+
+ for (i = 0; i < 16; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, 0)
+ = gen_rtx (SET, VOIDmode, gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, SImode, i)),
+ 2));
+ break;
+ }
+ }
+
+ for (j = 1, i++; j < num_regs; i++)
+ {
+ if (mask & (1 << i))
+ {
+ XVECEXP (par, 0, j)
+ = gen_rtx (USE, VOIDmode, gen_rtx (REG, SImode, i));
+ j++;
+ }
+ }
+
+ emit_insn (par);
+}
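+
+/* By way of illustration: for MASK == 0x4010 (r4 and lr) the PARALLEL
+   built above holds a SET storing r4 through a pre-decremented sp,
+   followed by a USE of r14; a store-multiple pattern in the md file is
+   then expected to match this and emit a single
+
+	stmfd	sp!, {r4, lr}  */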
+
+static void
+emit_sfm (base_reg, count)
+ int base_reg;
+ int count;
+{
+ rtx par;
+ int i;
+
+ par = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (par, 0, 0) = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, BLKmode,
+ gen_rtx (PRE_DEC, BLKmode,
+ stack_pointer_rtx)),
+ gen_rtx (UNSPEC, BLKmode,
+ gen_rtvec (1, gen_rtx (REG, XFmode,
+ base_reg++)),
+ 2));
+ for (i = 1; i < count; i++)
+ XVECEXP (par, 0, i) = gen_rtx (USE, VOIDmode,
+ gen_rtx (REG, XFmode, base_reg++));
+
+ emit_insn (par);
+}
+
+void
+arm_expand_prologue ()
+{
+ int reg;
+ rtx amount = GEN_INT (-(get_frame_size ()
+ + current_function_outgoing_args_size));
+ int live_regs_mask = 0;
+ int store_arg_regs = 0;
+ /* CYGNUS LOCAL unknown */
+ int sp_overflow_check = 0;
+ /* END CYGNUS LOCAL */
+ int volatile_func = (optimize > 0
+ && TREE_THIS_VOLATILE (current_function_decl));
+
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (! volatile_func)
+ for (reg = 0; reg <= 10; reg++)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ live_regs_mask |= 1 << reg;
+
+ if (! volatile_func && regs_ever_live[14])
+ live_regs_mask |= 0x4000;
+
+ if (frame_pointer_needed)
+ {
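+      /* 0xD800 is fp (r11), ip (r12), lr (r14) and pc (r15).  */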
+ live_regs_mask |= 0xD800;
+ emit_insn (gen_movsi (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx));
+ }
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ emit_multi_reg_push ((0xf0 >> (current_function_pretend_args_size / 4))
+ & 0xf);
+ else
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-current_function_pretend_args_size)));
+ }
+
+ if (live_regs_mask)
+ {
+ /* If we have to push any regs, then we must push lr as well, or
+ we won't get a proper return. */
+ live_regs_mask |= 0x4000;
+ emit_multi_reg_push (live_regs_mask);
+ }
+
+ /* For now the integer regs are still pushed in output_func_epilogue (). */
+
+ if (! volatile_func)
+ {
+ if (arm_fpu_arch == FP_SOFT2)
+ {
+ for (reg = 23; reg > 15; reg--)
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, XFmode,
+ gen_rtx (PRE_DEC, XFmode,
+ stack_pointer_rtx)),
+ gen_rtx (REG, XFmode, reg)));
+ }
+ else
+ {
+ int start_reg = 23;
+
+ for (reg = 23; reg > 15; reg--)
+ {
+ if (regs_ever_live[reg] && ! call_used_regs[reg])
+ {
+ if (start_reg - reg == 3)
+ {
+ emit_sfm (reg, 4);
+ start_reg = reg - 1;
+ }
+ }
+ else
+ {
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ start_reg = reg - 1;
+ }
+ }
+
+ if (start_reg != reg)
+ emit_sfm (reg + 1, start_reg - reg);
+ }
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_addsi3 (hard_frame_pointer_rtx, gen_rtx (REG, SImode, 12),
+ (GEN_INT
+ (-(4 + current_function_pretend_args_size)))));
+
+ /* CYGNUS LOCAL */
+  /* The ARM VxWorks group wants the instructions that set up the frame
+     to be left unscheduled and unbroken.  */
+ if (TARGET_NO_SCHED_PRO)
+ emit_insn (gen_blockage ());
+
+ /* Checking whether the frame amount is zero is not a good enough
+ marker for deciding whether we need to check for stack overflow.
+ We are interested in whether anything has/is being stored on the
+ stack. Since GCC always creates the frame structure at the
+ moment, this is always true. When we add a machine specific flag
+ to allow leaf functions to avoid creating an entry frame we will
+ need to make this conditional (NOTE: This will probably not be a
+ standard feature, since the debugging world may assume that EVERY
+ function has a frame, whereas it is not actually a requirement of
+ the APCS). */
+ if (TARGET_APCS_STACK)
+ {
+ int bound = get_frame_size ();
+
+ /* The software stack overflow handler has two forms. The first
+	 is for small stack frames, where 256 bytes or less of stack is
+ required:
+ __rt_stkovf_split_small
+
+	 The second is for bigger stack frames of more than 256 bytes:
+ __rt_stkovf_split_big
+
+ The run-time *MUST* provide these routines when software
+ stack checking is enabled. After calling one of the above
+	 routines the fp/r11 and sp/r13 registers do not necessarily
+ point into the same stack chunk. This means that arguments
+ passed on the stack *MUST* be addressed by offsets from
+ fp/r11 and *NOT* from sp/r13. The sl/r10 register should
+ always be at the bottom of the current stack chunk, with at
+	 least 256 bytes of stack available beneath it (this allows for
+	 leaf functions that use less than 256 bytes of stack to avoid
+	 the stack limit check, as well as giving the overflow
+ functions some workspace).
+
+ NOTE: The stack-checking APCS does *NOT* cope with alloca(),
+ since the amount of stack required is not known until
+ run-time. Similarly the use of run-time sized vectors causes
+ the same problem. This means that the handler routines
+ should only be used for raising aborts at the moment, and not
+ for providing stack chunk extension.
+
+ TODO: Check code generated for late stack pointer
+ modifications. The APCS allows for these, but a similar
+ stack overflow check and call must be inserted. */
+
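+      /* By way of illustration, the small-frame check below is expected
+	 to expand to a compare of sp against the stack limit register
+	 sl/r10 followed by a conditional call:
+
+	     cmp	sp, sl
+	     bllt	|__rt_stkovf_split_small|
+
+	 The big-frame case first computes sp - bound into ip/r12 and
+	 compares that against sl instead.  */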
+ if (bound < 256)
+ {
+	  /* Leaf functions that use less than 256 bytes of stack do
+ not need to perform a check: */
+ if (frame_pointer_needed)
+ {
+ /* Stop the prologue being re-ordered: */
+ emit_insn (gen_blockage ());
+ emit_insn (gen_cond_call (stack_pointer_rtx,
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_small"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ else
+ {
+ rtx bamount;
+
+ if (!frame_pointer_needed)
+ abort ();
+
+ if (!const_ok_for_arm ((HOST_WIDE_INT) bound))
+ {
+	      /* Find the closest value above BOUND that is representable
+		 as an 8-bit constant rotated by an even amount: */
+	      int count;
+	      for (count = 0; ((bound >> count) & ~0xFF); count += 2);
+ bound = (bound & (0xFF << count)) + (1 << count);
+ }
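+	  /* Worked example: for bound = 0x1234 the loop above stops at
+	     count = 6, since 0x1234 >> 6 == 0x48 fits in 8 bits; then
+	     (0x1234 & (0xFF << 6)) + (1 << 6) == 0x1240 == 0x49 << 6,
+	     a valid ARM immediate just above the original bound.  */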
+ bamount = GEN_INT (- bound);
+
+ emit_insn (gen_blockage ()); /* stop prologue being re-ordered */
+ emit_insn (gen_addsi3 (gen_rtx (REG, SImode, 12),
+ stack_pointer_rtx, bamount));
+ emit_insn (gen_cond_call (gen_rtx (REG, SImode, 12),
+ gen_rtx (REG, SImode, 10),
+ gen_rtx (SYMBOL_REF, Pmode,
+ "*__rt_stkovf_split_big"),
+ gen_rtx (LTU, SImode, 24)));
+ sp_overflow_check = 1;
+ }
+ }
+ /* END CYGNUS LOCAL */
+
+ if (amount != const0_rtx)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, amount));
+ emit_insn (gen_rtx (CLOBBER, VOIDmode,
+ gen_rtx (MEM, BLKmode, stack_pointer_rtx)));
+ }
+
+ /* CYGNUS LOCAL */
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. Similarly do not allow instructions
+ to be moved to before the stack overflow check or if the user has
+ requested no scheduling in the prolog. */
+ if (profile_flag || profile_block_flag || sp_overflow_check)
+ emit_insn (gen_blockage ());
+ /* END CYGNUS LOCAL */
+}
+
+
+/* If CODE is 'd', then the X is a condition operand and the instruction
+ should only be executed if the condition is true.
+ if CODE is 'D', then the X is a condition operand and the instruction
+ should only be executed if the condition is false: however, if the mode
+ of the comparison is CCFPEmode, then always execute the instruction -- we
+ do this because in these circumstances !GE does not necessarily imply LT;
+ in these cases the instruction pattern will take care to make sure that
+ an instruction containing %d will follow, thereby undoing the effects of
+ doing this instruction unconditionally.
+ If CODE is 'N' then X is a floating point operand that must be negated
+ before output.
+ If CODE is 'B' then output a bitwise inverted value of X (a const int).
+ If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
+
+void
+arm_print_operand (stream, x, code)
+ FILE *stream;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, stream);
+ return;
+
+ case '|':
+ fputs (REGISTER_PREFIX, stream);
+ return;
+
+ case '?':
+ if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
+ fputs (arm_condition_codes[arm_current_cc], stream);
+ return;
+
+ case 'N':
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_FROM_CONST_DOUBLE (r, x);
+ r = REAL_VALUE_NEGATE (r);
+ fprintf (stream, "%s", fp_const_from_val (&r));
+ }
+ return;
+
+ case 'B':
+ if (GET_CODE (x) == CONST_INT)
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "%d",
+#else
+ "%ld",
+#endif
+ ARM_SIGN_EXTEND (~ INTVAL (x)));
+ else
+ {
+ putc ('~', stream);
+ output_addr_const (stream, x);
+ }
+ return;
+
+ case 'i':
+ fprintf (stream, "%s", arithmetic_instr (x, 1));
+ return;
+
+ case 'I':
+ fprintf (stream, "%s", arithmetic_instr (x, 0));
+ return;
+
+ case 'S':
+ {
+ HOST_WIDE_INT val;
+ char *shift = shift_op (x, &val);
+
+ if (shift)
+ {
+	    fprintf (stream, ", %s ", shift);
+ if (val == -1)
+ arm_print_operand (stream, XEXP (x, 1), 0);
+ else
+ fprintf (stream,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+ "#%d",
+#else
+ "#%ld",
+#endif
+ val);
+ }
+ }
+ return;
+
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], stream);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], stream);
+ return;
+
+ case 'm':
+ fputs (REGISTER_PREFIX, stream);
+ if (GET_CODE (XEXP (x, 0)) == REG)
+ fputs (reg_names[REGNO (XEXP (x, 0))], stream);
+ else
+ fputs (reg_names[REGNO (XEXP (XEXP (x, 0), 0))], stream);
+ return;
+
+ case 'M':
+ fprintf (stream, "{%s%s-%s%s}", REGISTER_PREFIX, reg_names[REGNO (x)],
+ REGISTER_PREFIX, reg_names[REGNO (x) - 1
+ + ((GET_MODE_SIZE (GET_MODE (x))
+ + GET_MODE_SIZE (SImode) - 1)
+ / GET_MODE_SIZE (SImode))]);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (arm_condition_codes[get_arm_condition_code (x)],
+ stream);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
+ (get_arm_condition_code (x))],
+ stream);
+ return;
+
+ default:
+ if (x == 0)
+ abort ();
+
+ if (GET_CODE (x) == REG)
+ {
+ fputs (REGISTER_PREFIX, stream);
+ fputs (reg_names[REGNO (x)], stream);
+ }
+ else if (GET_CODE (x) == MEM)
+ {
+ output_memory_reference_mode = GET_MODE (x);
+ output_address (XEXP (x, 0));
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ fprintf (stream, "#%s", fp_immediate_constant (x));
+ else if (GET_CODE (x) == NEG)
+ abort (); /* This should never happen now. */
+ else
+ {
+ fputc ('#', stream);
+ output_addr_const (stream, x);
+ }
+ }
+}
+
+/* CYGNUS LOCAL unknown */
+/* Increase the `arm_text_location' by AMOUNT if we're in the text
+ segment. */
+
+void
+arm_increase_location (amount)
+ int amount;
+{
+ if (in_text_section ())
+ arm_text_location += amount;
+}
+
+
+/* Output a label definition. If this label is within the .text segment, it
+ is stored in OFFSET_TABLE, to be used when building `llc' instructions.
+   Maybe GCC remembers names not starting with a `*' for a long time, but
+   this is a minority case anyway, so we just make a copy.  Do not store
+   the leading `*' if the name starts with one.  */
+
+void
+arm_asm_output_label (stream, name)
+ FILE * stream;
+ char * name;
+{
+ char * real_name;
+ char * s;
+ struct label_offset *cur;
+ int hash = 0;
+
+ assemble_name (stream, name);
+ fputs (":\n", stream);
+
+ if (! in_text_section ())
+ return;
+
+ if (name[0] == '*')
+ {
+ real_name = xmalloc (1 + strlen (&name[1]));
+ strcpy (real_name, &name[1]);
+ }
+ else
+ {
+ real_name = xmalloc (2 + strlen (name));
+ strcpy (real_name, user_label_prefix);
+ strcat (real_name, name);
+ }
+ for (s = real_name; *s; s++)
+ hash += *s;
+
+ hash = hash % LABEL_HASH_SIZE;
+ cur = (struct label_offset *) xmalloc (sizeof (struct label_offset));
+ cur->name = real_name;
+ cur->offset = arm_text_location;
+ cur->cdr = offset_table[hash];
+ offset_table[hash] = cur;
+}
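+
+/* Note that the hash used above is simply the byte sum of the label
+   name reduced mod LABEL_HASH_SIZE; collisions are chained through the
+   `cdr' field of struct label_offset.  */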
+/* END CYGNUS LOCAL */
+
+/* A finite state machine takes care of noticing whether or not instructions
+ can be conditionally executed, and thus decrease execution time and code
+ size by deleting branch instructions. The fsm is controlled by
+ final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
+
+/* The states of the fsm controlling condition codes are:
+ 0: normal, do nothing special
+ 1: make ASM_OUTPUT_OPCODE not output this instruction
+ 2: make ASM_OUTPUT_OPCODE not output this instruction
+ 3: make instructions conditional
+ 4: make instructions conditional
+
+ State transitions (state->state by whom under condition):
+ 0 -> 1 final_prescan_insn if the `target' is a label
+ 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
+ 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
+ 3 -> 0 ASM_OUTPUT_INTERNAL_LABEL if the `target' label is reached
+ (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
+ 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
+ (the target insn is arm_target_insn).
+
+ If the jump clobbers the conditions then we use states 2 and 4.
+
+ A similar thing can be done with conditional return insns.
+
+ XXX In case the `target' is an unconditional branch, this conditionalising
+ of the instructions always reduces code size, but not always execution
+ time. But then, I want to reduce the code size to somewhere near what
+ /bin/cc produces. */
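+
+/* For example, given input such as
+
+	cmp	r0, #0
+	beq	.L1
+	add	r1, r1, #1
+   .L1:
+
+   the fsm suppresses the branch and conditionalises the skipped insn:
+
+	cmp	r0, #0
+	addne	r1, r1, #1
+
+   saving the branch entirely.  */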
+
+/* Returns the index of the ARM condition code string in
+ `arm_condition_codes'. COMPARISON should be an rtx like
+ `(eq (...) (...))'. */
+
+static enum arm_cond_code
+get_arm_condition_code (comparison)
+ rtx comparison;
+{
+ enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
+ register int code;
+ register enum rtx_code comp_code = GET_CODE (comparison);
+
+ if (GET_MODE_CLASS (mode) != MODE_CC)
+ mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
+ XEXP (comparison, 1));
+
+ switch (mode)
+ {
+ case CC_DNEmode: code = ARM_NE; goto dominance;
+ case CC_DEQmode: code = ARM_EQ; goto dominance;
+ case CC_DGEmode: code = ARM_GE; goto dominance;
+ case CC_DGTmode: code = ARM_GT; goto dominance;
+ case CC_DLEmode: code = ARM_LE; goto dominance;
+ case CC_DLTmode: code = ARM_LT; goto dominance;
+ case CC_DGEUmode: code = ARM_CS; goto dominance;
+ case CC_DGTUmode: code = ARM_HI; goto dominance;
+ case CC_DLEUmode: code = ARM_LS; goto dominance;
+ case CC_DLTUmode: code = ARM_CC;
+
+ dominance:
+ if (comp_code != EQ && comp_code != NE)
+ abort ();
+
+ if (comp_code == EQ)
+ return ARM_INVERSE_CONDITION_CODE (code);
+ return code;
+
+ case CC_NOOVmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_PL;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_Zmode:
+ case CCFPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ default: abort ();
+ }
+
+ case CCFPEmode:
+ switch (comp_code)
+ {
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LS;
+ case LT: return ARM_MI;
+ default: abort ();
+ }
+
+ case CC_SWPmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_LE;
+ case GT: return ARM_LT;
+ case LE: return ARM_GE;
+ case LT: return ARM_GT;
+ case GEU: return ARM_LS;
+ case GTU: return ARM_CC;
+ case LEU: return ARM_CS;
+ case LTU: return ARM_HI;
+ default: abort ();
+ }
+
+ case CC_Cmode:
+ switch (comp_code)
+ {
+ case LTU: return ARM_CS;
+ case GEU: return ARM_CC;
+ default: abort ();
+ }
+
+ case CCmode:
+ switch (comp_code)
+ {
+ case NE: return ARM_NE;
+ case EQ: return ARM_EQ;
+ case GE: return ARM_GE;
+ case GT: return ARM_GT;
+ case LE: return ARM_LE;
+ case LT: return ARM_LT;
+ case GEU: return ARM_CS;
+ case GTU: return ARM_HI;
+ case LEU: return ARM_LS;
+ case LTU: return ARM_CC;
+ default: abort ();
+ }
+
+ default: abort ();
+ }
+
+ abort ();
+}
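+
+/* Note that CC_SWPmode above handles comparisons whose operands were
+   swapped when the compare was emitted; e.g. `a > b' compiled as
+   `cmp b, a' must be tested with LT, which is why GT maps to ARM_LT.  */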
+
+
+void
+final_prescan_insn (insn, opvec, noperands)
+ rtx insn;
+ rtx *opvec;
+ int noperands;
+{
+ /* BODY will hold the body of INSN. */
+ register rtx body = PATTERN (insn);
+
+ /* This will be 1 if trying to repeat the trick, and things need to be
+ reversed if it appears to fail. */
+ int reverse = 0;
+
+  /* JUMP_CLOBBERS being nonzero means that the conditions are clobbered
+     if a branch is taken, even if the rtl suggests otherwise.  It also
+     means that we have to grub around within the jump expression to find
+     out what the conditions are when the jump isn't taken.  */
+ int jump_clobbers = 0;
+
+ /* If we start with a return insn, we only succeed if we find another one. */
+ int seeking_return = 0;
+
+ /* START_INSN will hold the insn from where we start looking. This is the
+ first insn after the following code_label if REVERSE is true. */
+ rtx start_insn = insn;
+
+ /* If in state 4, check if the target branch is reached, in order to
+ change back to state 0. */
+ if (arm_ccfsm_state == 4)
+ {
+ if (insn == arm_target_insn)
+ {
+ arm_target_insn = NULL;
+ arm_ccfsm_state = 0;
+ }
+ return;
+ }
+
+ /* If in state 3, it is possible to repeat the trick, if this insn is an
+ unconditional branch to a label, and immediately following this branch
+ is the previous target label which is only used once, and the label this
+ branch jumps to is not too far off. */
+ if (arm_ccfsm_state == 3)
+ {
+ if (simplejump_p (insn))
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ {
+ /* XXX Isn't this always a barrier? */
+ start_insn = next_nonnote_insn (start_insn);
+ }
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ reverse = TRUE;
+ else
+ return;
+ }
+ else if (GET_CODE (body) == RETURN)
+ {
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == BARRIER)
+ start_insn = next_nonnote_insn (start_insn);
+ if (GET_CODE (start_insn) == CODE_LABEL
+ && CODE_LABEL_NUMBER (start_insn) == arm_target_label
+ && LABEL_NUSES (start_insn) == 1)
+ {
+ reverse = TRUE;
+ seeking_return = 1;
+ }
+ else
+ return;
+ }
+ else
+ return;
+ }
+
+ if (arm_ccfsm_state != 0 && !reverse)
+ abort ();
+ if (GET_CODE (insn) != JUMP_INSN)
+ return;
+
+  /* This jump might be paralleled with a clobber of the condition codes;
+     the jump should always come first.  */
+ if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
+ body = XVECEXP (body, 0, 0);
+
+#if 0
+ /* If this is a conditional return then we don't want to know */
+ if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
+ && (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN
+ || GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN))
+ return;
+#endif
+
+ if (reverse
+ || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
+ && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
+ {
+ int insns_skipped;
+ int fail = FALSE, succeed = FALSE;
+ /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
+ int then_not_else = TRUE;
+ rtx this_insn = start_insn, label = 0;
+
+ if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
+ {
+ /* The code below is wrong for these, and I haven't time to
+ fix it now. So we just do the safe thing and return. This
+ whole function needs re-writing anyway. */
+ jump_clobbers = 1;
+ return;
+ }
+
+ /* Register the insn jumped to. */
+ if (reverse)
+ {
+ if (!seeking_return)
+ label = XEXP (SET_SRC (body), 0);
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
+ label = XEXP (XEXP (SET_SRC (body), 1), 0);
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
+ {
+ label = XEXP (XEXP (SET_SRC (body), 2), 0);
+ then_not_else = FALSE;
+ }
+ else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
+ seeking_return = 1;
+ else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
+ {
+ seeking_return = 1;
+ then_not_else = FALSE;
+ }
+ else
+ abort ();
+
+ /* See how many insns this branch skips, and what kind of insns. If all
+ insns are okay, and the label or unconditional branch to the same
+ label is not too far away, succeed. */
+ for (insns_skipped = 0;
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
+ {
+ rtx scanbody;
+
+ this_insn = next_nonnote_insn (this_insn);
+ if (!this_insn)
+ break;
+
+ switch (GET_CODE (this_insn))
+ {
+ case CODE_LABEL:
+ /* Succeed if it is the target label, otherwise fail since
+ control falls in from somewhere else. */
+ if (this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case BARRIER:
+ /* Succeed if the following insn is the target label.
+ Otherwise fail.
+ If return insns are used then the last insn in a function
+ will be a barrier. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && this_insn == label)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ break;
+
+ case CALL_INSN:
+ /* If using 32-bit addresses the cc is not preserved over
+ calls */
+ if (TARGET_APCS_32)
+ {
+ /* Succeed if the following insn is the target label,
+ or if the following two insns are a barrier and
+ the target label. */
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && GET_CODE (this_insn) == BARRIER)
+ this_insn = next_nonnote_insn (this_insn);
+
+ if (this_insn && this_insn == label
+ && insns_skipped < max_insns_skipped)
+ {
+ if (jump_clobbers)
+ {
+ arm_ccfsm_state = 2;
+ this_insn = next_nonnote_insn (this_insn);
+ }
+ else
+ arm_ccfsm_state = 1;
+ succeed = TRUE;
+ }
+ else
+ fail = TRUE;
+ }
+ break;
+
+ case JUMP_INSN:
+ /* If this is an unconditional branch to the same label, succeed.
+ If it is to another label, do nothing. If it is conditional,
+ fail. */
+ /* XXX Probably, the tests for SET and the PC are unnecessary. */
+
+ scanbody = PATTERN (this_insn);
+ if (GET_CODE (scanbody) == SET
+ && GET_CODE (SET_DEST (scanbody)) == PC)
+ {
+ if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
+ && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
+ fail = TRUE;
+ }
+ /* Fail if a conditional return is undesirable (eg on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
+ else if (GET_CODE (scanbody) == RETURN
+ && seeking_return)
+ {
+ arm_ccfsm_state = 2;
+ succeed = TRUE;
+ }
+ else if (GET_CODE (scanbody) == PARALLEL)
+ {
+ switch (get_attr_conds (this_insn))
+ {
+ case CONDS_NOCOND:
+ break;
+ default:
+ fail = TRUE;
+ break;
+ }
+ }
+ break;
+
+ case INSN:
+ /* Instructions using or affecting the condition codes make it
+ fail. */
+ scanbody = PATTERN (this_insn);
+ if (! (GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
+ || get_attr_conds (this_insn) != CONDS_NOCOND)
+ fail = TRUE;
+ break;
+
+ default:
+ break;
+ }
+ }
+ if (succeed)
+ {
+ if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
+ arm_target_label = CODE_LABEL_NUMBER (label);
+ else if (seeking_return || arm_ccfsm_state == 2)
+ {
+ while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
+ {
+ this_insn = next_nonnote_insn (this_insn);
+ if (this_insn && (GET_CODE (this_insn) == BARRIER
+ || GET_CODE (this_insn) == CODE_LABEL))
+ abort ();
+ }
+ if (!this_insn)
+ {
+		  /* Oh dear!  We ran off the end; give up.  */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ return;
+ }
+ arm_target_insn = this_insn;
+ }
+ else
+ abort ();
+ if (jump_clobbers)
+ {
+ if (reverse)
+ abort ();
+ arm_current_cc =
+ get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
+ 0), 0), 1));
+ if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+ else
+ {
+ /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
+ what it was. */
+ if (!reverse)
+ arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
+ 0));
+ }
+
+ if (reverse || then_not_else)
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ }
+      /* Restore recog_operand (getting the attributes of other insns can
+	 destroy this array, but final.c assumes that it remains intact
+	 across this call; since the insn has been recognized already we
+	 call recog directly).  */
+ recog (PATTERN (insn), insn, NULL_PTR);
+ }
+}
+
+#ifdef AOF_ASSEMBLER
+/* Special functions only needed when producing AOF syntax assembler. */
+
+rtx aof_pic_label = NULL_RTX;
+struct pic_chain
+{
+ struct pic_chain *next;
+ char *symname;
+};
+
+static struct pic_chain *aof_pic_chain = NULL;
+
+rtx
+aof_pic_entry (x)
+ rtx x;
+{
+ struct pic_chain **chainp;
+ int offset;
+
+ if (aof_pic_label == NULL_RTX)
+ {
+ /* This needs to persist throughout the compilation. */
+ end_temporary_allocation ();
+ aof_pic_label = gen_rtx (SYMBOL_REF, Pmode, "x$adcons");
+ resume_temporary_allocation ();
+ }
+
+ for (offset = 0, chainp = &aof_pic_chain; *chainp;
+ offset += 4, chainp = &(*chainp)->next)
+ if ((*chainp)->symname == XSTR (x, 0))
+ return plus_constant (aof_pic_label, offset);
+
+ *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
+ (*chainp)->next = NULL;
+ (*chainp)->symname = XSTR (x, 0);
+ return plus_constant (aof_pic_label, offset);
+}
+
+void
+aof_dump_pic_table (f)
+ FILE *f;
+{
+ struct pic_chain *chain;
+
+ if (aof_pic_chain == NULL)
+ return;
+
+ fprintf (f, "\tAREA |%s$$adcons|, BASED %s%s\n",
+ reg_names[PIC_OFFSET_TABLE_REGNUM], REGISTER_PREFIX,
+ reg_names[PIC_OFFSET_TABLE_REGNUM]);
+ fputs ("|x$adcons|\n", f);
+
+ for (chain = aof_pic_chain; chain; chain = chain->next)
+ {
+ fputs ("\tDCD\t", f);
+ assemble_name (f, chain->symname);
+ fputs ("\n", f);
+ }
+}
+
+int arm_text_section_count = 1;
+
+char *
+aof_text_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ if (flag_pic)
+ strcat (buf, ", PIC, REENTRANT");
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare
+ a function as imported near the beginning of the file, and then to
+ export it later on. It is, however, possible to delay the decision
+ until all the functions in the file have been compiled. To get
+ around this, we maintain a list of the imports and exports, and
+ delete from it any that are subsequently defined. At the end of
+ compilation we spit the remainder of the list out before the END
+ directive. */
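+
+/* For example, a file that first calls and later defines `foo' has
+   `foo' added to the list at the call site and removed again when the
+   definition is assembled, so no stray IMPORT is emitted at the end.  */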
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+int arm_main_function = 0;
+
+void
+aof_dump_imports (f)
+ FILE *f;
+{
+ /* The AOF assembler needs this to cause the startup code to be extracted
+     from the library.  Bringing in __main causes the whole thing to work
+ automagically. */
+ if (arm_main_function)
+ {
+ text_section ();
+ fputs ("\tIMPORT __main\n", f);
+ fputs ("\tDCD __main\n", f);
+ }
+
+ /* Now dump the remaining imports. */
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif /* AOF_ASSEMBLER */
+
+/* CYGNUS LOCAL */
+
+/* Return non-zero if X is a symbolic operand (contains a SYMBOL_REF). */
+int
+symbolic_operand (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case CONST_DOUBLE:
+ case CONST:
+ case MEM:
+ case PLUS:
+ return symbolic_operand (mode, XEXP (x, 0));
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Handle a special case when computing the offset
+ of an argument from the frame pointer. */
+int
+arm_debugger_arg_offset (value, addr)
+ int value;
+ struct rtx_def * addr;
+{
+ rtx insn;
+
+ /* We are only interested if dbxout_parms() failed to compute the offset. */
+ if (value != 0)
+ return 0;
+
+ /* We can only cope with the case where the address is held in a register. */
+ if (GET_CODE (addr) != REG)
+ return 0;
+
+ /* If we are using the frame pointer to point at the argument, then an offset of 0 is correct. */
+ if (REGNO (addr) == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+
+ /* Oh dear. The argument is pointed to by a register rather
+ than being held in a register, or being stored at a known
+ offset from the frame pointer. Since GDB only understands
+ those two kinds of argument we must translate the address
+ held in the register into an offset from the frame pointer.
+ We do this by searching through the insns for the function
+ looking to see where this register gets its value. If the
+ register is initialised from the frame pointer plus an offset
+ then we are in luck and we can continue, otherwise we give up.
+
+ This code is exercised by producing debugging information
+ for a function with arguments like this:
+
+ double func (double a, double b, int c, double d) {return d;}
+
+ Without this code the stab for parameter 'd' will be set to
+ an offset of 0 from the frame pointer, rather than 8. */
+
+ /* The if() statement says:
+
+ If the insn is a normal instruction
+ and if the insn is setting the value in a register
+ and if the register being set is the register holding the address of the argument
+     and if the address is computed by an addition
+ that involves adding to a register
+ which is the frame pointer
+ a constant integer
+
+ then... */
+
+ for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
+ {
+ if ( GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
+ && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == HARD_FRAME_POINTER_REGNUM
+ && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
+ )
+ {
+ value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
+
+ break;
+ }
+ }
+
+ if (value == 0)
+ {
+      warning ("Unable to compute real location of stacked parameter");
+ value = 8; /* XXX magic hack */
+ }
+
+ return value;
+}
+
+/* Return nonzero if this insn is a call insn. */
+
+static int
+is_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == CALL_INSN)
+ return 1;
+
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == CALL_INSN)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if this insn, which is known to occur after a call insn,
+ will not stop the call from being interpreted as a tail call. */
+
+static int
+is_safe_after_call_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == NOTE)
+ return 1;
+
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pattern = PATTERN (insn);
+
+ if (GET_CODE (pattern) == USE)
+ return 1;
+
+ /* Special case: Assignment of the result of the call that
+ has just been made to the return value for this function
+ will result in a move from the result register to itself.
+ Detect this case and rely upon the fact that a later pass
+ will eliminate this redundant move. */
+
+ if (GET_CODE (pattern) == SET
+ && GET_CODE (SET_SRC (pattern)) == REG
+ && GET_CODE (SET_DEST (pattern)) == REG
+ && REGNO (SET_SRC (pattern)) == REGNO (SET_DEST (pattern)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return nonzero if this function is suitable for a tail call optimisation. */
+
+int
+can_tail_call_optimise ()
+{
+ rtx insn;
+ int found_call = 0;
+
+ /* Functions that need frames cannot have tail call optimisations applied. */
+ if (get_frame_size() > 0
+ || current_function_anonymous_args)
+ return 0;
+
+ /* Functions that perform more than one function call,
+ or that perform some computation after their only
+ function call cannot be optimised either. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ /* Repeat the tests for the insns in the epilogue list. */
+ for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
+ {
+ if (is_call_insn (insn))
+ {
+ if (found_call)
+ return 0;
+ else
+ found_call = 1;
+ }
+ else if (found_call)
+ {
+ if (! is_safe_after_call_insn (insn))
+ return 0;
+ }
+ }
+
+ return found_call;
+}
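+
+/* For example, a function such as
+
+	int f (int x) { return g (x); }
+
+   has no frame and makes exactly one call with no real work after it
+   (the r0 = r0 result move is caught by is_safe_after_call_insn), so
+   the test above succeeds and the call to g can become a tail call.  */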
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL nickc */
+int
+ok_integer_or_other (operand)
+ rtx operand;
+{
+ if (GET_CODE (operand) == CONST_INT)
+ {
+ if (const_ok_for_arm (INTVAL (operand))
+ || const_ok_for_arm (~INTVAL (operand)))
+ return 1;
+ return 0;
+ }
+
+ return 1;
+}
+/* END CYGNUS LOCAL */
diff --git a/gcc_arm/config/arm/arm_020422.h b/gcc_arm/config/arm/arm_020422.h
new file mode 100755
index 0000000..ec12928
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.h
@@ -0,0 +1,2309 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines deprecated -m[236] if neither of -mapcs-{26,32} is set.
+ ??? Delete this for 2.9. */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture: can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+     "Load shorts a word at a time" },					\
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+     "Support calls between THUMB and ARM instruction sets" },		\
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+     "Specify the minimum bit alignment of structures" }		\
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
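+
+/* For example, under this rule an `unsigned char' variable is held
+   zero-extended in a full 32-bit register, so a test such as
+
+	unsigned char c;
+	...
+	if (c == 0xff) ...
+
+   can compare the register directly, with no extension insn.  */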
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+   numbered.
+   This is false in little-endian mode, and also in big-endian mode when
+   TARGET_LITTLE_WORDS is in effect. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+   sfp This isn't either. It is used during rtl generation
+   since the offset between the frame pointer and the
+   autos isn't known until after register allocation.
+   afp Nor is this; it is only needed because of non-local
+   gotos. Without it fp appears to be used and the
+   elimination code won't get rid of sfp. It tracks
+   fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && (REGNO) != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
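+
+/* Worked example (illustrative only): a DImode value (8 bytes) starting
+   in a core register such as r0 needs (8 + 3) / 4 = 2 consecutive
+   registers, while any mode held in an FPU register (regno 16-23)
+   counts as exactly 1.  */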
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? ((REGNO) == CC_REGNUM) : \
+ ((REGNO) < 16 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which registers should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE) >= 0 && (VALUE) <= 32) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
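+
+/* Illustrative values, assuming const_ok_for_arm accepts an 8-bit
+   constant rotated right by any even amount (the ARM immediate format):
+	'I'  0xFF, 0x3FC00	encodable immediates
+	     0x101, 0x1FE	rejected (9 bits wide / odd rotation)
+	'K'  0xFFFFFF00		since ~0xFFFFFF00 == 0xFF is encodable
+	'L'  -0x3FC		since 0x3FC is encodable.  */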
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
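+
+/* Worked example (illustrative): reloading the SImode address
+   (plus (reg r4) (const_int 4100)) gives low = 4100 & 0xfff = 4 and
+   high = 4096, so the high part is reloaded into a base register and
+   the low part stays in the mem, roughly
+
+	add	rT, r4, #4096
+	ldr	rD, [rT, #4]
+
+   where rT stands for whatever reload register is chosen.  */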
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+   passed on the stack (function_prologue will indeed make it end up on the
+ stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM).nregs < 16 && 16 < (CUM).nregs + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM).nregs / 4 : 0)
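+
+/* Example of the two macros above (illustrative only): for
+   f (int a, int b, int c, long long d), a, b and c go in r0-r2; when d
+   is reached (CUM).nregs is 12, FUNCTION_ARG returns r3, and
+   FUNCTION_ARG_PARTIAL_NREGS returns 4 - 12/4 = 1, so the low word of d
+   is passed in r3 and the high word on the stack.  */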
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+  /* This is the number of bytes of arguments scanned so far. */
+ int nregs;
+  /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
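+
+/* For instance (illustrative): when FNTYPE returns a structure in
+   memory, aggregate_value_p is true and (CUM).nregs starts at 4,
+   reflecting that r0 already carries the hidden structure-return
+   address; the first visible argument is then passed in r1.  */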
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3)
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
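+
+/* Worked example (illustrative): for int f (int fmt, ...), CUM has
+   already been advanced past the named argument, so (CUM).nregs is 4
+   and PRETEND_SIZE becomes 16 - 4 = 12; the prologue pushes r1-r3 so
+   the anonymous arguments sit contiguously above fmt on the stack.  */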
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+   puts this BEFORE the prologue, but gcc puts it afterwards. Keeping the
+   ``mov ip, lr'' seems like a good idea, to stay with the cc convention;
+   ``prof'' doesn't seem to mind about this! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + ((get_frame_size () + 3) & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = ((get_frame_size () + 3) & ~3) + offset; \
+ } \
+ } \
+}
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) ((value) ? (value) : arm_debugger_arg_offset (value, addr))
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+   since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#else
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#endif
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+   flag in the function name, otherwise if the function is static or
+   known to be defined in this file then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+ ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
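+
+/* The whole encoding scheme at work (illustrative only): a static
+   function 'bar' receives the short-call flag and is known internally
+   as "^bar", while a weak function 'baz' becomes "#baz"; since
+   ASM_OUTPUT_LABELREF strips the flag character again, the assembler
+   output refers to the plain names 'bar' and 'baz'.  */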
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+      /* NASTY: this limits the addressing range of unsigned byte loads. */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
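+
+/* For concreteness (illustrative): together with GO_IF_LEGITIMATE_ADDRESS
+   below, this accepts SImode addresses such as
+
+	[r0, r1]		register index
+	[r0, r1, lsl #2]	index scaled by a power of two
+	[r0, #4095]		12-bit immediate offset
+
+   while HImode on an arm_arch4 target is limited to offsets in the
+   range -255..255, and hard-float FP references to word-aligned
+   offsets within +/-1020.  */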
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+   Allow REG-only and AUTOINC-REG if handling TImode or HImode. Other symbol
+   refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+ else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
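+
+/* Worked example (illustrative): an SImode reference to
+   (plus (reg r1) (const_int 0x5004)) is split as n = 0x5000 (itself a
+   legal immediate) and low_n = 4, producing roughly
+
+	add	rT, r1, #0x5000
+	ldr	rD, [rT, #4]
+
+   where rT is a fresh pseudo obtained from gen_reg_rtx.  */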
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned;
+   unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return 7;
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
+
+
+
+/* Try to generate sequences that don't involve branches; we can then use
+   conditional instructions. */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
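+
+/* Worked example (illustrative): on a host where HOST_BITS_PER_WIDE_INT
+   is 64, ARM_SIGN_EXTEND (0x80000000) yields 0xffffffff80000000 and
+   ARM_SIGN_EXTEND (0x7fffffff) is unchanged; on a 32-bit host the value
+   always passes through untouched.  */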
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, (int) offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handle PIC addresses specially.  */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
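+
+/* Illustrative sketch: for an X of the shape (const (minus (... SYM)
+   (... LABEL))) recognised by is_pic, the two operands of the inner
+   MINUS are unwrapped one level and printed as "SYM - (LABEL)";
+   anything else falls through to output_addr_const.  (SYM and LABEL
+   are hypothetical names.)  */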
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0) \
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
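+
+/* As an illustrative sketch, for DELTA == 0x10001 with the `this'
+   pointer in r0, the thunk body becomes
+	add	r0, r0, #1
+	add	r0, r0, #65536
+	b	<function>
+   since the delta is consumed in chunks that each fit an ARM immediate
+   field.  */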
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
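+
+/* For example, in 26-bit mode a saved lr of 0x0c000008 (processor state
+   in the top and bottom bits) masks down to the plain word address
+   0x00000008.  */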
+
+/* Declarations for functions in arm.c -- they are not true prototypes,
+   since the argument types are not fully defined yet.  */
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_020422.md b/gcc_arm/config/arm/arm_020422.md
new file mode 100755
index 0000000..c8f974f
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020422.md
@@ -0,0 +1,6508 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999, 2002 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;; Note: sin and cos are no longer used.
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try to group them together).
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor can run in parallel with other, basic instructions
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches.
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the condition codes are not defined if a branch is
+; taken, but are defined if the branch was not taken; the effect is to limit
+; the branch elimination scanning.
+
+; NOCOND means that the condition codes neither are altered by nor affect
+; the output of this insn.
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly.  When writing, the two
+; addresses are fully pipelined.  However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word.  If FCLK and MCLK were asynchronous
+; (they aren't allowed to be at present) then there would be a startup cost of
+; 1 MCLK cycle to add as well.
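+;
+; Concretely, the ready delays in the "write_buf" unit further below run
+; 5, 7, 9 and 11 cycles for 1, 2, 3 and 4 word stores: a fixed overhead of
+; three cycles plus the two FCLK cycles per word described above.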
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially), the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately since we do not go through the normal arm_ccfsm_state
+;; processing we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register; what we don't want is for something being
+;; written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
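+;; As a worked example: ADDS on a + b sets the carry flag exactly when
+;; a + b >= 2^32, which is precisely when the truncated result
+;; (a + b) mod 2^32 is unsigned-less-than a (or b); so a GEU/LTU test of
+;; the sum against either operand can be answered from the carry alone.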
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then it will be
+; pre-loaded into a register, taking at least two insns.  We might be able
+; to merge that load with an add, but it depends on the exact value.
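+;
+; For example (illustrative), x + 0x10001 has no single-instruction form
+; because 0x10001 is not an 8-bit value rotated by an even amount, but it
+; splits into two valid immediates:
+;	add	r0, r1, #65536
+;	add	r0, r0, #1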
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+  /* This code is similar to the approach followed in movsi, but it must
+     generate exactly two insns.  */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+      /* We might be able to do this as (larger number - small number).  */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+  /* If we got here, we have found a way of doing it in two instructions.
+     The two constants are in val and temp.  */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent the operands 0 and 1 being the same
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check for start+length
+;; less than or equal to 8. This is necessary for the bitfield to fit within
+;; a single byte.  This pattern was deleted Feb 25, 1999 in egcs, so we
+;; just disable it for 99r1.
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+      /* A trick: since we are setting the bottom bits in the word,
+	 we can shift operand[3] up, operand[0] down, OR them together
+	 and rotate the result back again.  This takes 3 insns, and
+	 the third might be mergeable into another op.  */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
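+      /* Worked example (illustrative): for width == 8 and start_bit == 0
+	 the emitted sequence computes
+	     op0 = operands[3] << 24;
+	     op1 = (operands[0] >> 8) | op0;
+	     result = rotate_left (op1, 8);
+	 leaving the low 8 bits of operands[3] at the bottom of the word
+	 and the original bits 8..31 unchanged.  */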
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; Constants for operand 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
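+;; For example, the pattern above matches
+;; (and (not (ashift r2 (const_int 3))) r1) and emits the single insn
+;; "bic r0, r1, r2, lsl #3".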
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C),
+;; (NOT D), we can sometimes merge the final NOT into one of the following
+;; insns.
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
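+;; E.g. with the first alternative (operand 1 tied to the destination),
+;; r0 = smax (r0, r1) comes out as "cmp r0, r1" followed by "movlt r0, r1".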
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
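+;; E.g. storing (smax r1 r2) gives "cmp r1, r2", "strge r1, <mem>" and
+;; "strlt r2, <mem>": minmax_code maps SMAX to GE, and %d3/%D3 print the
+;; condition and its inverse.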
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+  "GET_CODE (operands[1]) != REG
+   || (REGNO (operands[1]) != FRAME_POINTER_REGNUM
+       && REGNO (operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set.  To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth.  Similarly for
+;; (neg (abs...)).
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
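+;; The second alternative is branchless: "%1, asr #31" is 0 for non-negative
+;; values and -1 for negative ones, so the EOR leaves non-negative values
+;; alone and complements negative ones, and the SUB then adds back 1,
+;; giving (~x) + 1 = -x.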
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN, COS, TAN, and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
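+;; When the LDRH form cannot be used, the extension is done with a shift
+;; pair through a scratch, e.g. "mov r2, r1, asl #16" then
+;; "mov r0, r2, lsr #16".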
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+   if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+    /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+   if (bad_signed_byte_operand (operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+    /* Ensure the sum is in the correct canonical form.  */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the ARM; there just aren't enough
+;; registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+  /* Everything except mem = const or mem = mem can be done easily.  */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+ So that the optimizer does not confuse this with a normal symbol load
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
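+;; The "+ 8" reflects the ARM pipeline: reading the PC yields the address
+;; of the current insn plus 8.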
+
+;; If copying one reg to another, we can set the condition codes according to
+;; its value.  Such a move is common after a return from a subroutine when the
+;; result is being tested against zero.
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+		    && ((INTVAL (offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+  /* Handle loading a large integer during reload.  */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
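+;; This relies on the old ARM load behaviour: a word load from an address
+;; that is halfword- but not word-aligned returns the word rotated by 16
+;; bits, so a single LDR at offset + 2 implements the 16-bit rotate.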
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+  /* Everything except mem = const or mem = mem can be done easily.  */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -msoft-float.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ rtx ops[3];
+
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version. This is essentially the same as movdi.
+;; Do not use `f' as a constraint to prevent reload from ever trying to use
+;; an `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The ARM can load/store any set of registers, provided that they are in
+;; ascending order, but generating such arbitrary sets is beyond GCC, so
+;; stick with what it knows.
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point (integer) registers.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point (integer) registers.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to smaller blocks as well, but it clobbers so
+;; many registers that there is then probably a better way.
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file, since (set (reg x) (reg x)) is a no-op
+; that is deleted later on.  The match_dup will match the mode here, so
+; that mode changes of the condition codes aren't lost by this, even though
+; we don't specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; patterns to match conditional branch insns
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
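+
+;; The arm_ccfsm_state tests above implement branch elision: when the
+;; final-scan state machine in arm.c has arranged for the branched-over
+;; instructions to be executed conditionally, the branch itself is
+;; redundant and the pattern outputs nothing.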
+
+
+;; scc insns
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
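+
+;; Illustrative example (not from the original sources): `x = (a == b)'
+;; goes through "seq" and "*mov_scc" above, producing
+;;   cmp   r0, r1
+;;   movne r2, #0
+;;   moveq r2, #1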
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+
+;; Conditional move insns
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
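+
+;; Illustrative example (not from the original sources): `x = c ? 4 : 8',
+;; with the result of the comparison already in the CC register, uses one
+;; of the two-instruction alternatives above:
+;;   movne r0, #4
+;;   moveq r0, #8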
+
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; The length is the worst case; normally only two instructions are needed.
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs to be handled specially, as they are not
+;; valid general addresses.  The 'a' causes the operand to be treated as
+;; an address, i.e. no '#' is output.
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 14))]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set
+;; the type attribute to "load".
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+ (match_operand:SI 4 "" "")] ; Out of range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
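+
+;; Illustrative example (not from the original sources): a dense four-way
+;; switch on r0 dispatches (in the non-PIC case) as
+;;   cmp   r0, #3
+;;   ldrls pc, [pc, r0, asl #2]
+;;   b     .Ldefault
+;; with the table of label addresses placed directly after the branch.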
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that. This is a kludge, but there doesn't
+;; seem to be a way around it. Most of the predicates have to be null
+;; because the format can be generated part way through reload, so
+;; if we don't match it as soon as it becomes available, reload doesn't know
+;; how to reload pseudos that haven't got hard registers; the constraints will
+;; sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; We have no idea how long the add_immediate sequence is; it could be up
+; to 4 instructions.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+ char* opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+         are constants.  */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+         are constants.  */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
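+
+;; Illustrative example (not from the original sources): `x = (c < 0) ? y : 0'
+;; hits the LT special case above and needs no compare at all:
+;;   and r0, r1, r2, asr #31   @ mask is all ones iff c is negative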
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
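+
+;; The "#" template means this insn is never output as-is; it is expected
+;; to be broken up by a define_split into a compare followed by one of the
+;; conditional patterns such as "*if_plus_move" below.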
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
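+
+;; Illustrative example (not from the original sources): with everything in
+;; registers, `x = (c < 0) ? (x + y) : x' takes the two-instruction path
+;; above (x in r3, y in r1, c in r2):
+;;   and r0, r1, r2, asr #31   @ r0 = (c < 0) ? y : 0
+;;   add r0, r3, r0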
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
+;; The ARM can support extended pre-increment instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
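+
+;; An illustrative sketch (hypothetical register numbers): storing through a
+;; base register and then advancing that base by the same index matches the
+;; first pattern below and is emitted as a single write-back access such as
+;;	strb	r3, [r0, r2]!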
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; The ARM can also support extended post-increment expressions, but combine
+; doesn't try these.
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; for these patterns to match.
+; When loading we must check that the base register isn't trampled by the
+; load.  In such cases this isn't a post-inc expression.
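+;
+; An illustrative sketch (hypothetical registers): a load followed
+; immediately by an increment of its base becomes a post-indexed access,
+;	ldrb	r0, [r1], r2
+; but only when r0 differs from both r1 and r2; a load into the base
+; register would trample the address before it could be updated.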
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole
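+; As an illustrative sketch (hypothetical registers), it replaces
+;	mov	r0, r1
+;	cmp	r1, #0
+; with the single flag-setting instruction
+;	subs	r0, r1, #0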
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples.  If the ordering is
+; reversed, check that the memory references aren't volatile.
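+;
+; An illustrative sketch (hypothetical registers): four loads from
+; consecutive words,
+;	ldr	r0, [r4]
+;	ldr	r1, [r4, #4]
+;	ldr	r2, [r4, #8]
+;	ldr	r3, [r4, #12]
+; can be emitted as the single multiple load
+;	ldmia	r4, {r0, r1, r2, r3}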
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is when
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so eliminating
+;; it upsets the offset calculations.
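+;;
+;; An illustrative sketch (hypothetical function foo): a function whose body
+;; would otherwise end with
+;;	bl	foo
+;;	<epilogue>
+;;	mov	pc, lr
+;; can instead restore its registers first and finish with a direct
+;;	b	foo
+;; so that foo's own return carries execution straight back to our caller.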
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If calling a subroutine and then jumping back to somewhere else, but not
+;; too far away, then we can set the link register with the branch address
+;; and jump direct to the subroutine. On return from the subroutine
+;; execution continues at the branch; this avoids a prefetch stall.
+;; We use the length attribute (via short_branch ()) to establish whether or
+;; not this is possible; this is the same approach as the SPARC port uses.
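+;;
+;; An illustrative sketch (hypothetical function foo and label L): instead of
+;;	bl	foo
+;;	b	L
+;; the peepholes below emit
+;;	mov	lr, pc
+;;	add	lr, lr, #(L - . - 4)
+;;	b	foo
+;; so that foo's return lands directly at L.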
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+  "0 && GET_CODE (operands[1]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
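+;;
+;; An illustrative sketch (hypothetical operands): testing
+;; (byte << 24) == 0x2a000000 is equivalent to testing the zero-extended
+;; byte against 0x2a, so the shifted compare can be replaced by
+;;	ldrb	r2, [r1]
+;;	cmp	r2, #0x2a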
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions adding to them. We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work. However this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
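+;;
+;; An illustrative sketch (hypothetical operands): after reload, the split
+;; below turns
+;;	(set r0 (if_then_else (lt r1 r2) r3 r4))   [clobbering CC]
+;; into an explicit compare of r1 with r2 followed by an if_then_else that
+;; only tests the condition-code register against zero.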
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence
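+;;
+;; An illustrative sketch (hypothetical registers): sign-extracting bit 3 of
+;; r1 must yield 0 or -1, which the first pattern achieves with
+;;	ands	r0, r1, #8
+;;	mvnne	r0, #0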
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
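+;;
+;; As an illustrative sketch (hypothetical register set), a prologue saving
+;; r4, r5 and lr would cause the pattern below to assemble
+;;	stmfd	sp!, {r4, r5, lr}
+;; one register name at a time.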
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/arm_020428.h b/gcc_arm/config/arm/arm_020428.h
new file mode 100755
index 0000000..2e98c66
--- /dev/null
+++ b/gcc_arm/config/arm/arm_020428.h
@@ -0,0 +1,2309 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999, 2002 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+   If a non-embedded environment (i.e. a "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines the deprecated -m[236] options if neither of
+   -mapcs-{26,32} is set.
+   ??? Delete this for 2.9.  */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+ ??? Remove support for this style in 2.9.*/
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture; it can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx.  Takes out bits that assume restoration
+   of condition flags when returning from a branch & link (i.e. a function). */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode.  Takes out the
+   bit that assumes restoration of the condition flags when returning from a
+   branch and link (i.e. a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prologue from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+ "Use the 32bit version of the APCS" }, \
+ {"apcs-26", -ARM_FLAG_APCS_32, \
+ "Use the 26bit version of the APCS" }, \
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+ "Load words a byte at a time" }, \
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+ "Support calls between THUMB and ARM instructions sets" }, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
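+
+/* An illustrative sketch, not part of this header: the option driver is
+   assumed to walk TARGET_SWITCHES and, for each -m<name> given, OR a
+   positive mask into target_flags or AND out a negated one, so paired
+   entries such as "apcs-frame"/"no-apcs-frame" toggle the same bit.
+   The helper name below is hypothetical.
+
+     static void apply_m_option (int mask)
+     {
+       if (mask > 0)
+         target_flags |= mask;       -- e.g. -mapcs-frame
+       else
+         target_flags &= ~(-mask);   -- e.g. -mno-apcs-frame
+     }
+*/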
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+ "Specify the minumum bit alignment of structures" } \
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
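+
+/* A worked example of the promotion above: a QImode scalar is always
+   widened to SImode with UNSIGNEDP = 1, so
+
+     char c;    computations on c use a 32-bit reg, zero extended;
+     short s;   computations on s use a 32-bit reg, sign extended,
+                except under -mshort-load-bytes, where shorts are
+                assembled from byte loads and zero extension is the
+                cheaper form. */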
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+   numbered.
+   This is true only in big-endian mode, unless -mwords-little-endian has
+   been given for compatibility with older versions of GCC. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP + 1 AFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
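+
+/* For reference, the 1 bits above are (in register-number order):
+   r10 (sl), r11 (fp), r13 (sp), r15 (pc), and the three fake registers
+   cc (24), sfp (25) and afp (26).  CONDITIONAL_REGISTER_USAGE below
+   frees r10 again when neither -fpic nor -mapcs-stack-check is in
+   effect. */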
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug causing a return value r0 to be
+ trampled. This effectively reduces the number of available registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bytes wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
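+
+/* Examples of the above: DImode in r0 occupies r0-r1 (two regs) and
+   SImode in any ARM reg occupies one, while DFmode or XFmode in an FPU
+   reg (16-23) occupies just one, since each FPU register holds a full
+   FP value whatever the mode. */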
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which registers should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
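+
+/* Bit n of each mask corresponds to hard register n: bits 0-15 are the
+   ARM registers, bits 16-23 the FPU registers, bit 24 cc, bit 25 sfp
+   and bit 26 afp.  Thus 0x0FF0000 is f0-f7, and 0x200FFFF is r0-r15
+   plus the soft frame pointer. */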
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE) >= 0 && (VALUE) <= 32) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
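+
+/* A rough sketch of the test behind `I' (the real const_ok_for_arm lives
+   in arm.c): an immediate is valid if it is an 8-bit value rotated right
+   by an even number of bits.  The function below only illustrates that
+   rule; it is not the compiler's implementation.
+
+     static int const_ok_sketch (unsigned long v)
+     {
+       int rot;
+       v &= 0xffffffff;
+       for (rot = 0; rot < 32; rot += 2)
+         {
+           unsigned long r = rot == 0 ? v
+             : ((v << rot) | (v >> (32 - rot))) & 0xffffffff;
+           if (r <= 0xff)
+             return 1;
+         }
+       return 0;
+     }
+
+   Hence 0xff, 0x3fc and 0xff000000 satisfy `I', while 0x101 does not. */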
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
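+
+/* Example of the split performed above, assuming r4 as base and an
+   SImode access at offset 0x3004 (too large for a 12-bit offset):
+
+     before:  ldr rD, [r4, #0x3004]    -- not a legitimate address
+     after:   add rT, r4, #0x3000      -- high part, a valid immediate
+              ldr rD, [rT, #4]         -- low part stays in the mem
+
+   Here val = 0x3004 gives low = 4 and high = 0x3000, so a single add is
+   reloaded instead of a full constant load. */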
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bytes while FPU regs can hold any FP mode. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+ ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+ passed in the stack (function_prologue will indeed make it pass in the
+ stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM).nregs < 16 && 16 < (CUM).nregs + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM).nregs / 4 : 0)
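+
+/* Example of the two macros above for  f (int a, double d, int e)  with
+   software floating point: a goes in r0 (nregs 0 -> 4), d in r1-r2
+   (4 -> 12) and e in r3 (12 -> 16); a further word would find
+   nregs >= 16 and go on the stack.  A double arriving at nregs == 12 is
+   split: FUNCTION_ARG_PARTIAL_NREGS is 1, so one word goes in r3 and
+   the other in memory. */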
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+ /* This is the number of bytes of arguments scanned so far. */
+ int nregs;
+ /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
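+
+/* Example: for  int f (int fmt, ...)  the named arg leaves (CUM).nregs
+   at 4, so PRETEND_SIZE becomes 12 and the prologue pushes r1-r3
+   immediately below any stacked anonymous args, giving varargs code one
+   contiguous argument block to walk. */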
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+   puts this BEFORE the prolog but gcc puts it afterwards. Keeping the
+   ``mov ip,lr'' seems like a good idea, to stay with the cc convention;
+   ``prof'' doesn't seem to mind either way. */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + ((get_frame_size () + 3) & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = ((get_frame_size () + 3) & ~3) + offset; \
+ } \
+ } \
+}
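+
+/* A worked example of the above: a function that saves r4-r6 and lr,
+   needs a frame pointer, has 8 bytes of outgoing args and a 12-byte
+   frame.  The register loop gives offset = 12 + 3 * 4 = 24, so
+   eliminating FRAME_POINTER_REGNUM into HARD_FRAME_POINTER_REGNUM
+   yields -24, while ARG_POINTER_REGNUM into STACK_POINTER_REGNUM
+   yields 24 + 4 (lr) + 8 (outgoing) + 12 (frame) = 48. */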
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) \
+  ((value) ? (value) : arm_debugger_arg_offset ((value), (addr)))
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
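+
+/* Why the template works: reading pc on the ARM yields the current
+   instruction's address + 8, so for a trampoline at address T:
+
+     T+0:  ldr r8, [pc, #0]    -- loads the word at (T+0)+8 = T+8
+     T+4:  ldr pc, [pc, #0]    -- loads the word at (T+4)+8 = T+12
+     T+8:  .word chain         -- filled in by INITIALIZE_TRAMPOLINE
+     T+12: .word function      -- likewise
+
+   which is exactly where INITIALIZE_TRAMPOLINE below stores CXT and
+   FNADDR. */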
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#else
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+#endif
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+   flag in the function name, otherwise if the function is static or
+   known to be defined in this file then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
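+
+/* Example of the encoding: a weak function foo is given the assembler
+   name "#foo" and a file-static one "^foo".  The call expanders can
+   then choose a long or short call by testing the prefix, and
+   ARM_NAME_ENCODING_LENGTHS / arm_strip_name_encoding below remove it
+   before the name reaches the assembler. */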
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+   ARM backend uses function name prefixes to encode attributes. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: this also limits the addressing range of unsigned byte loads. */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non floating SYMBOL_REF to the constant pool.
+ Allow REG-only and AUTOINC-REG if handling TImode or HImode. Other symbol
+ refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
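+
+/* Some SImode addresses accepted above (ARM syntax, for illustration):
+
+     [r1]                base register
+     [r1], #4            post-increment (POST_INC)
+     [r1, r2]            base plus index register
+     [r1, r2, lsl #2]    base plus index scaled by a power of two
+     [r1, #-4080]        base plus offset, |offset| < 4096
+
+   whereas HImode on an architecture 4 core allows no shifted index and
+   only |offset| < 256, matching the ldrh addressing modes. */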
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+ else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
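+
+/* Example of the conversion described above: an SImode access to
+   [r2, #0x5432] becomes
+
+     add rN, r2, #0x5000    -- high part, a valid immediate
+     ... [rN, #0x432]       -- low part, a valid 12-bit offset
+
+   with rN a fresh pseudo, so cse can share the add between neighbouring
+   references to the same object. */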
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* signed 'char' is most compatible, but RISC OS wants it unsigned.
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the arm, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (OUTER_CODE == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if ((OUTER_CODE == COMPARE \
+ || OUTER_CODE == PLUS || OUTER_CODE == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return 7;
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
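+
+/* Example: in (LE x 0xfff) the constant is neither a valid immediate nor
+   valid when negated, so arm_canonicalize_comparison rewrites the test
+   as (LT x 0x1000), whose constant is a single rotated 8-bit immediate;
+   the same condition is then tested without a constant-building insn. */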
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
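+
+/* With the usual empty REGISTER_PREFIX these produce, for r4:
+     stmfd	sp!,{r4}
+     ldmfd	sp!,{r4}  */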
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
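+
+/* E.g. on a host where HOST_WIDE_INT is 64 bits wide,
+   ARM_SIGN_EXTEND (0x80000000) yields 0xffffffff80000000 while
+   ARM_SIGN_EXTEND (0x7fffffff) is unchanged; on a 32-bit host the
+   macro is the identity.  */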
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+	  fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX,	\
+		   base_reg_name, (int) offset);		\
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
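+
+/* Sample expansions (assuming an empty REGISTER_PREFIX):
+     (reg r0)				-> [r0, #0]
+     (plus (reg r1) (const_int 8))	-> [r1, #8]
+     (minus (reg r1) (reg r2))		-> [r1, -r2]
+     (pre_dec (reg r3)), SImode ref	-> [r3, #-4]!
+     (post_inc (reg r3)), SImode ref	-> [r3], #4  */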
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+      if ((mi_delta & (3 << shift)) == 0) \
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
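+
+/* Illustration: a DELTA of 0x1004 cannot be encoded in a single ARM
+   immediate, so the loop above splits it into encodable chunks:
+     add	r0, r0, #4
+     add	r0, r0, #4096
+     b	<function>
+   (r1 is used instead of r0 when the function returns an aggregate in
+   memory).  */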
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+
+void arm_override_options (/* void */);
+int use_return_insn (/* void */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+/* int s_register_operand (register rtx op, enum machine_mode mode); */
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_990720.h b/gcc_arm/config/arm/arm_990720.h
new file mode 100755
index 0000000..6e4a300
--- /dev/null
+++ b/gcc_arm/config/arm/arm_990720.h
@@ -0,0 +1,2210 @@
+/* Definitions of target machine for GNU compiler, for Acorn RISC Machine.
+ Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Configuration triples for ARM ports work as follows:
+ (This is a bit of a mess and needs some thought)
+ arm-*-*: little endian
+ armel-*-*: little endian
+ armeb-*-*: big endian
+ If a non-embedded environment (ie: "real" OS) is specified, `arm'
+ should default to that used by the OS.
+*/
+
+#ifndef __ARM_H__
+#define __ARM_H__
+
+#define TARGET_CPU_arm2 0x0000
+#define TARGET_CPU_arm250 0x0000
+#define TARGET_CPU_arm3 0x0000
+#define TARGET_CPU_arm6 0x0001
+#define TARGET_CPU_arm600 0x0001
+#define TARGET_CPU_arm610 0x0002
+#define TARGET_CPU_arm7 0x0001
+#define TARGET_CPU_arm7m 0x0004
+#define TARGET_CPU_arm7dm 0x0004
+#define TARGET_CPU_arm7dmi 0x0004
+#define TARGET_CPU_arm700 0x0001
+#define TARGET_CPU_arm710 0x0002
+#define TARGET_CPU_arm7100 0x0002
+#define TARGET_CPU_arm7500 0x0002
+#define TARGET_CPU_arm7500fe 0x1001
+#define TARGET_CPU_arm7tdmi 0x0008
+#define TARGET_CPU_arm8 0x0010
+#define TARGET_CPU_arm810 0x0020
+#define TARGET_CPU_strongarm 0x0040
+#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_strongarm1100 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
+/* Configure didn't specify */
+#define TARGET_CPU_generic 0x8000
+
+enum arm_cond_code
+{
+ ARM_EQ = 0, ARM_NE, ARM_CS, ARM_CC, ARM_MI, ARM_PL, ARM_VS, ARM_VC,
+ ARM_HI, ARM_LS, ARM_GE, ARM_LT, ARM_GT, ARM_LE, ARM_AL, ARM_NV
+};
+extern enum arm_cond_code arm_current_cc;
+extern char *arm_condition_codes[];
+
+#define ARM_INVERSE_CONDITION_CODE(X) ((enum arm_cond_code) (((int)X) ^ 1))
+
+/* This is needed by the tail-calling peepholes */
+extern int frame_pointer_needed;
+
+
+/* Just in case configure has failed to define anything. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT TARGET_CPU_generic
+#endif
+
+/* If the configuration file doesn't specify the cpu, the subtarget may
+ override it. If it doesn't, then default to an ARM6. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_generic
+#undef TARGET_CPU_DEFAULT
+#ifdef SUBTARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT SUBTARGET_CPU_DEFAULT
+#else
+#define TARGET_CPU_DEFAULT TARGET_CPU_arm6
+#endif
+#endif
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm2
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_2__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm6 || TARGET_CPU_DEFAULT == TARGET_CPU_arm610 || TARGET_CPU_DEFAULT == TARGET_CPU_arm7500fe
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
+#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
+#else
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Acpu(arm) -Amachine(arm)"
+#endif
+
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+%(cpp_endian) %(subtarget_cpp_spec)"
+
+/* Set the architecture define -- if -march= is set, then it overrides
+ the -mcpu= setting. */
+#define CPP_CPU_ARCH_SPEC "\
+%{m2:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m3:-D__arm2__ -D__ARM_ARCH_2__} \
+%{m6:-D__arm6__ -D__ARM_ARCH_3__} \
+%{march=arm2:-D__ARM_ARCH_2__} \
+%{march=arm250:-D__ARM_ARCH_2__} \
+%{march=arm3:-D__ARM_ARCH_2__} \
+%{march=arm6:-D__ARM_ARCH_3__} \
+%{march=arm600:-D__ARM_ARCH_3__} \
+%{march=arm610:-D__ARM_ARCH_3__} \
+%{march=arm7:-D__ARM_ARCH_3__} \
+%{march=arm700:-D__ARM_ARCH_3__} \
+%{march=arm710:-D__ARM_ARCH_3__} \
+%{march=arm7100:-D__ARM_ARCH_3__} \
+%{march=arm7500:-D__ARM_ARCH_3__} \
+%{march=arm7500fe:-D__ARM_ARCH_3__} \
+%{march=arm7m:-D__ARM_ARCH_3M__} \
+%{march=arm7dm:-D__ARM_ARCH_3M__} \
+%{march=arm7dmi:-D__ARM_ARCH_3M__} \
+%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
+%{march=arm8:-D__ARM_ARCH_4__} \
+%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm920:-D__ARM_ARCH_4__} \
+%{march=arm920t:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
+%{march=strongarm:-D__ARM_ARCH_4__} \
+%{march=strongarm110:-D__ARM_ARCH_4__} \
+%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=armv2:-D__ARM_ARCH_2__} \
+%{march=armv2a:-D__ARM_ARCH_2__} \
+%{march=armv3:-D__ARM_ARCH_3__} \
+%{march=armv3m:-D__ARM_ARCH_3M__} \
+%{march=armv4:-D__ARM_ARCH_4__} \
+%{march=armv4t:-D__ARM_ARCH_4T__} \
+%{!march=*: \
+ %{mcpu=arm2:-D__ARM_ARCH_2__} \
+ %{mcpu=arm250:-D__ARM_ARCH_2__} \
+ %{mcpu=arm3:-D__ARM_ARCH_2__} \
+ %{mcpu=arm6:-D__ARM_ARCH_3__} \
+ %{mcpu=arm600:-D__ARM_ARCH_3__} \
+ %{mcpu=arm610:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7:-D__ARM_ARCH_3__} \
+ %{mcpu=arm700:-D__ARM_ARCH_3__} \
+ %{mcpu=arm710:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7100:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7500fe:-D__ARM_ARCH_3__} \
+ %{mcpu=arm7m:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dm:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7dmi:-D__ARM_ARCH_3M__} \
+ %{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm8:-D__ARM_ARCH_4__} \
+ %{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm920:-D__ARM_ARCH_4__} \
+ %{mcpu=arm920t:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
+ %{mcpu=strongarm:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm110:-D__ARM_ARCH_4__} \
+ %{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
+"
+
+/* Define __APCS_26__ if the PC also contains the PSR */
+/* This also examines deprecated -m[236] if neither of -mapcs-{26,32} is set.
+   ??? Delete this for 2.9.  */
+#define CPP_APCS_PC_SPEC "\
+%{mapcs-32:%{mapcs-26:%e-mapcs-26 and -mapcs-32 may not be used together} \
+ -D__APCS_32__} \
+%{mapcs-26:-D__APCS_26__} \
+%{!mapcs-32: %{!mapcs-26:%{m6:-D__APCS_32__} %{m2:-D__APCS_26__} \
+ %{m3:-D__APCS_26__} %{!m6:%{!m3:%{!m2:%(cpp_apcs_pc_default)}}}}} \
+"
+
+#ifndef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_26__"
+#endif
+
+#define CPP_FLOAT_SPEC "\
+%{msoft-float:\
+  %{mhard-float:%e-msoft-float and -mhard-float may not be used together} \
+ -D__SOFTFP__} \
+%{!mhard-float:%{!msoft-float:%(cpp_float_default)}} \
+"
+
+/* Default is hard float, which doesn't define anything */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+#define CPP_ENDIAN_SPEC "\
+%{mbig-endian: \
+ %{mlittle-endian: \
+ %e-mbig-endian and -mlittle-endian may not be used together} \
+ -D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mlittle-endian:%{!mbig-endian:%(cpp_endian_default)}} \
+"
+
+/* Default is little endian, which doesn't define anything. */
+#define CPP_ENDIAN_DEFAULT_SPEC ""
+
+/* Translate (for now) the old -m[236] option into the appropriate -mcpu=...
+ and -mapcs-xx equivalents.
+   ??? Remove support for this style in 2.9.  */
+#define CC1_SPEC "\
+%{m2:-mcpu=arm2 -mapcs-26} \
+%{m3:-mcpu=arm3 -mapcs-26} \
+%{m6:-mcpu=arm6 -mapcs-32} \
+"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+   specification name, and a string constant that is used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+#define EXTRA_SPECS \
+ { "cpp_cpu_arch", CPP_CPU_ARCH_SPEC }, \
+ { "cpp_cpu_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_apcs_pc", CPP_APCS_PC_SPEC }, \
+ { "cpp_apcs_pc_default", CPP_APCS_PC_DEFAULT_SPEC }, \
+ { "cpp_float", CPP_FLOAT_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_CPP_SPEC ""
+
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION \
+ fputs (" (ARM/generic)", stderr);
+#endif
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+extern int target_flags;
+
+/* The floating point instruction architecture, can be 2 or 3 */
+/* CYGNUS LOCAL nickc/renamed from target_fp_name */
+extern char * target_fpe_name;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if the function prologue (and epilogue) should obey
+ the ARM Procedure Call Standard. */
+#define ARM_FLAG_APCS_FRAME (0x0001)
+
+/* Nonzero if the function prologue should output the function name to enable
+ the post mortem debugger to print a backtrace (very useful on RISCOS,
+ unused on RISCiX). Specifying this flag also enables
+ -fno-omit-frame-pointer.
+ XXX Must still be implemented in the prologue. */
+#define ARM_FLAG_POKE (0x0002)
+
+/* Nonzero if floating point instructions are emulated by the FPE, in which
+ case instruction scheduling becomes very uninteresting. */
+#define ARM_FLAG_FPE (0x0004)
+
+/* Nonzero if destined for an ARM6xx. Takes out bits that assume restoration
+ of condition flags when returning from a branch & link (ie. a function) */
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM6 (0x0008)
+
+/* ********* DEPRECATED ******** */
+#define ARM_FLAG_ARM3 (0x0010)
+
+/* Nonzero if destined for a processor in 32-bit program mode. Takes out bit
+ that assume restoration of the condition flags when returning from a
+ branch and link (ie a function). */
+#define ARM_FLAG_APCS_32 (0x0020)
+
+/* Nonzero if stack checking should be performed on entry to each function
+ which allocates temporary variables on the stack. */
+#define ARM_FLAG_APCS_STACK (0x0040)
+
+/* Nonzero if floating point parameters should be passed to functions in
+ floating point registers. */
+#define ARM_FLAG_APCS_FLOAT (0x0080)
+
+/* Nonzero if re-entrant, position independent code should be generated.
+ This is equivalent to -fpic. */
+#define ARM_FLAG_APCS_REENT (0x0100)
+
+/* Nonzero if the MMU will trap unaligned word accesses, so shorts must be
+ loaded byte-at-a-time. */
+#define ARM_FLAG_SHORT_BYTE (0x0200)
+
+/* Nonzero if all floating point instructions are missing (and there is no
+ emulator either). Generate function calls for all ops in this case. */
+#define ARM_FLAG_SOFT_FLOAT (0x0400)
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define ARM_FLAG_BIG_END (0x0800)
+
+/* Nonzero if we should compile for Thumb interworking. */
+#define ARM_FLAG_THUMB (0x1000)
+
+/* Nonzero if we should have little-endian words even when compiling for
+ big-endian (for backwards compatibility with older versions of GCC). */
+#define ARM_FLAG_LITTLE_WORDS (0x2000)
+
+/* CYGNUS LOCAL */
+/* Nonzero if we need to protect the prolog from scheduling */
+#define ARM_FLAG_NO_SCHED_PRO (0x4000)
+/* END CYGNUS LOCAL */
+
+/* Nonzero if a call to abort should be generated if a noreturn
+function tries to return. */
+#define ARM_FLAG_ABORT_NORETURN (0x8000)
+
+#define TARGET_APCS (target_flags & ARM_FLAG_APCS_FRAME)
+#define TARGET_POKE_FUNCTION_NAME (target_flags & ARM_FLAG_POKE)
+#define TARGET_FPE (target_flags & ARM_FLAG_FPE)
+#define TARGET_6 (target_flags & ARM_FLAG_ARM6)
+#define TARGET_3 (target_flags & ARM_FLAG_ARM3)
+#define TARGET_APCS_32 (target_flags & ARM_FLAG_APCS_32)
+#define TARGET_APCS_STACK (target_flags & ARM_FLAG_APCS_STACK)
+#define TARGET_APCS_FLOAT (target_flags & ARM_FLAG_APCS_FLOAT)
+#define TARGET_APCS_REENT (target_flags & ARM_FLAG_APCS_REENT)
+#define TARGET_SHORT_BY_BYTES (target_flags & ARM_FLAG_SHORT_BYTE)
+#define TARGET_SOFT_FLOAT (target_flags & ARM_FLAG_SOFT_FLOAT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_BIG_END (target_flags & ARM_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_LITTLE_WORDS (target_flags & ARM_FLAG_LITTLE_WORDS)
+/* CYGNUS LOCAL */
+#define TARGET_NO_SCHED_PRO (target_flags & ARM_FLAG_NO_SCHED_PRO)
+/* END CYGNUS LOCAL */
+#define TARGET_ABORT_NORETURN (target_flags & ARM_FLAG_ABORT_NORETURN)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis.
+ Bit 31 is reserved. See riscix.h. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"apcs", ARM_FLAG_APCS_FRAME, "" }, \
+ {"apcs-frame", ARM_FLAG_APCS_FRAME, \
+ "Generate APCS conformant stack frames" }, \
+ {"no-apcs-frame", -ARM_FLAG_APCS_FRAME, "" }, \
+ {"poke-function-name", ARM_FLAG_POKE, \
+ "Store function names in object code" }, \
+ {"fpe", ARM_FLAG_FPE, "" }, \
+ {"6", ARM_FLAG_ARM6, "" }, \
+ {"2", ARM_FLAG_ARM3, "" }, \
+ {"3", ARM_FLAG_ARM3, "" }, \
+ {"apcs-32", ARM_FLAG_APCS_32, \
+     "Use the 32-bit version of the APCS" },				\
+  {"apcs-26",			-ARM_FLAG_APCS_32,			\
+     "Use the 26-bit version of the APCS" },				\
+ {"apcs-stack-check", ARM_FLAG_APCS_STACK, "" }, \
+ {"no-apcs-stack-check", -ARM_FLAG_APCS_STACK, "" }, \
+ {"apcs-float", ARM_FLAG_APCS_FLOAT, \
+ "Pass FP arguments in FP registers" }, \
+ {"no-apcs-float", -ARM_FLAG_APCS_FLOAT, "" }, \
+ {"apcs-reentrant", ARM_FLAG_APCS_REENT, \
+ "Generate re-entrant, PIC code" }, \
+ {"no-apcs-reentrant", -ARM_FLAG_APCS_REENT, "" }, \
+ {"short-load-bytes", ARM_FLAG_SHORT_BYTE, \
+ "Load shorts a byte at a time" }, \
+ {"no-short-load-bytes", -ARM_FLAG_SHORT_BYTE, "" }, \
+ {"short-load-words", -ARM_FLAG_SHORT_BYTE, \
+     "Load shorts a word at a time" },					\
+ {"no-short-load-words", ARM_FLAG_SHORT_BYTE, "" }, \
+ {"soft-float", ARM_FLAG_SOFT_FLOAT, \
+ "Use library calls to perform FP operations" }, \
+ {"hard-float", -ARM_FLAG_SOFT_FLOAT, \
+ "Use hardware floating point instructions" }, \
+ {"big-endian", ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as big endian" }, \
+ {"little-endian", -ARM_FLAG_BIG_END, \
+ "Assume target CPU is configured as little endian" }, \
+ {"words-little-endian", ARM_FLAG_LITTLE_WORDS, \
+ "Assume big endian bytes, little endian words" }, \
+ {"thumb-interwork", ARM_FLAG_THUMB, \
+     "Support calls between THUMB and ARM instruction sets" },		\
+ {"no-thumb-interwork", -ARM_FLAG_THUMB, "" }, \
+ {"abort-on-noreturn", ARM_FLAG_ABORT_NORETURN, \
+ "Generate a call to abort if a noreturn function returns"}, \
+ {"no-abort-on-noreturn", -ARM_FLAG_ABORT_NORETURN, ""}, \
+ /* CYGNUS LOCAL */ \
+ {"sched-prolog", -ARM_FLAG_NO_SCHED_PRO, \
+ "Do not move instructions into a function's prologue" }, \
+ {"no-sched-prolog", ARM_FLAG_NO_SCHED_PRO, "" }, \
+ /* END CYGNUS LOCAL */ \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT } \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", & arm_select[0].string, \
+ "Specify the name of the target CPU" }, \
+ {"arch=", & arm_select[1].string, \
+ "Specify the name of the target architecture" }, \
+ {"tune=", & arm_select[2].string, "" }, \
+ {"fpe=", & target_fpe_name, "" }, \
+ {"fp=", & target_fpe_name, \
+ "Specify the version of the floating point emulator" }, \
+ { "structure-size-boundary=", & structure_size_string, \
+     "Specify the minimum bit alignment of structures" }		\
+}
+
+struct arm_cpu_select
+{
+ char * string;
+ char * name;
+ struct processors * processors;
+};
+
+/* This is a magic array. If the user specifies a command line switch
+ which matches one of the entries in TARGET_OPTIONS then the corresponding
+ string pointer will be set to the value specified by the user. */
+extern struct arm_cpu_select arm_select[];
+
+enum prog_mode_type
+{
+ prog_mode26,
+ prog_mode32
+};
+
+/* Recast the program mode class to be the prog_mode attribute */
+#define arm_prog_mode ((enum attr_prog_mode) arm_prgmode)
+
+extern enum prog_mode_type arm_prgmode;
+
+/* What sort of floating point unit do we have? Hardware or software.
+ If software, is it issue 2 or issue 3? */
+enum floating_point_type
+{
+ FP_HARD,
+ FP_SOFT2,
+ FP_SOFT3
+};
+
+/* Recast the floating point class to be the floating point attribute. */
+#define arm_fpu_attr ((enum attr_fpu) arm_fpu)
+
+/* What type of floating point to tune for */
+extern enum floating_point_type arm_fpu;
+
+/* What type of floating point instructions are available */
+extern enum floating_point_type arm_fpu_arch;
+
+/* Default floating point architecture. Override in sub-target if
+ necessary. */
+#define FP_DEFAULT FP_SOFT2
+
+/* Nonzero if the processor has a fast multiply insn, and one that does
+ a 64-bit multiply of two 32-bit values. */
+extern int arm_fast_multiply;
+
+/* Nonzero if this chip supports the ARM Architecture 4 extensions */
+extern int arm_arch4;
+
+/* CYGNUS LOCAL nickc/load scheduling */
+/* Nonzero if this chip can benefit from load scheduling. */
+extern int arm_ld_sched;
+/* END CYGNUS LOCAL */
+
+/* Nonzero if this chip is a StrongARM. */
+extern int arm_is_strong;
+
+/* Nonzero if this chip is an ARM6 or an ARM7. */
+extern int arm_is_6_or_7;
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+/* The frame pointer register used in gcc has nothing to do with debugging;
+ that is controlled by the APCS-FRAME option. */
+/* Not fully implemented yet */
+/* #define CAN_DEBUG_WITHOUT_FP 1 */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS arm_override_options ()
+
+/* Target machine storage Layout. */
+
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+/* It is far faster to zero extend chars than to sign extend them */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ if (MODE == QImode) \
+ UNSIGNEDP = 1; \
+ else if (MODE == HImode) \
+ UNSIGNEDP = TARGET_SHORT_BY_BYTES != 0; \
+ (MODE) = SImode; \
+ }
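+
+/* So a QImode value (e.g. a `char') lives in a full 32-bit register
+   and is always widened with zero extension, which is cheaper on the
+   ARM than sign extension; an HImode value is widened unsigned only
+   when TARGET_SHORT_BY_BYTES (-mshort-load-bytes) is in effect.  */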
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/* This is required to ensure that push insns always push a word. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define for XFmode extended real floating point support.
+ This will automatically cause REAL_ARITHMETIC to be defined. */
+/* For the ARM:
+ I think I have added all the code to make this work. Unfortunately,
+ early releases of the floating point emulation code on RISCiX used a
+ different format for extended precision numbers. On my RISCiX box there
+ is a bug somewhere which causes the machine to lock up when running enquire
+ with long doubles. There is the additional aspect that Norcroft C
+ treats long doubles as doubles and we ought to remain compatible.
+ Perhaps someone with an FPA coprocessor and not running RISCiX would like
+ to try this someday. */
+/* #define LONG_DOUBLE_TYPE_SIZE 96 */
+
+/* Disable XFmode patterns in md file */
+#define ENABLE_XF_PATTERNS 0
+
+/* Define if you don't want extended real, but do want to use the
+ software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+/* See comment above */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered.
+ Most ARM processors are run in little endian mode, so that is the default.
+ If you want to have it run-time selectable, change the definition in a
+ cover file to be TARGET_BIG_ENDIAN. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered.
+ This is always false, even when in big-endian mode. */
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN && ! TARGET_LITTLE_WORDS)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor pre-defineds when compiling libgcc2.c. */
+#if defined(__ARMEB__) && !defined(__ARMWEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define this if most significant word of doubles is the lowest numbered.
+ This is always true, even when in little-endian mode. */
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PARM_BOUNDARY 32
+
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Every structure's size must be a multiple of 32 bits. */
+/* This is for compatibility with ARMCC. ARM SDT Reference Manual
+ (ARM DUI 0020D) page 2-20 says "Structures are aligned on word
+ boundaries". */
+#ifndef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 32
+#endif
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Standard register usage. */
+
+/* Register allocation in ARM Procedure Call Standard (as used on RISCiX):
+ (S - saved over call).
+
+ r0 * argument word/integer result
+ r1-r3 argument word
+
+ r4-r8 S register variable
+ r9 S (rfp) register variable (real frame pointer)
+ CYGNUS LOCAL nickc/comment change
+ r10 F S (sl) stack limit (used by -mapcs-stack-check)
+ END CYGNUS LOCAL
+ r11 F S (fp) argument pointer
+ r12 (ip) temp workspace
+ r13 F S (sp) lower end of current stack frame
+ r14 (lr) link address/workspace
+ r15 F (pc) program counter
+
+ f0 floating point result
+ f1-f3 floating point scratch
+
+ f4-f7 S floating point variable
+
+ cc This is NOT a real register, but is used internally
+ to represent things that use or set the condition
+ codes.
+ sfp This isn't either. It is used during rtl generation
+ since the offset between the frame pointer and the
+ auto's isn't known until after register allocation.
+ afp Nor this, we only need this because of non-local
+ goto. Without it fp appears to be used and the
+ elimination code won't get rid of sfp. It tracks
+ fp exactly at all times.
+
+ *: See CONDITIONAL_REGISTER_USAGE */
+
+/* The stack backtrace structure is as follows:
+ fp points to here: | save code pointer | [fp]
+ | return link value | [fp, #-4]
+ | return sp value | [fp, #-8]
+ | return fp value | [fp, #-12]
+ [| saved r10 value |]
+ [| saved r9 value |]
+ [| saved r8 value |]
+ [| saved r7 value |]
+ [| saved r6 value |]
+ [| saved r5 value |]
+ [| saved r4 value |]
+ [| saved r3 value |]
+ [| saved r2 value |]
+ [| saved r1 value |]
+ [| saved r0 value |]
+ [| saved f7 value |] three words
+ [| saved f6 value |] three words
+ [| saved f5 value |] three words
+ [| saved f4 value |] three words
+ r0-r3 are not normally saved in a C function. */
+
+/* The number of hard registers is 16 ARM + 8 FPU + 1 CC + 1 SFP. */
+#define FIRST_PSEUDO_REGISTER 27
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,1,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like.
+ The CC is not preserved over function calls on the ARM 6, so it is
+ easier to assume this for all. SFP is preserved, since FP is. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,1,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* If doing stupid life analysis, avoid a bug that causes the return value
+   in r0 to be trampled.  This effectively reduces the number of available
+   registers by 1.
+ XXX It is a hack, I know.
+ XXX Is this still needed? */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (obey_regdecls) \
+ fixed_regs[0] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ { \
+ int regno; \
+ for (regno = 16; regno < 24; ++regno) \
+ fixed_regs[regno] = call_used_regs[regno] = 1; \
+ } \
+ if (flag_pic) \
+ { \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 0; \
+ } \
+ /* CYGNUS LOCAL */ \
+ else if (! TARGET_APCS_STACK) \
+ { \
+ fixed_regs[10] = 0; \
+ call_used_regs[10] = 0; \
+ } \
+ /* END CYGNUS LOCAL */ \
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On the ARM regs are UNITS_PER_WORD bits wide; FPU regs can hold any FP
+ mode. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((REGNO) >= 16 && REGNO != FRAME_POINTER_REGNUM \
+ && (REGNO) != ARG_POINTER_REGNUM) ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
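+
+/* E.g. a DFmode (64-bit) value needs two consecutive core registers,
+   such as r0/r1, but only a single FPU register, such as f0.  */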
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ This is TRUE for ARM regs since they can hold anything, and TRUE for FPU
+ regs holding FP. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((GET_MODE_CLASS (MODE) == MODE_CC) ? (REGNO == CC_REGNUM) : \
+ ((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Define this if the program counter is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 13
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 25
+
+/* Define this to be where the real frame pointer is if it is not possible to
+ work out the offset between the frame pointer and the automatic variables
+ until after register allocation has taken place. FRAME_POINTER_REGNUM
+ should point to a special register that we will make sure is eliminated. */
+#define HARD_FRAME_POINTER_REGNUM 11
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable.
+ If we have to have a frame pointer we might as well make use of it.
+ APCS says that the frame pointer does not need to be pushed in leaf
+ functions, or simple tail call functions. */
+/* CYGNUS LOCAL */
+#define FRAME_POINTER_REQUIRED \
+ (current_function_has_nonlocal_label \
+ || (TARGET_APCS && (! leaf_function_p () && ! can_tail_call_optimise ())))
+
+extern int can_tail_call_optimise ();
+/* END CYGNUS LOCAL */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 26
+
+/* The native (Norcroft) Pascal compiler for the ARM passes the static chain
+ as an invisible last argument (possible since varargs don't exist in
+ Pascal), so the following is not true. */
+#define STATIC_CHAIN_REGNUM 8
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 0
+
+/* Internal, so that we don't need to refer to a raw number */
+#define CC_REGNUM 24
+
+/* The order in which register should be allocated. It is good to use ip
+ since no saving is required (though calls clobber it) and it never contains
+ function parameters. It is quite good to use lr since other calls may
+ clobber it anyway. Allocate r0 through r3 in reverse order since r3 is
+ least likely to contain a function parameter; in addition results are
+ returned in r0.
+ */
+#define REG_ALLOC_ORDER \
+{ \
+ 3, 2, 1, 0, 12, 14, 4, 5, \
+ 6, 7, 8, 10, 9, 11, 13, 15, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 24, 25, 26 \
+}
+
+/* Register and constant classes. */
+
+/* Register classes: all ARM regs or all FPU regs---simple! */
+enum reg_class
+{
+ NO_REGS,
+ FPU_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "FPU_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x0000000, /* NO_REGS */ \
+ 0x0FF0000, /* FPU_REGS */ \
+ 0x200FFFF, /* GENERAL_REGS */ \
+ 0x2FFFFFF /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+#define REGNO_REG_CLASS(REGNO) \
+ (((REGNO) < 16 || REGNO == FRAME_POINTER_REGNUM \
+ || REGNO == ARG_POINTER_REGNUM) \
+ ? GENERAL_REGS : (REGNO) == CC_REGNUM \
+ ? NO_REGS : FPU_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description.
+ We only need constraint `f' for FPU_REGS (`r' == GENERAL_REGS). */
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C)=='f' ? FPU_REGS : NO_REGS)
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: immediate arithmetic operand (i.e. 8 bits shifted as required).
+ J: valid indexing constants.
+ K: ~value ok in rhs argument of data operand.
+ L: -value ok in rhs argument of data operand.
+ M: 0..32, or a power of 2 (for shifts, or mult done by shift). */
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? const_ok_for_arm (VALUE) : \
+ (C) == 'J' ? ((VALUE) < 4096 && (VALUE) > -4096) : \
+ (C) == 'K' ? (const_ok_for_arm (~(VALUE))) : \
+ (C) == 'L' ? (const_ok_for_arm (-(VALUE))) : \
+ (C) == 'M' ? (((VALUE >= 0 && VALUE <= 32)) \
+ || (((VALUE) & ((VALUE) - 1)) == 0)) \
+ : 0)
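+
+/* For instance `I' matches the ARM data-processing immediates: any
+   8-bit value rotated right by an even amount, so 0xff, 0x3fc and
+   0xff000000 all match while 0x101 does not.  `M' matches shift
+   counts (0..32) and power-of-2 multipliers such as 64.  */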
+
+/* For the ARM, `Q' means that this is a memory operand that is just
+ an offset from a register.
+ `S' means any symbol that has the SYMBOL_REF_FLAG set or a CONSTANT_POOL
+ address. This means that the symbol is in the text segment and can be
+ accessed without using a load. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (XEXP (OP, 0))) \
+ : (C) == 'S' ? (optimize > 0 && CONSTANT_ADDRESS_P (OP)) \
+ : 0)
+
+/* Constant letter 'G' for the FPU immediate constants.
+ 'H' means the same constant negated. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(X,C) \
+ ((C) == 'G' ? const_double_rtx_ok_for_fpu (X) \
+ : (C) == 'H' ? neg_const_double_rtx_ok_for_fpu (X) : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) (CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && true_regnum (X) == -1) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* If we need to load shorts byte-at-a-time, then we need a scratch. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,X) \
+ (((MODE) == HImode && ! arm_arch4 && TARGET_SHORT_BY_BYTES \
+ && (GET_CODE (X) == MEM \
+ || ((GET_CODE (X) == REG || GET_CODE (X) == SUBREG) \
+ && true_regnum (X) == -1))) \
+ ? GENERAL_REGS : NO_REGS)
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the ARM, we wish to handle large displacements off a base
+ register by splitting the addend across a MOV and the mem insn.
+ This can cut the number of reloads needed. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low, high; \
+ \
+ if (MODE == DImode || (TARGET_SOFT_FLOAT && MODE == DFmode)) \
+ low = ((val & 0xf) ^ 0x8) - 0x8; \
+ else if (MODE == SImode || MODE == QImode \
+ || (MODE == SFmode && TARGET_SOFT_FLOAT) \
+ || (MODE == HImode && ! arm_arch4)) \
+ /* Need to be careful, -4096 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xfff) : -((-val) & 0xfff); \
+ else if (MODE == HImode && arm_arch4) \
+ /* Need to be careful, -256 is not a valid offset */ \
+ low = val >= 0 ? (val & 0xff) : -((-val) & 0xff); \
+ else if (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT) \
+ /* Need to be careful, -1024 is not a valid offset */ \
+ low = val >= 0 ? (val & 0x3ff) : -((-val) & 0x3ff); \
+ else \
+ break; \
+ \
+ high = ((((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000); \
+ /* Check for overflow or zero */ \
+ if (low == 0 || high == 0 || (high + low != val)) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem. */ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
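+
+/* Example: an SImode reference to (plus (reg r4) (const_int 0x1234))
+   is split as high = 0x1000, low = 0x234; the high part is reloaded
+   into a base register and the low part stays in the address, giving
+   (depending on the register reload chooses) something like:
+     add	ip, r4, #4096
+     ldr	r0, [ip, #564]  */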
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+ ARM regs are UNITS_PER_WORD bits while FPU regs can hold any FP mode */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((CLASS) == FPU_REGS ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Moves between FPU_REGS and GENERAL_REGS are two memory insns. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ ((((CLASS1) == FPU_REGS && (CLASS2) != FPU_REGS) \
+ || ((CLASS2) == FPU_REGS && (CLASS1) != FPU_REGS)) \
+ ? 20 : 2)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD 1
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+/* The push insns do not do this rounding implicitly. So don't define this. */
+/* #define PUSH_ROUNDING(NPUSHED) (((NPUSHED) + 3) & ~3) */
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the ARM, the caller does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ (GET_MODE_CLASS (TYPE_MODE (VALTYPE)) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, TYPE_MODE (VALTYPE), 16) \
+ : gen_rtx (REG, TYPE_MODE (VALTYPE), 0))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT && TARGET_HARD_FLOAT \
+ ? gen_rtx (REG, MODE, 16) \
+ : gen_rtx (REG, MODE, 0))
+
+/* 1 if N is a possible register number for a function value.
+ On the ARM, only r0 and f0 can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) \
+  ((REGNO) == 0 || ((REGNO) == 16 && TARGET_HARD_FLOAT))
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+/* CYGNUS LOCAL */
+#define RETURN_IN_MEMORY(TYPE) arm_return_in_memory (TYPE)
+/* END CYGNUS LOCAL */
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
+ other arguments are passed on the stack. If (NAMED == 0) (which happens
+ only in assign_parms, since SETUP_INCOMING_VARARGS is defined), say it is
+ passed in the stack (function_prologue will indeed make it pass in the
+ stack if necessary). */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((NAMED) \
+ ? ((CUM) >= 16 ? 0 : gen_rtx (REG, MODE, (CUM) / 4)) \
+ : 0)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ ((CUM) < 16 && 16 < (CUM) + ((MODE) != BLKmode \
+ ? GET_MODE_SIZE (MODE) \
+ : int_size_in_bytes (TYPE)) \
+ ? 4 - (CUM) / 4 : 0)
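+
+/* Example: with CUM == 8 (r0/r1 already used), a 12-byte structure
+   argument starts in r2 -- FUNCTION_ARG yields (reg 2) and
+   FUNCTION_ARG_PARTIAL_NREGS is 2 -- so r2/r3 carry the first eight
+   bytes and the last four go on the stack.  */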
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) ? 4 : 0))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+/* 1 if N is a possible register number for function argument passing.
+ On the ARM, r0-r3 are used to pass args. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= 0 && (REGNO) <= 3)
+
+/* Perform any actions needed for a function that is receiving a variable
+ number of arguments. CUM is as above. MODE and TYPE are the mode and type
+ of the current parameter. PRETEND_SIZE is a variable that should be set to
+ the amount of stack that must be pushed by the prolog to pretend that our
+ caller pushed it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
+ named arg and all anonymous args onto the stack.
+ XXX I know the prologue shouldn't be pushing registers, but it is faster
+ that way. */
+#define SETUP_INCOMING_VARARGS(CUM, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
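+
+/* For example, if CUM is 4 on entry then PRETEND_SIZE becomes 12 and the
+ prologue dumps r1-r3 to the stack, so the anonymous arguments sit
+ contiguously with any arguments already passed on the stack. */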
+
+/* Generate assembly output for the start of a function. */
+#define FUNCTION_PROLOGUE(STREAM, SIZE) \
+ output_func_prologue ((STREAM), (SIZE))
+
+/* Call the function profiler with a given profile label. The Acorn compiler
+ puts this BEFORE the prolog but gcc puts it afterwards. The ``mov ip, lr''
+ is kept to stay compatible with the cc convention, and ``prof'' doesn't
+ seem to mind the difference! */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+ fprintf(STREAM, "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero.
+
+ On the ARM, the function epilogue recovers the stack pointer from the
+ frame. */
+#define EXIT_IGNORE_STACK 1
+
+/* Generate the assembly code for function exit. */
+#define FUNCTION_EPILOGUE(STREAM, SIZE) \
+ output_func_epilogue ((STREAM), (SIZE))
+
+/* Determine if the epilogue should be output as RTL.
+ You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
+
+/* Definitions for register eliminations.
+
+ This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference.
+
+ We have two registers that can be eliminated on the ARM. First, the
+ arg pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the pseudo frame pointer register can always
+ be eliminated; it is replaced with either the stack or the real frame
+ pointer. */
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ All eliminations are permissible. Note that ARG_POINTER_REGNUM and
+ HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
+ pointer, we must eliminate FRAME_POINTER_REGNUM into
+ HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (((TO) == STACK_POINTER_REGNUM && frame_pointer_needed) ? 0 : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ int volatile_func = arm_volatile_func (); \
+ if ((FROM) == ARG_POINTER_REGNUM && (TO) == HARD_FRAME_POINTER_REGNUM)\
+ (OFFSET) = 0; \
+ else if ((FROM) == FRAME_POINTER_REGNUM \
+ && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (current_function_outgoing_args_size \
+ + ((get_frame_size () + 3) & ~3)); \
+ else \
+ { \
+ int regno; \
+ int offset = 12; \
+ int saved_hard_reg = 0; \
+ \
+ if (! volatile_func) \
+ { \
+ for (regno = 0; regno <= 10; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ saved_hard_reg = 1, offset += 4; \
+ for (regno = 16; regno <=23; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ offset += 12; \
+ } \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = -offset; \
+ else \
+ { \
+ if (! frame_pointer_needed) \
+ offset -= 16; \
+ if (! volatile_func \
+ && (regs_ever_live[14] || saved_hard_reg)) \
+ offset += 4; \
+ offset += current_function_outgoing_args_size; \
+ (OFFSET) = ((get_frame_size () + 3) & ~3) + offset; \
+ } \
+ } \
+}
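+
+/* For example, in the frame pointer to stack pointer case, 8 bytes of
+ locals and no outgoing arguments give
+ OFFSET = 0 + ((8 + 3) & ~3) = 8. */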
+
+/* CYGNUS LOCAL */
+/* Special case handling of the location of arguments passed on the stack. */
+#define DEBUGGER_ARG_OFFSET(value, addr) ((value) ? (value) : arm_debugger_arg_offset ((value), (addr)))
+/* END CYGNUS LOCAL */
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\tldr\t%sr8, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%spc, [%spc, #0]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 16
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
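+
+/* After initialization the 16-byte trampoline therefore looks like:
+ ldr r8, [pc, #0] @ pc reads as . + 8, so this loads the word at +8
+ ldr pc, [pc, #0] @ likewise loads the word at +12
+ .word <static chain> @ stored at TRAMP + 8 by the insns above
+ .word <function addr> @ stored at TRAMP + 12 by the insns above */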
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c.
+
+ On the ARM, don't allow the pc to be used. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 15 || (REGNO) == FRAME_POINTER_REGNUM \
+ || (REGNO) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] < 15 \
+ || (unsigned) reg_renumber[(REGNO)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[(REGNO)] == ARG_POINTER_REGNUM)
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Maximum number of registers that can appear in a valid memory address.
+ Shifts in addresses can't be by a register. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+/* XXX We can address any constant, eventually... */
+
+#ifdef AOF_ASSEMBLER
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X))
+
+#else
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF \
+ && (CONSTANT_POOL_ADDRESS_P (X) \
+ || (optimize > 0 && SYMBOL_REF_FLAG (X))))
+
+#endif /* AOF_ASSEMBLER */
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the ARM, allow any integer (invalid ones are removed later by insn
+ patterns), nice doubles and symbol_refs which refer to the function's
+ constant pool XXX. */
+#define LEGITIMATE_CONSTANT_P(X) (! label_mentioned_p (X))
+
+/* Symbols in the text segment can be accessed without indirecting via the
+ constant pool; it may take an extra binary operation, but this is still
+ faster than indirecting via memory. Don't do this when not optimizing,
+ since we won't be calculating all of the offsets necessary to do this
+ simplification. */
+/* This doesn't work with AOF syntax, since the string table may be in
+ a different AREA. */
+#ifndef AOF_ASSEMBLER
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ if (optimize > 0 && TREE_CONSTANT (decl) \
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST)) \
+ { \
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd' \
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl)); \
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1; \
+ } \
+}
+#endif
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ REG_OK_FOR_BASE_P(X)
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_OK_FOR_PRE_POST_P(X) \
+ (REGNO (X) < 16 || (unsigned) reg_renumber[REGNO (X)] < 16 \
+ || REGNO (X) == FRAME_POINTER_REGNUM || REGNO (X) == ARG_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == FRAME_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO (X)] == ARG_POINTER_REGNUM)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+/* A C statement (sans semicolon) to jump to LABEL for legitimate index RTXs
+ used by the macro GO_IF_LEGITIMATE_ADDRESS. Floating point indices can
+ only be small constants. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, BASE_REGNO, INDEX, LABEL) \
+do \
+{ \
+ HOST_WIDE_INT range; \
+ enum rtx_code code = GET_CODE (INDEX); \
+ \
+ if (TARGET_HARD_FLOAT && GET_MODE_CLASS (MODE) == MODE_FLOAT) \
+ { \
+ if (code == CONST_INT && INTVAL (INDEX) < 1024 \
+ && INTVAL (INDEX) > -1024 \
+ && (INTVAL (INDEX) & 3) == 0) \
+ goto LABEL; \
+ } \
+ else \
+ { \
+ if (INDEX_REGISTER_RTX_P (INDEX) && GET_MODE_SIZE (MODE) <= 4) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) <= 4 && code == MULT \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx xiop0 = XEXP (INDEX, 0); \
+ rtx xiop1 = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (xiop0) \
+ && power_of_two_operand (xiop1, SImode)) \
+ goto LABEL; \
+ if (INDEX_REGISTER_RTX_P (xiop1) \
+ && power_of_two_operand (xiop0, SImode)) \
+ goto LABEL; \
+ } \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && (code == LSHIFTRT || code == ASHIFTRT \
+ || code == ASHIFT || code == ROTATERT) \
+ && (! arm_arch4 || (MODE) != HImode)) \
+ { \
+ rtx op = XEXP (INDEX, 1); \
+ if (INDEX_REGISTER_RTX_P (XEXP (INDEX, 0)) \
+ && GET_CODE (op) == CONST_INT && INTVAL (op) > 0 \
+ && INTVAL (op) <= 31) \
+ goto LABEL; \
+ } \
+ /* NASTY: grouping QImode with HImode also limits the addressing of unsigned byte loads. */ \
+ range = ((MODE) == HImode || (MODE) == QImode) \
+ ? (arm_arch4 ? 256 : 4095) : 4096; \
+ if (code == CONST_INT && INTVAL (INDEX) < range \
+ && INTVAL (INDEX) > -range) \
+ goto LABEL; \
+ } \
+} while (0)
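+
+/* For example, with an integer mode this accepts [rN, rM],
+ [rN, rM, lsl #2] and [rN, #-4095] for SImode, while HImode (and,
+ because of the grouping above, QImode) on an ARMv4 target is limited
+ to constant offsets within +/-255. With hard floating point, FP
+ accesses only take word-aligned offsets within +/-1020. */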
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck. Allow REG, REG+REG, REG+INDEX,
+ INDEX+REG, REG-INDEX, and non-floating SYMBOL_REF to the constant pool.
+ Allow REG-only and AUTOINC-REG if handling TImode or HImode. Other symbol
+ refs must be forced through a static cell to ensure addressability. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP(X,0); \
+ rtx xop1 = XEXP(X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_PRE_POST_P (XEXP (X, 0))) \
+ goto LABEL; \
+}
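+
+/* Note the DImode/soft-float DFmode arm above: besides a plain [rN],
+ only the byte offsets #4, #-4 and #-8 are accepted here; any other
+ offset has to be legitimized into a base register first. */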
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On the ARM, try to convert [REG, #BIGCONST]
+ into ADD BASE, REG, #UPPERCONST and [BASE, #VALIDCONST],
+ where VALIDCONST == 0 in case of TImode. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+{ \
+ if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0) && ! symbol_mentioned_p (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (BASE_REGISTER_RTX_P (xop0) && GET_CODE (xop1) == CONST_INT) \
+ { \
+ HOST_WIDE_INT n, low_n; \
+ rtx base_reg, val; \
+ n = INTVAL (xop1); \
+ \
+ if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ low_n = n & 0x0f; \
+ n &= ~0x0f; \
+ if (low_n > 4) \
+ { \
+ n += 16; \
+ low_n -= 16; \
+ } \
+ } \
+ else \
+ { \
+ low_n = ((MODE) == TImode ? 0 \
+ : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff)); \
+ n -= low_n; \
+ } \
+ base_reg = gen_reg_rtx (SImode); \
+ val = force_operand (gen_rtx (PLUS, SImode, xop0, \
+ GEN_INT (n)), NULL_RTX); \
+ emit_move_insn (base_reg, val); \
+ (X) = (low_n == 0 ? base_reg \
+ : gen_rtx (PLUS, SImode, base_reg, GEN_INT (low_n))); \
+ } \
+ else if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (PLUS, SImode, xop0, xop1); \
+ } \
+ else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (CONSTANT_P (xop0)) \
+ xop0 = force_reg (SImode, xop0); \
+ if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1)) \
+ xop1 = force_reg (SImode, xop1); \
+ if (xop0 != XEXP (X, 0) || xop1 != XEXP (X, 1)) \
+ (X) = gen_rtx (MINUS, SImode, xop0, xop1); \
+ } \
+ if (flag_pic) \
+ (X) = legitimize_pic_address (OLDX, MODE, NULL_RTX); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; \
+}
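+
+/* Worked example (SImode): for [r1, #0x1234] the constant splits into
+ n = 0x1000 and low_n = 0x234, producing roughly
+ add rT, r1, #4096 @ rT stands for a fresh pseudo
+ and the residual address [rT, #0x234], whose offset is encodable. */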
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if (GET_CODE(ADDR) == PRE_DEC || GET_CODE(ADDR) == POST_DEC \
+ || GET_CODE(ADDR) == PRE_INC || GET_CODE(ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Signed 'char' is most compatible, but RISC OS wants it unsigned;
+ unsigned is probably best, but may break some code. */
+#ifndef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 0
+#endif
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) \
+ ((arm_arch4 || (MODE) == QImode) ? ZERO_EXTEND \
+ : ((BYTES_BIG_ENDIAN && (MODE) == HImode) ? SIGN_EXTEND : NIL))
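+
+/* For example, a QImode load (ldrb) is always known to zero-extend, and
+ on ARMv4 an HImode load (ldrh) zero-extends as well; pre-v4 big-endian
+ HImode loads are instead known to sign-extend. */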
+
+/* Define this if zero-extension is slow (more than one real instruction).
+ On the ARM, it is more than one instruction only if not fetching from
+ memory. */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+/* This is all wrong. Defining SHIFT_COUNT_TRUNCATED tells combine that
+ code like (X << (Y % 32)) for register X, Y is equivalent to (X << Y).
+ On the ARM, Y in a register is used modulo 256 for the shift. Only for
+ rotates is modulo 32 used. */
+/* #define SHIFT_COUNT_TRUNCATED 1 */
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Calling from registers is a massive pain. */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The structure type of the machine dependent info field of insns
+ No uses for this yet. */
+/* #define INSN_MACHINE_INFO struct machine_info */
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ if (const_ok_for_arm (INTVAL (RTX))) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if ((OUTER_CODE) == AND \
+ && const_ok_for_arm (~INTVAL (RTX))) \
+ return -1; \
+ else if (((OUTER_CODE) == COMPARE \
+ || (OUTER_CODE) == PLUS || (OUTER_CODE) == MINUS) \
+ && const_ok_for_arm (-INTVAL (RTX))) \
+ return -1; \
+ else \
+ return 5; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 6; \
+ case CONST_DOUBLE: \
+ if (const_double_rtx_ok_for_fpu (RTX)) \
+ return (OUTER_CODE) == SET ? 2 : -1; \
+ else if (((OUTER_CODE) == COMPARE || (OUTER_CODE) == PLUS) \
+ && neg_const_double_rtx_ok_for_fpu (RTX)) \
+ return -1; \
+ return 7;
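+
+/* For example, #255 is a valid rotated 8-bit immediate, so as the source
+ of a SET it costs 2; #257 cannot be encoded that way and costs 5,
+ which biases cse towards keeping such constants in registers. */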
+
+#define ARM_FRAME_RTX(X) \
+ ((X) == frame_pointer_rtx || (X) == stack_pointer_rtx \
+ || (X) == arg_pointer_rtx)
+
+#define DEFAULT_RTX_COSTS(X,CODE,OUTER_CODE) \
+ return arm_rtx_costs (X, CODE, OUTER_CODE);
+
+/* Moves to and from memory are quite expensive */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) 10
+
+/* All address computations that can be done are free, but rtx cost returns
+ the same for practically all of them. So we weight the different types
+ of address here in the order (most pref first):
+ PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
+#define ADDRESS_COST(X) \
+ (10 - ((GET_CODE (X) == MEM || GET_CODE (X) == LABEL_REF \
+ || GET_CODE (X) == SYMBOL_REF) \
+ ? 0 \
+ : ((GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC \
+ || GET_CODE (X) == POST_INC || GET_CODE (X) == POST_DEC) \
+ ? 10 \
+ : (((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS) \
+ ? 6 + (GET_CODE (XEXP (X, 1)) == CONST_INT ? 2 \
+ : ((GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 0))) == 'c' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == '2' \
+ || GET_RTX_CLASS (GET_CODE (XEXP (X, 1))) == 'c') \
+ ? 1 : 0)) \
+ : 4)))))
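+
+/* For example: a post-increment address costs 10 - 10 = 0, reg+const
+ 10 - 8 = 2, reg+shifted-reg 10 - 7 = 3, reg+reg 10 - 6 = 4, a plain
+ register 10 - 4 = 6, and a bare SYMBOL_REF the full 10. */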
+
+
+
+/* Try to generate sequences that don't involve branches, we can then use
+ conditional instructions */
+#define BRANCH_COST 4
+
+/* A C statement to update the variable COST based on the relationship
+ between INSN that is dependent on DEP through dependence LINK. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ (COST) = arm_adjust_cost ((INSN), (LINK), (DEP), (COST))
+
+/* Position Independent Code. */
+/* We decide which register to use based on the compilation options and
+ the assembler in use; this is more general than the APCS restriction of
+ using sb (r9) all the time. */
+extern int arm_pic_register;
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. */
+#define PIC_OFFSET_TABLE_REGNUM arm_pic_register
+
+#define FINALIZE_PIC arm_finalize_pic ()
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! symbol_mentioned_p (X))
+
+
+
+/* Condition code information. */
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison.
+ CCFPEmode should be used with floating inequalities,
+ CCFPmode should be used with floating equalities.
+ CC_NOOVmode should be used with SImode integer equalities.
+ CC_Zmode should be used if only the Z flag is set correctly
+ CCmode should be used otherwise. */
+
+#define EXTRA_CC_MODES CC_NOOVmode, CC_Zmode, CC_SWPmode, \
+ CCFPmode, CCFPEmode, CC_DNEmode, CC_DEQmode, CC_DLEmode, \
+ CC_DLTmode, CC_DGEmode, CC_DGTmode, CC_DLEUmode, CC_DLTUmode, \
+ CC_DGEUmode, CC_DGTUmode, CC_Cmode
+
+#define EXTRA_CC_NAMES "CC_NOOV", "CC_Z", "CC_SWP", "CCFP", "CCFPE", \
+ "CC_DNE", "CC_DEQ", "CC_DLE", "CC_DLT", "CC_DGE", "CC_DGT", "CC_DLEU", \
+ "CC_DLTU", "CC_DGEU", "CC_DGTU", "CC_C"
+
+enum machine_mode arm_select_cc_mode ();
+#define SELECT_CC_MODE(OP,X,Y) arm_select_cc_mode ((OP), (X), (Y))
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+enum rtx_code arm_canonicalize_comparison ();
+#define CANONICALIZE_COMPARISON(CODE,OP0,OP1) \
+do \
+{ \
+ if (GET_CODE (OP1) == CONST_INT \
+ && ! (const_ok_for_arm (INTVAL (OP1)) \
+ || (const_ok_for_arm (- INTVAL (OP1))))) \
+ { \
+ rtx const_op = OP1; \
+ CODE = arm_canonicalize_comparison ((CODE), &const_op); \
+ OP1 = const_op; \
+ } \
+} while (0)
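+
+/* For example, (LE x #0xffffff) involves an unencodable constant, so it
+ is rewritten as (LT x #0x1000000), whose constant is a valid rotated
+ 8-bit immediate. */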
+
+#define STORE_FLAG_VALUE 1
+
+/* Define the information needed to generate branch insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *arm_compare_op0, *arm_compare_op1;
+extern int arm_compare_fp;
+
+/* Define the codes that are matched by predicates in arm.c */
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"f_register_operand", {SUBREG, REG}}, \
+ {"arm_add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_add_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_rhs_operand", {SUBREG, REG, CONST_INT}}, \
+ {"fpu_rhs_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"arm_not_operand", {SUBREG, REG, CONST_INT}}, \
+ {"offsettable_memory_operand", {MEM}}, \
+ {"bad_signed_byte_operand", {MEM}}, \
+ {"alignable_memory_operand", {MEM}}, \
+ {"shiftable_operator", {PLUS, MINUS, AND, IOR, XOR}}, \
+ {"minmax_operator", {SMIN, SMAX, UMIN, UMAX}}, \
+ {"shift_operator", {ASHIFT, ASHIFTRT, LSHIFTRT, ROTATERT, MULT}}, \
+ {"di_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE, MEM}}, \
+ {"soft_df_operand", {SUBREG, REG, CONST_DOUBLE, MEM}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"equality_operator", {EQ, NE}}, \
+ {"arm_rhsm_operand", {SUBREG, REG, CONST_INT, MEM}}, \
+ {"const_shift_operand", {CONST_INT}}, \
+ {"index_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_int_operand", {SUBREG, REG, CONST_INT}}, \
+ {"multi_register_push", {PARALLEL}}, \
+ {"cc_register", {REG}}, \
+ {"dominant_cc_register", {REG}},
+
+
+
+/* Gcc puts the pool in the wrong place for ARM, since we can only
+ load addresses a limited distance around the pc. We do some
+ special munging to move the constant pool values to the correct
+ point in the code. */
+#define MACHINE_DEPENDENT_REORG(INSN) arm_reorg ((INSN))
+
+/* The pool is empty, since we have moved everything into the code. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE,X,MODE,ALIGN,LABELNO,JUMPTO) \
+ goto JUMPTO
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char * s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL variation */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL variation */ \
+ } while (0)
+#endif
+
+/* CYGNUS LOCAL */
+/* Output a label definition. */
+#undef ASM_OUTPUT_LABEL
+#define ASM_OUTPUT_LABEL(STREAM,NAME) arm_asm_output_label ((STREAM), (NAME))
+/* END CYGNUS LOCAL */
+
+/* Output a push or a pop instruction (only used when profiling). */
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ fprintf (STREAM,"\tstmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf (STREAM,"\tldmfd\t%ssp!,{%s%s}\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX, reg_names [REGNO])
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Only perform branch elimination (by making instructions conditional) if
+ we're optimising. Otherwise it's of no use anyway. */
+#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
+ if (optimize) \
+ final_prescan_insn (INSN, OPVEC, NOPERANDS)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '?' || (CODE) == '|' || (CODE) == '@')
+/* Output an operand of an instruction. */
+#define PRINT_OPERAND(STREAM, X, CODE) \
+ arm_print_operand (STREAM, X, CODE)
+
+#define ARM_SIGN_EXTEND(x) ((HOST_WIDE_INT) \
+ (HOST_BITS_PER_WIDE_INT <= 32 ? (x) \
+ : (((x) & (unsigned HOST_WIDE_INT) 0xffffffff) | \
+ (((x) & (unsigned HOST_WIDE_INT) 0x80000000) \
+ ? ((~ (HOST_WIDE_INT) 0) \
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) \
+ : 0))))
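+
+/* For example, on a 64-bit host ARM_SIGN_EXTEND (0x80000000) yields
+ (HOST_WIDE_INT) 0xffffffff80000000; on a 32-bit host values pass
+ through unchanged. */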
+
+/* Output the address of an operand. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ int is_minus = GET_CODE (X) == MINUS; \
+ \
+ if (GET_CODE (X) == REG) \
+ fprintf (STREAM, "[%s%s, #0]", REGISTER_PREFIX, \
+ reg_names[REGNO (X)]); \
+ else if (GET_CODE (X) == PLUS || is_minus) \
+ { \
+ rtx base = XEXP (X, 0); \
+ rtx index = XEXP (X, 1); \
+ char * base_reg_name; \
+ HOST_WIDE_INT offset = 0; \
+ if (GET_CODE (base) != REG) \
+ { \
+ /* Ensure that BASE is a register (one of them must be). */ \
+ rtx temp = base; \
+ base = index; \
+ index = temp; \
+ } \
+ base_reg_name = reg_names[REGNO (base)]; \
+ switch (GET_CODE (index)) \
+ { \
+ case CONST_INT: \
+ offset = INTVAL (index); \
+ if (is_minus) \
+ offset = -offset; \
+ fprintf (STREAM, "[%s%s, #%d]", REGISTER_PREFIX, \
+ base_reg_name, (int) offset); \
+ break; \
+ \
+ case REG: \
+ fprintf (STREAM, "[%s%s, %s%s%s]", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", \
+ REGISTER_PREFIX, reg_names[REGNO (index)] ); \
+ break; \
+ \
+ case MULT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ASHIFT: \
+ case ROTATERT: \
+ { \
+ fprintf (STREAM, "[%s%s, %s%s%s", REGISTER_PREFIX, \
+ base_reg_name, is_minus ? "-" : "", REGISTER_PREFIX,\
+ reg_names[REGNO (XEXP (index, 0))]); \
+ arm_print_operand (STREAM, index, 'S'); \
+ fputs ("]", STREAM); \
+ break; \
+ } \
+ \
+ default: \
+ abort(); \
+ } \
+ } \
+ else if (GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_INC \
+ || GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_DEC) \
+ { \
+ extern int output_memory_reference_mode; \
+ \
+ if (GET_CODE (XEXP (X, 0)) != REG) \
+ abort (); \
+ \
+ if (GET_CODE (X) == PRE_DEC || GET_CODE (X) == PRE_INC) \
+ fprintf (STREAM, "[%s%s, #%s%d]!", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == PRE_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ else \
+ fprintf (STREAM, "[%s%s], #%s%d", REGISTER_PREFIX, \
+ reg_names[REGNO (XEXP (X, 0))], \
+ GET_CODE (X) == POST_DEC ? "-" : "", \
+ GET_MODE_SIZE (output_memory_reference_mode)); \
+ } \
+ else output_addr_const(STREAM, X); \
+}
+
+/* Handles PIC addr specially */
+#define OUTPUT_INT_ADDR_CONST(STREAM,X) \
+ { \
+ if (flag_pic && GET_CODE(X) == CONST && is_pic(X)) \
+ { \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 0), 0)); \
+ fputs(" - (", STREAM); \
+ output_addr_const(STREAM, XEXP (XEXP (XEXP (X, 0), 1), 0)); \
+ fputs(")", STREAM); \
+ } \
+ else output_addr_const(STREAM, X); \
+ }
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int mi_delta = (DELTA); \
+ char *mi_op = mi_delta < 0 ? "sub" : "add"; \
+ int shift = 0; \
+ int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) \
+ ? 1 : 0); \
+ if (mi_delta < 0) mi_delta = -mi_delta; \
+ while (mi_delta != 0) \
+ { \
+ if ((mi_delta & (3 << shift)) == 0) \
+ shift += 2; \
+ else \
+ { \
+ fprintf (FILE, "\t%s\t%s%s, %s%s, #%d\n", \
+ mi_op, REGISTER_PREFIX, reg_names[this_regno], \
+ REGISTER_PREFIX, reg_names[this_regno], \
+ mi_delta & (0xff << shift)); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+ mi_delta &= ~(0xff << shift); \
+ shift += 8; \
+ } \
+ } \
+ fputs ("\tb\t", FILE); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fputc ('\n', FILE); \
+ /* CYGNUS LOCAL */ \
+ arm_increase_location (4); \
+ /* END CYGNUS LOCAL */ \
+} while (0)
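+
+/* For example, a thunk with DELTA == 4 for a function with a
+ non-aggregate return value emits roughly:
+ add r0, r0, #4
+ b <function>
+ adjusting the `this' pointer in r0 before the tail call. */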
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ (((COUNT) == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant ((FRAME), -4)) \
+ : NULL_RTX)
+
+/* Used to mask out junk bits from the return address, such as
+ processor state, interrupt status, condition codes and the like. */
+#define MASK_RETURN_ADDR \
+ /* If we are generating code for an ARM2/ARM3 machine or for an ARM6 \
+ in 26 bit mode, the condition codes must be masked out of the \
+ return address. This does not apply to ARM6 and later processors \
+ when running in 32 bit mode. */ \
+ ((!TARGET_APCS_32) ? (GEN_INT (0x03fffffc)) : (GEN_INT (0xffffffff)))
+
+/* Prototypes for arm.c -- actually, they aren't since the types aren't
+ fully defined yet. */
+
+void arm_override_options (/* void */);
+int use_return_insn (/* int */);
+int const_ok_for_arm (/* HOST_WIDE_INT */);
+int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
+ enum machine_mode */);
+int arm_split_constant (/* enum rtx_code, enum machine_mode,
+ HOST_WIDE_INT, struct rtx_def *,
+ struct rtx_def *, int */);
+enum rtx_code arm_canonicalize_comparison (/* enum rtx_code,
+ struct rtx_def ** */);
+int arm_return_in_memory (/* union tree_node * */);
+int legitimate_pic_operand_p (/* struct rtx_def * */);
+struct rtx_def *legitimize_pic_address (/* struct rtx_def *,
+ enum machine_mode,
+ struct rtx_def * */);
+int is_pic (/* struct rtx_def * */);
+void arm_finalize_pic (/* void */);
+int arm_rtx_costs (/* struct rtx_def *, enum rtx_code, enum rtx_code */);
+int arm_adjust_cost (/* struct rtx_def *, struct rtx_def *,
+ struct rtx_def *, int */);
+int const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int neg_const_double_rtx_ok_for_fpu (/* struct rtx_def * */);
+int s_register_operand (/* struct rtx_def *, enum machine_mode */);
+int f_register_operand (/* struct rtx_def *, enum machine_mode */);
+int reg_or_int_operand (/* struct rtx_def *, enum machine_mode */);
+int reload_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_rhsm_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_add_operand (/* struct rtx_def *, enum machine_mode */);
+int arm_not_operand (/* struct rtx_def *, enum machine_mode */);
+int offsettable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int alignable_memory_operand (/* struct rtx_def *, enum machine_mode */);
+int bad_signed_byte_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_rhs_operand (/* struct rtx_def *, enum machine_mode */);
+int fpu_add_operand (/* struct rtx_def *, enum machine_mode */);
+int power_of_two_operand (/* struct rtx_def *, enum machine_mode */);
+int di_operand (/* struct rtx_def *, enum machine_mode */);
+int soft_df_operand (/* struct rtx_def *, enum machine_mode */);
+int index_operand (/* struct rtx_def *, enum machine_mode */);
+int const_shift_operand (/* struct rtx_def *, enum machine_mode */);
+int shiftable_operator (/* struct rtx_def *, enum machine_mode */);
+int shift_operator (/* struct rtx_def *, enum machine_mode */);
+int equality_operator (/* struct rtx_def *, enum machine_mode */);
+int minmax_operator (/* struct rtx_def *, enum machine_mode */);
+int cc_register (/* struct rtx_def *, enum machine_mode */);
+int dominant_cc_register (/* struct rtx_def *, enum machine_mode */);
+int symbol_mentioned_p (/* struct rtx_def * */);
+int label_mentioned_p (/* struct rtx_def * */);
+enum rtx_code minmax_code (/* struct rtx_def * */);
+int adjacent_mem_locations (/* struct rtx_def *, struct rtx_def * */);
+int load_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int store_multiple_operation (/* struct rtx_def *, enum machine_mode */);
+int load_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_ldm_seq (/* struct rtx_def **, int */);
+int store_multiple_sequence (/* struct rtx_def **, int, int *, int *,
+ HOST_WIDE_INT * */);
+char *emit_stm_seq (/* struct rtx_def **, int */);
+int multi_register_push (/* struct rtx_def *, enum machine_mode */);
+int arm_valid_machine_decl_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+struct rtx_def *arm_gen_load_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+struct rtx_def *arm_gen_store_multiple (/* int, int, struct rtx_def *,
+ int, int, int, int, int */);
+int arm_gen_movstrqi (/* struct rtx_def ** */);
+struct rtx_def *gen_rotated_half_load (/* struct rtx_def * */);
+enum machine_mode arm_select_cc_mode (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+struct rtx_def *gen_compare_reg (/* enum rtx_code, struct rtx_def *,
+ struct rtx_def * */);
+void arm_reload_in_hi (/* struct rtx_def ** */);
+void arm_reload_out_hi (/* struct rtx_def ** */);
+void arm_reorg (/* struct rtx_def * */);
+char *fp_immediate_constant (/* struct rtx_def * */);
+void print_multi_reg (/* FILE *, char *, int, int */);
+char *output_call (/* struct rtx_def ** */);
+char *output_call_mem (/* struct rtx_def ** */);
+char *output_mov_long_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_mov_long_double_arm_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_fpu_from_arm (/* struct rtx_def ** */);
+char *output_mov_double_arm_from_fpu (/* struct rtx_def ** */);
+char *output_move_double (/* struct rtx_def ** */);
+char *output_mov_immediate (/* struct rtx_def ** */);
+char *output_add_immediate (/* struct rtx_def ** */);
+char *arithmetic_instr (/* struct rtx_def *, int */);
+void output_ascii_pseudo_op (/* FILE *, unsigned char *, int */);
+char *output_return_instruction (/* struct rtx_def *, int, int */);
+int arm_volatile_func (/* void */);
+void output_func_prologue (/* FILE *, int */);
+void output_func_epilogue (/* FILE *, int */);
+void arm_expand_prologue (/* void */);
+void arm_print_operand (/* FILE *, struct rtx_def *, int */);
+void final_prescan_insn (/* struct rtx_def *, struct rtx_def **, int */);
+#ifdef AOF_ASSEMBLER
+struct rtx_def *aof_pic_entry (/* struct rtx_def * */);
+void aof_dump_pic_table (/* FILE * */);
+char *aof_text_section (/* void */);
+char *aof_data_section (/* void */);
+void aof_add_import (/* char * */);
+void aof_delete_import (/* char * */);
+void aof_dump_imports (/* FILE * */);
+#endif
+/* CYGNUS LOCAL nickc */
+int ok_integer_or_other ();
+/* END CYGNUS LOCAL */
+
+#endif /* __ARM_H__ */
diff --git a/gcc_arm/config/arm/arm_990720.md b/gcc_arm/config/arm/arm_990720.md
new file mode 100755
index 0000000..807d4cb
--- /dev/null
+++ b/gcc_arm/config/arm/arm_990720.md
@@ -0,0 +1,6488 @@
+;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;; Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
+;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+;; and Martin Simmons (@harleqn.co.uk).
+;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; There are patterns in this file to support XFmode arithmetic.
+;; Unfortunately RISC iX doesn't work well with these so they are disabled.
+;; (See arm.h)
+
+;; UNSPEC Usage:
+;; 0 `sin' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 1 `cos' operation: operand 0 is the result, operand 1 the parameter,
+;; the mode is MODE_FLOAT
+;; 2 `push multiple' operation: operand 0 is the first register. Subsequent
+;; registers are in parallel (use...) expressions.
+;; 3 A symbol that has been treated properly for pic usage, that is, we
+;; will add the pic_register value to it before trying to dereference it.
+;; Note: sin and cos are no longer used.
+
+;; Attributes
+
+; PROG_MODE attribute is used to determine whether condition codes are
+; clobbered by a call insn: they are if in prog32 mode. This is controlled
+; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
+(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+
+(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+
+; Floating Point Unit. If we only have floating point emulation, then there
+; is no point in scheduling the floating point insns. (Well, for best
+; performance we should try to group them together.)
+
+(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
+
+; LENGTH of an instruction (in bytes)
+(define_attr "length" "" (const_int 4))
+
+; An assembler sequence may clobber the condition codes without us knowing
+(define_asm_attributes
+ [(set_attr "conds" "clob")
+ (set_attr "length" "4")])
+
+; TYPE attribute is used to detect floating point instructions which, if
+; running on a co-processor can run in parallel with other, basic instructions
+; If write-buffer scheduling is enabled then it can also be used in the
+; scheduling of writes.
+
+; Classification of each insn
+; normal any data instruction that doesn't hit memory or fp regs
+; mult a multiply instruction
+; block blockage insn, this blocks all functional units
+; float a floating point arithmetic operation (subject to expansion)
+; fdivx XFmode floating point division
+; fdivd DFmode floating point division
+; fdivs SFmode floating point division
+; fmul Floating point multiply
+; ffmul Fast floating point multiply
+; farith Floating point arithmetic (4 cycle)
+; ffarith Fast floating point arithmetic (2 cycle)
+; float_em a floating point arithmetic operation that is normally emulated
+; even on a machine with an fpa.
+; f_load a floating point load from memory
+; f_store a floating point store to memory
+; f_mem_r a transfer of a floating point register to a real reg via mem
+; r_mem_f the reverse of f_mem_r
+; f_2_r fast transfer float to arm (no memory needed)
+; r_2_f fast transfer arm to float
+; call a subroutine call
+; load any load from memory
+; store1 store 1 word to memory from arm registers
+; store2 store 2 words
+; store3 store 3 words
+; store4 store 4 words
+;
+(define_attr "type"
+ "normal,mult,block,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith,float_em,f_load,f_store,f_mem_r,r_mem_f,f_2_r,r_2_f,call,load,store1,store2,store3,store4"
+ (const_string "normal"))
+
+;; CYGNUS LOCAL load scheduling
+; Load scheduling, set from the arm_ld_sched variable
+; initialised by arm_override_options()
+(define_attr "ldsched" "no,yes"
+ (const (symbol_ref "arm_ld_sched")))
+;; END CYGNUS LOCAL
+
+; condition codes: this one is used by final_prescan_insn to speed up
+; conditionalizing instructions. It saves having to scan the rtl to see if
+; it uses or alters the condition codes.
+
+; USE means that the condition codes are used by the insn in the process of
+; outputting code, this means (at present) that we can't use the insn in
+; inlined branches
+
+; SET means that the purpose of the insn is to set the condition codes in a
+; well defined manner.
+
+; CLOB means that the condition codes are altered in an undefined manner, if
+; they are altered at all
+
+; JUMP_CLOB is used when the conditions are not defined if a branch is taken,
+; but are if the branch wasn't taken; the effect is to limit the branch
+; elimination scanning.
+
+; NOCOND means that the condition codes are neither altered nor affect the
+; output of this insn
+
+(define_attr "conds" "use,set,clob,jump_clob,nocond"
+ (if_then_else (eq_attr "type" "call")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_string "clob") (const_string "nocond"))
+ (const_string "nocond")))
+
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+
+(define_attr "write_conflict" "no,yes"
+ (if_then_else (eq_attr "type"
+ "block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
+ (const_string "yes")
+ (const_string "no")))
+
+(define_attr "core_cycles" "single,multi"
+ (if_then_else (eq_attr "type"
+ "normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
+ (const_string "single")
+ (const_string "multi")))
+
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly. When writing the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of 1 MCLK
+; cycle to add as well.
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivx")) 71 69)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivd")) 59 57)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fdivs")) 31 29)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "fmul")) 9 7)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffmul")) 6 4)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "farith")) 4 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "ffarith")) 2 2)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "r_2_f")) 5 3)
+
+(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_2_r")) 1 2)
+
+;; The fpa10 doesn't really have a memory read unit, but it can start to
+;; speculatively execute the instruction in the pipeline, provided the data
+;; is already loaded, so pretend reads have a delay of 2 (and that the
+;; pipeline is infinite).
+
+(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
+ (eq_attr "type" "f_load")) 3 1)
+
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially), the fact that reads will stall
+;; until the write buffer empties.
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
+ [(eq_attr "write_conflict" "yes")])
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
+
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "no") (eq_attr "type" "mult")) 16 16)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "no"))
+ (eq_attr "type" "mult")) 4 4)
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "is_strongarm" "yes"))
+ (eq_attr "type" "mult")) 3 2)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
+
+;; CYGNUS LOCAL
+;; APCS support: When generating code for the software stack checking
+;; model, we need to be able to perform calls to the special exception
+;; handler routines. These routines are *NOT* APCS conforming, so we
+;; do not need to mark any registers as clobbered over the call other
+;; than the lr/r14 modified by the actual BL instruction. Rather than
+;; trying to force the RTL for the existing comparison and call to
+;; achieve this, we simply have a pattern that does the desired job.
+
+;; TODO: This is not ideal since it does not specify all of the
+;; operators involved:
+;; cmp %op0,%op1 cmpsi_insn (compare)
+;; bl%op3 %op2 call_value_symbol (call)
+;; Unfortunately since we do not go through the normal arm_ccfsm_state
+;; processing we cannot use the %? operand replacement for the BL
+;; condition.
+
+(define_insn "cond_call"
+ [(compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "" "X")
+ (match_operator 3 "comparison_operator" [(reg:CC 24) (const_int 0)])
+ (clobber (reg:CC 24))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[2]) == SYMBOL_REF && GET_CODE (operands[3]) == LTU"
+ "cmp\\t%0, %1\;bllt\\t%a2"
+[(set_attr "conds" "clob")
+ (set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; END CYGNUS LOCAL
+
+;; Note: For DImode insns, there is normally no reason why operands should
+;; not be in the same register, what we don't want is for something being
+;; written to partially overlap something that is an input.
+
+;; Addition insns.
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %Q2\;adc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*adddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (plus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "adds\\t%Q0, %Q1, %2\;adc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (PLUS, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*addsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
+ ""
+ "@
+ add%?\\t%0, %1, %2
+ sub%?\\t%0, %1, #%n2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_insn "*addsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (const_int 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+;; The next four insns work because they compare the result with one of
+;; the operands, and we know that the use of the condition code is
+;; either GEU or LTU, so we can use the carry flag from the addition
+;; instead of doing the compare a second time.
+(define_insn "*addsi3_compare_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 1)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_compare_op2"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (match_dup 2)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ add%?s\\t%0, %1, %2
+ sub%?s\\t%0, %1, #%n2"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op0"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 0)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*compare_addsi2_op1"
+ [(set (reg:CC_C 24)
+ (compare:CC_C
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (match_dup 1)))]
+ ""
+ "@
+ cmn%?\\t%0, %1
+ cmp%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*addsi3_carryin"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt1"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (ltu:SI (reg:CC_C 24) (const_int 0))))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "*addsi3_carryin_alt3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "adc%?\\t%0, %1, %2"
+[(set_attr "conds" "use")])
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (plus:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ add%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+; If a constant is too big to fit in a single instruction then it will be
+; pre-loaded into a register, taking at least two insns; we might be able
+; to merge the load with an add, but it depends on the exact value.
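+;
+; For example (a sketch; the code in the split below chooses the actual
+; decomposition), adding #0xF000F0, which no single rotated 8-bit ARM
+; immediate can encode, can be emitted as two valid immediates:
+;
+;	add	r0, r1, #0xF00000
+;	add	r0, r0, #0xF0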
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ "!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2])))"
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
+
+  /* This code is similar to the approach followed in movsi, but it must
+     generate exactly two insns.  */
+
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0) i = 0;
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+	  /* We might be able to do this as (larger number - small number).  */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+ FAIL;
+ }
+ }
+  /* If we get here, we have found a way of doing it in two instructions;
+     the two constants are in val and temp.  */
+ operands[2] = GEN_INT ((int)val);
+ operands[3] = GEN_INT ((int)temp);
+}
+")
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (plus:SF (match_operand:SF 1 "s_register_operand" "f,f")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?s\\t%0, %1, %2
+ suf%?s\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f,f")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f,f"))
+ (match_operand:DF 2 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ adf%?d\\t%0, %1, %2
+ suf%?d\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*adddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (plus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "adf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "addxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (plus:XF (match_operand:XF 1 "s_register_operand" "f,f")
+ (match_operand:XF 2 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ adf%?e\\t%0, %1, %2
+ suf%?e\\t%0, %1, #%N2"
+[(set_attr "type" "farith")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
+ (match_operand:DI 2 "s_register_operand" "r,0,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_di_sesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (minus:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))
+ (clobber (reg:CC 24))]
+ ""
+ "rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi_zesidi_zesidi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (minus:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2],
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*subsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_int_operand" "rI,?n")
+ (match_operand:SI 2 "s_register_operand" "r,r")))]
+ ""
+ "@
+ rsb%?\\t%0, %2, %1
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (minus:SI (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "s_register_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[1]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
+ operands[2], 0);
+ DONE;
+")
+
+(define_insn "*subsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ sub%?s\\t%0, %1, %2
+ rsb%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])))]
+ ""
+ "@
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "*,8")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (minus:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?s\\t%0, %1, %2
+ rsf%?s\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (minus:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f,f"))))]
+ "TARGET_HARD_FLOAT"
+ "@
+ suf%?d\\t%0, %1, %2
+ rsf%?d\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+(define_insn "*subdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (minus:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "suf%?d\\t%0, %1, %2"
+[(set_attr "type" "farith")])
+
+(define_insn "subxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (minus:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ suf%?e\\t%0, %1, %2
+ rsf%?e\\t%0, %2, %1"
+[(set_attr "type" "farith")])
+
+;; Multiplication insns
+
+;; Use `&' and then `0' to prevent operands 0 and 2 from being the same
+;; (MUL requires that the destination and the first source register
+;; differ), while still allowing operands 0 and 1 to be tied.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0")))]
+ ""
+ "mul%?\\t%0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (mult:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 1 "s_register_operand" "%?r,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r"))]
+ ""
+ "mul%?s\\t%0, %2, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+;; Unnamed templates to match MLA instruction.
+
+(define_insn "*mulsi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI
+ (mult:SI (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0")))]
+ ""
+ "mla%?\\t%0, %2, %1, %3"
+[(set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
+ (plus:SI (mult:SI (match_dup 2) (match_dup 1))
+ (match_dup 3)))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*mulsi3addsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
+ ""
+ "mla%?s\\t%0, %2, %1, %3"
+[(set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "smull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r"))))]
+ "arm_fast_multiply"
+ "umull%?\\t%Q0, %R0, %1, %2"
+[(set_attr "type" "mult")])
+
+(define_insn "smulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "smull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "umulsi3_highpart"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
+ (truncate:SI
+ (lshiftrt:DI
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "arm_fast_multiply"
+ "umull%?\\t%3, %0, %2, %1"
+[(set_attr "type" "mult")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "fml%?s\\t%0, %1, %2"
+[(set_attr "type" "ffmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "*muldf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mult:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "muf%?d\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+(define_insn "mulxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mult:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "muf%?e\\t%0, %1, %2"
+[(set_attr "type" "fmul")])
+
+;; Division insns
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f")
+ (div:SF (match_operand:SF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ fdv%?s\\t%0, %1, %2
+ frd%?s\\t%0, %2, %1"
+[(set_attr "type" "fdivs")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG,f")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ dvf%?d\\t%0, %1, %2
+ rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (match_operand:DF 1 "fpu_rhs_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rdf%?d\\t%0, %2, %1"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*divdf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (div:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "dvf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "divxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f,f")
+ (div:XF (match_operand:XF 1 "fpu_rhs_operand" "f,G")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG,f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ dvf%?e\\t%0, %1, %2
+ rdf%?e\\t%0, %2, %1"
+[(set_attr "type" "fdivx")])
+
+;; Modulo insns
+
+(define_insn "modsf3"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (mod:SF (match_operand:SF 1 "s_register_operand" "f")
+ (match_operand:SF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?s\\t%0, %1, %2"
+[(set_attr "type" "fdivs")])
+
+(define_insn "moddf3"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_df"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (match_operand:DF 2 "fpu_rhs_operand" "fG")))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_df_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (match_operand:DF 1 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "*moddf_esfdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (mod:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF
+ (match_operand:SF 2 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "rmf%?d\\t%0, %1, %2"
+[(set_attr "type" "fdivd")])
+
+(define_insn "modxf3"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (mod:XF (match_operand:XF 1 "s_register_operand" "f")
+ (match_operand:XF 2 "fpu_rhs_operand" "fG")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "rmf%?e\\t%0, %1, %2"
+[(set_attr "type" "fdivx")])
+
+;; Boolean and,ior,xor insns
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %Q2\;and%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_sesdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "and%?\\t%Q0, %Q1, %2\;and%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,K,?n")))]
+ ""
+ "@
+ and%?\\t%0, %1, %2
+ bic%?\\t%0, %1, #%B2
+ #"
+[(set_attr "length" "4,4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! (const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~ INTVAL (operands[2])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*andsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (and:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ and%?s\\t%0, %1, %2
+ bic%?s\\t%0, %1, #%B2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_not_operand" "rI,K"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r"))]
+ ""
+ "@
+ tst%?\\t%0, %1
+ bic%?s\\t%3, %0, #%B1"
+[(set_attr "conds" "set")])
+
+(define_insn "*zeroextractsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 32
+ && INTVAL (operands[1]) > 0
+ && INTVAL (operands[1]) + (INTVAL (operands[2]) & 1) <= 8
+ && INTVAL (operands[1]) + INTVAL (operands[2]) <= 32"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"tst%?\\t%0, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")])
+
+;; ??? This pattern does not work because it does not check that
+;; start+length is less than or equal to 8, which is necessary for the
+;; bitfield to fit within a single byte.  The pattern was deleted
+;; Feb 25, 1999 in egcs, so we have simply disabled it for 99r1.
+
+(define_insn "*zeroextractqi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (zero_extract:SI
+ (match_operand:QI 0 "memory_operand" "m")
+ (match_operand 1 "const_int_operand" "n")
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:QI 3 "=r"))]
+ "0 && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) < 8
+ && INTVAL (operands[1]) > 0 && INTVAL (operands[1]) <= 8"
+ "*
+{
+ unsigned int mask = 0;
+ int cnt = INTVAL (operands[1]);
+
+ while (cnt--)
+ mask = (mask << 1) | 1;
+ operands[1] = GEN_INT (mask << INTVAL (operands[2]));
+ output_asm_insn (\"ldr%?b\\t%3, %0\", operands);
+ output_asm_insn (\"tst%?\\t%3, %1\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+;;; ??? This pattern is bogus. If operand3 has bits outside the range
+;;; represented by the bitfield, then this will produce incorrect results.
+;;; Somewhere, the value needs to be truncated. On targets like the m68k,
+;;; which have a real bitfield insert instruction, the truncation happens
+;;; in the bitfield insert instruction itself. Since arm does not have a
+;;; bitfield insert instruction, we would have to emit code here to truncate
+;;; the value before we insert. This loses some of the advantage of having
+;;; this insv pattern, so this pattern needs to be reevaluated.
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" ""))
+ (match_operand:SI 3 "nonmemory_operand" ""))]
+ ""
+ "
+{
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
+
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+      /* A trick: since we are setting the bottom bits in the word,
+	 we can shift operand[3] up, operand[0] down, OR them together
+	 and rotate the result back again.  This takes 3 insns, and
+	 the third might be mergeable into another op.  */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_iorsi3 (op1, gen_rtx (LSHIFTRT, SImode, operands[0],
+ operands[1]),
+ op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && ! (const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
+
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (subtarget,
+ gen_rtx (LSHIFTRT, SImode, op1,
+ operands[1]), op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
+
+ if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
+
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~ (mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ op0 = gen_rtx (ASHIFT, SImode, op0, operands[2]);
+
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
+
+ if (start_bit != 0)
+ op1 = gen_rtx (ASHIFT, SImode, op1, operands[2]);
+
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
+
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
+
+ DONE;
+}
+")
+
+;; Constants for operand 2 will never be given to these patterns.
+(define_insn "*anddi_notdi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
+ (match_operand:DI 1 "s_register_operand" "0,r")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %Q2\;bic%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*anddi_notzesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ bic%?\\t%Q0, %Q1, %2
+ bic%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*anddi_notsesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (and:DI (not:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r")))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "bic%?\\t%Q0, %Q1, %2\;bic%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "andsi_notsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2")
+
+(define_insn "andsi_not_shiftsi_si"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM")]))
+ (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "bic%?\\t%0, %1, %2%S4")
+
+(define_insn "*andsi_notsi_si_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (not:SI (match_dup 2)) (match_dup 1)))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*andsi_notsi_si_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (and:SI (not:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "bic%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ (ior:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "r")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %Q2\;orr%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*iordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%?\\t%Q0, %Q1, %2
+ orr%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*iordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (ior:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "orr%?\\t%Q0, %Q1, %2\;orr%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1],
+ (reload_in_progress || reload_completed
+ ? 0 : preserve_subexpressions_p ()));
+ DONE;
+ }
+")
+
+(define_insn "*iorsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "r,r")
+ (match_operand:SI 2 "reg_or_int_operand" "rI,?n")))]
+ ""
+ "@
+ orr%?\\t%0, %1, %2
+ #"
+[(set_attr "length" "4,16")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "! const_ok_for_arm (INTVAL (operands[2]))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
+ operands[1], 0);
+ DONE;
+")
+
+(define_insn "*iorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*iorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (ior:SI (match_operand:SI 1 "s_register_operand" "%r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "orr%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (match_operand:DI 1 "s_register_operand" "%0,0")
+ (match_operand:DI 2 "s_register_operand" "r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %Q2\;eor%?\\t%R0, %R1, %R2"
+[(set_attr "length" "8")])
+
+(define_insn "*xordi_zesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (zero_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ eor%?\\t%Q0, %Q1, %2
+ eor%?\\t%Q0, %Q1, %2\;mov%?\\t%R0, %R1"
+[(set_attr "length" "4,8")])
+
+(define_insn "*xordi_sesidi_di"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (xor:DI (sign_extend:DI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "eor%?\\t%Q0, %Q1, %2\;eor%?\\t%R0, %R1, %2, asr #31"
+[(set_attr "length" "8")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")))]
+ ""
+ "eor%?\\t%0, %1, %2")
+
+(define_insn "*xorsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (xor:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "eor%?s\\t%0, %1, %2"
+[(set_attr "conds" "set")])
+
+(define_insn "*xorsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (xor:SI (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "teq%?\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; By De Morgan's laws, (IOR (AND (NOT A) (NOT B)) C) can be split as
+;; D = AND (IOR A B) (NOT C) followed by (NOT D); the final NOT can
+;; sometimes be merged into one of the following insns.
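+;;
+;; e.g. (~a & ~b) | c, which computed directly needs two MVNs before the
+;; AND, becomes (a sketch, registers arbitrary):
+;;
+;;	orr	r3, r0, r1		@ a | b
+;;	bic	r3, r3, r2		@ (a | b) & ~c  (the insn below)
+;;	mvn	r4, r3			@ the final NOT, often merged later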
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (not:SI (match_operand:SI 2 "arm_rhs_operand" "rI")))
+ (match_operand:SI 3 "arm_rhs_operand" "rI")))
+ (clobber (match_operand:SI 4 "s_register_operand" "=r"))]
+ ""
+ [(set (match_dup 4) (and:SI (ior:SI (match_dup 1) (match_dup 2))
+ (not:SI (match_dup 3))))
+ (set (match_dup 0) (not:SI (match_dup 4)))]
+ ""
+)
+
+(define_insn "*andsi_iorsi3_notsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r")
+ (and:SI (ior:SI (match_operand:SI 1 "s_register_operand" "r,r,0")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI"))
+ (not:SI (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI"))))]
+ ""
+ "orr%?\\t%0, %1, %2\;bic%?\\t%0, %0, %3"
+[(set_attr "length" "8")])
+
+
+
+;; Minimum and maximum insns
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movlt\\t%0, %2
+ cmp\\t%1, %2\;movge\\t%0, %1
+ cmp\\t%1, %2\;movge\\t%0, %1\;movlt\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (smin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movge\\t%0, %2
+ cmp\\t%1, %2\;movlt\\t%0, %1
+ cmp\\t%1, %2\;movlt\\t%0, %1\;movge\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umax:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcc\\t%0, %2
+ cmp\\t%1, %2\;movcs\\t%0, %1
+ cmp\\t%1, %2\;movcs\\t%0, %1\;movcc\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (umin:SI (match_operand:SI 1 "s_register_operand" "0,r,?r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%1, %2\;movcs\\t%0, %2
+ cmp\\t%1, %2\;movcc\\t%0, %1
+ cmp\\t%1, %2\;movcc\\t%0, %1\;movcs\\t%0, %2"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*store_minmaxsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (match_operator:SI 3 "minmax_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ operands[3] = gen_rtx (minmax_code (operands[3]), SImode, operands[1],
+ operands[2]);
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"str%d3\\t%1, %0\", operands);
+ output_asm_insn (\"str%D3\\t%2, %0\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")
+ (set_attr "type" "store1")])
+
+; Reject the frame pointer in operand[1], since reloading this after
+; it has been eliminated can cause carnage.
+(define_insn "*minmax_arithsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 4 "shiftable_operator"
+ [(match_operator:SI 5 "minmax_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ "GET_CODE (operands[1]) != REG
+ || (REGNO(operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO(operands[1]) != ARG_POINTER_REGNUM)"
+ "*
+{
+ enum rtx_code code = GET_CODE (operands[4]);
+
+ operands[5] = gen_rtx (minmax_code (operands[5]), SImode, operands[2],
+ operands[3]);
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%i4%d5\\t%0, %1, %2\", operands);
+ if (which_alternative != 0 || operands[3] != const0_rtx
+ || (code != PLUS && code != MINUS && code != IOR && code != XOR))
+ output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
+ return \"\";
+}
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+
+;; Shift and rotation insns
+
+(define_expand "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (31);
+")
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ {
+ emit_insn (gen_movsi (operands[0], const0_rtx));
+ DONE;
+ }
+")
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "reg_or_int_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT)
+ operands[2] = GEN_INT ((32 - INTVAL (operands[2])) % 32);
+ else
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_subsi3 (reg, GEN_INT (32), operands[2]));
+ operands[2] = reg;
+ }
+")
+
+(define_expand "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "arm_rhs_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (operands[2])) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) % 32);
+")
+
+(define_insn "*shiftsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")]))]
+ ""
+ "mov%?\\t%0, %1%S3")
+
+(define_insn "*shiftsi3_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 3 [(match_dup 1) (match_dup 2)]))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*shiftsi3_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mov%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "mvn%?\\t%0, %1%S3")
+
+(define_insn "*notsi_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*not_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+
+;; Unary arithmetic insns
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "rsb%?\\t%0, %1, #0")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*negdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (neg:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "mnf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "negxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (neg:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mnf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; abssi2 doesn't really clobber the condition codes if a different register
+;; is being set. To keep things simple, assume during rtl manipulations that
+;; it does, but tell the final scan operator the truth. Similarly for
+;; (neg (abs...))
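+;;
+;; The eor/sub alternative of abssi2 relies on the branchless identity
+;; abs(x) = (x ^ (x >> 31)) - (x >> 31), with an arithmetic shift: for
+;; x < 0 the shift gives -1, so the EOR forms ~x and the SUB adds 1;
+;; for x >= 0 it gives 0 and both operations leave x unchanged.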
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (abs:SI (match_operand:SI 1 "s_register_operand" "0,r")))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsblt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "*neg_abssi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,&r")
+ (neg:SI (abs:SI (match_operand:SI 1 "s_register_operand" "0,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "@
+ cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
+ eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
+[(set_attr "conds" "clob,*")
+ (set_attr "length" "8")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "*absdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (abs:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "abs%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "absxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (abs:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "abs%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?s\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "*sqrtdf_esfdf"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (sqrt:DF (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "sqt%?d\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+(define_insn "sqrtxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (sqrt:XF (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "sqt%?e\\t%0, %1"
+[(set_attr "type" "float_em")])
+
+;; SIN COS TAN and family are always emulated, so it's probably better
+;; to always call a library function.
+;(define_insn "sinsf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sindf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*sindf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 0))]
+; "TARGET_HARD_FLOAT"
+; "sin%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "sinxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 0))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "sin%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cossf2"
+; [(set (match_operand:SF 0 "s_register_operand" "=f")
+; (unspec:SF [(match_operand:SF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?s\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosdf2"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(match_operand:DF 1 "s_register_operand" "f")] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "*cosdf_esfdf"
+; [(set (match_operand:DF 0 "s_register_operand" "=f")
+; (unspec:DF [(float_extend:DF
+; (match_operand:SF 1 "s_register_operand" "f"))] 1))]
+; "TARGET_HARD_FLOAT"
+; "cos%?d\\t%0, %1"
+;[(set_attr "type" "float_em")])
+;
+;(define_insn "cosxf2"
+; [(set (match_operand:XF 0 "s_register_operand" "=f")
+; (unspec:XF [(match_operand:XF 1 "s_register_operand" "f")] 1))]
+; "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+; "cos%?e\\t%0, %1"
+;[(set_attr "type" "float_em")])
+
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
+ (not:DI (match_operand:DI 1 "s_register_operand" "?r,0")))]
+ ""
+ "mvn%?\\t%Q0, %Q1\;mvn%?\\t%R0, %R1"
+[(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "mvn%?\\t%0, %1")
+
+(define_insn "*notsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+(define_insn "*notsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "mvn%?s\\t%0, %1"
+[(set_attr "conds" "set")])
+
+;; Fixed <--> Floating conversion insns
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float:SF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?s\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float:DF (match_operand:SI 1 "s_register_operand" "r")))]
+ "TARGET_HARD_FLOAT"
+ "flt%?d\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "floatsixf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float:XF (match_operand:SI 1 "s_register_operand" "r")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "flt%?e\\t%0, %1"
+[(set_attr "type" "r_2_f")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+(define_insn "fix_truncxfsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (fix:SI (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "fix%?z\\t%0, %1"
+[(set_attr "type" "f_2_r")])
+
+;; Truncation insns
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfsf2"
+ [(set (match_operand:SF 0 "s_register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?s\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "truncxfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_truncate:DF
+ (match_operand:XF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+;; Zero and sign extension instructions.
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, #0\";
+"
+[(set_attr "length" "8")])
+
+(define_insn "zero_extendqidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "@
+ and%?\\t%Q0, %1, #255\;mov%?\\t%R0, #0
+ ldr%?b\\t%Q0, %1\;mov%?\\t%R0, #0"
+[(set_attr "length" "8")
+ (set_attr "type" "*,load")])
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "r")))]
+ ""
+ "*
+ if (REGNO (operands[1]) != REGNO (operands[0]) + (WORDS_BIG_ENDIAN ? 1 : 0))
+ output_asm_insn (\"mov%?\\t%Q0, %1\", operands);
+ return \"mov%?\\t%R0, %Q0, asr #31\";
+"
+[(set_attr "length" "8")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_dup 2) (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_movhi_bytes (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_insn "*zero_extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?h\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ GEN_INT (255)));
+ DONE;
+ }
+")
+
+(define_insn "*load_extendqisi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldr%?b\\t%0, %1\\t%@ zero_extendqisi2"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (subreg:QI (match_operand:SI 1 "" "") 0)))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "GET_CODE (operands[1]) != MEM"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (and:SI (match_dup 2) (const_int 255)))]
+ "")
+
+(define_insn "*compareqi_eq0"
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z (match_operand:QI 0 "s_register_operand" "r")
+ (const_int 0)))]
+ ""
+ "tst\\t%0, #255"
+[(set_attr "conds" "set")])
+
+(define_expand "extendhisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:HI 1 "nonimmediate_operand" "")
+ (const_int 16)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 16)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ here because the insn below will generate an LDRH instruction
+ rather than an LDR instruction, so we cannot get an unaligned
+ word access. */
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_SIGN_EXTEND (SImode, operands[1])));
+ DONE;
+ }
+
+ if (TARGET_SHORT_BY_BYTES && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "extendhisi2_mem"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 7)))
+ (set (match_dup 6) (ashift:SI (match_dup 4) (const_int 24)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "ldr%?sh\\t%0, %1"
+[(set_attr "type" "load")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operator:SI 3 "shiftable_operator"
+ [(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
+ (match_operand:SI 4 "s_register_operand" "")]))
+ (clobber (match_operand:SI 2 "s_register_operand" ""))]
+ "! arm_arch4"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0)
+ (match_op_dup 3
+ [(ashiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
+ "
+{
+ if ((operands[1] = gen_rotated_half_load (operands[1])) == NULL)
+ FAIL;
+}")
+
+(define_expand "extendqihi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, HImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
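+; For illustration (a sketch; register numbers and offset assumed): ldrsb
+; only accepts offsets in the range +/-255, so a load such as
+;     ldrsb   r0, [r4, #260]
+; is first output as "#" and the split below then turns it into, roughly,
+;     add     r0, r4, #260
+;     ldrsb   r0, [r0]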
+(define_insn "*extendqihi_insn"
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:HI 0 "s_register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0) (sign_extend:HI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[3] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[2] = gen_rtx (MEM, QImode, operands[3]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[3], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in correct canonical form */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_expand "extendqisi2"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "general_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "s_register_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ ""
+ "
+{
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx (SET, VOIDmode, operands[0],
+ gen_rtx (SIGN_EXTEND, SImode, operands[1])));
+ DONE;
+ }
+ if (! s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+; Rather than restricting all byte accesses to memory addresses that ldrsb
+; can handle, we fix up the ones that ldrsb can't grok with a split.
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ "arm_arch4"
+ "*
+ /* If the address is invalid, this will split the instruction into two. */
+ if (bad_signed_byte_operand(operands[1], QImode))
+ return \"#\";
+ return \"ldr%?sb\\t%0, %1\";
+"
+[(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "bad_signed_byte_operand" "")))]
+ "arm_arch4 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (sign_extend:SI (match_dup 2)))]
+ "
+ {
+ HOST_WIDE_INT offset;
+
+ operands[2] = gen_rtx (MEM, QImode, operands[0]);
+ MEM_COPY_ATTRIBUTES (operands[2], operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+ operands[1] = XEXP (operands[1], 0);
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
+ {
+ HOST_WIDE_INT low = (offset > 0
+ ? (offset & 0xff) : -((-offset) & 0xff));
+ XEXP (operands[2], 0) = plus_constant (operands[0], low);
+ operands[1] = plus_constant (XEXP (operands[1], 0), offset - low);
+ }
+ /* Ensure the sum is in correct canonical form */
+ else if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) != CONST_INT
+ && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ operands[1] = gen_rtx (PLUS, GET_MODE (operands[1]),
+ XEXP (operands[1], 1), XEXP (operands[1], 0));
+ }
+")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "s_register_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mvf%?d\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extendsfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:SF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+(define_insn "extenddfxf2"
+ [(set (match_operand:XF 0 "s_register_operand" "=f")
+ (float_extend:XF (match_operand:DF 1 "s_register_operand" "f")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "mvf%?e\\t%0, %1"
+[(set_attr "type" "ffarith")])
+
+
+;; Move insns (including loads and stores)
+
+;; XXX Just some ideas about movti.
+;; I don't think these are a good idea on the ARM; there just aren't
+;; enough registers.
+;;(define_expand "loadti"
+;; [(set (match_operand:TI 0 "s_register_operand" "")
+;; (mem:TI (match_operand:SI 1 "address_operand" "")))]
+;; "" "")
+
+;;(define_expand "storeti"
+;; [(set (mem:TI (match_operand:TI 0 "address_operand" ""))
+;; (match_operand:TI 1 "s_register_operand" ""))]
+;; "" "")
+
+;;(define_expand "movti"
+;; [(set (match_operand:TI 0 "general_operand" "")
+;; (match_operand:TI 1 "general_operand" ""))]
+;; ""
+;; "
+;;{
+;; rtx insn;
+;;
+;; if (GET_CODE (operands[0]) == MEM && GET_CODE (operands[1]) == MEM)
+;; operands[1] = copy_to_reg (operands[1]);
+;; if (GET_CODE (operands[0]) == MEM)
+;; insn = gen_storeti (XEXP (operands[0], 0), operands[1]);
+;; else if (GET_CODE (operands[1]) == MEM)
+;; insn = gen_loadti (operands[0], XEXP (operands[1], 0));
+;; else
+;; FAIL;
+;;
+;; emit_insn (insn);
+;; DONE;
+;;}")
+
+;; Recognise garbage generated above.
+
+;;(define_insn ""
+;; [(set (match_operand:TI 0 "general_operand" "=r,r,r,<,>,m")
+;; (match_operand:TI 1 "general_operand" "<,>,m,r,r,r"))]
+;; ""
+;; "*
+;; {
+;; register mem = (which_alternative < 3);
+;; register char *template;
+;;
+;; operands[mem] = XEXP (operands[mem], 0);
+;; switch (which_alternative)
+;; {
+;; case 0: template = \"ldmdb\\t%1!, %M0\"; break;
+;; case 1: template = \"ldmia\\t%1!, %M0\"; break;
+;; case 2: template = \"ldmia\\t%1, %M0\"; break;
+;; case 3: template = \"stmdb\\t%0!, %M1\"; break;
+;; case 4: template = \"stmia\\t%0!, %M1\"; break;
+;; case 5: template = \"stmia\\t%0, %M1\"; break;
+;; }
+;; output_asm_insn (template, operands);
+;; return \"\";
+;; }")
+
+
+(define_insn "movdi"
+ [(set (match_operand:DI 0 "di_operand" "=r,r,o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ ""
+ "*
+ return (output_move_double (operands));
+"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ /* CYGNUS LOCAL nickc */
+ if (! ok_integer_or_other (operands[1]))
+ /* END CYGNUS LOCAL */
+ {
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX,
+ (reload_in_progress || reload_completed ? 0
+ : preserve_subexpressions_p ()));
+ DONE;
+ }
+ if (CONSTANT_P (operands[1]) && flag_pic)
+ operands[1] = legitimize_pic_address (operands[1], SImode,
+ ((reload_in_progress
+ || reload_completed)
+ ? operands[0] : 0));
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?\\t%0, %1
+ str%?\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "! (const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1])))"
+ [(clobber (const_int 0))]
+ "
+ arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
+ NULL_RTX, 0);
+ DONE;
+")
+
+(define_expand "movaddr"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:DI 1 "address_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movaddr_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:DI 1 "address_operand" "p"))]
+ "reload_completed
+ && (GET_CODE (operands[1]) == LABEL_REF
+ || (GET_CODE (operands[1]) == CONST
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (operands[1], 0), 1)) == CONST_INT))"
+ "adr%?\\t%0, %a1")
+
+/* When generating pic, we need to load the symbol offset into a register.
+ So that the optimizer does not confuse this with a normal symbol load
+ we use an unspec. The offset will be loaded from a constant pool entry,
+ since that is the only type of relocation we can use. */
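+;; For illustration only (the exact relocations depend on the assembler
+;; and ABI): the usual sequence loads the offset from the constant pool,
+;;     ldr     rN, .LCx        @ .LCx holds the symbol's PIC offset
+;; and then adds the PIC base register to the result.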
+
+(define_insn "pic_load_addr"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")] 3))]
+ "flag_pic"
+ "ldr%?\\t%0, %a1"
+ [(set_attr "type" "load")])
+
+;; This variant is used for AOF assembly, since it needs to mention the
+;; pic register in the rtl.
+(define_expand "pic_load_addr_based"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "") (match_dup 2)] 3))]
+ "flag_pic"
+ "operands[2] = pic_offset_table_rtx;")
+
+(define_insn "*pic_load_addr_based_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand 1 "" "")
+ (match_operand 2 "s_register_operand" "r")] 3))]
+ "flag_pic && operands[2] == pic_offset_table_rtx"
+ "*
+#ifdef AOF_ASSEMBLER
+ operands[1] = aof_pic_entry (operands[1]);
+#endif
+ output_asm_insn (\"ldr%?\\t%0, %a1\", operands);
+ return \"\";
+" [(set_attr "type" "load")])
+
+(define_insn "pic_add_dot_plus_eight"
+ [(set (pc) (label_ref (match_operand 0 "" "")))
+ (set (match_operand 1 "register_operand" "+r")
+ (plus:SI (match_dup 1) (const (plus:SI (pc) (const_int 8)))))]
+ "flag_pic"
+ "add%?\\t%1, %|pc, %1")
+
+;; If copying one reg to another we can set the condition codes according to
+;; its value. Such a move is common after a return from a subroutine when
+;; the result is being tested against zero.
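+;; For example (register numbers assumed), "if ((x = foo ()) != 0)" can
+;; copy and test in a single insn:
+;;     subs    r4, r0, #0      @ r4 = r0, condition codes set on the value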
+
+(define_insn "*movsi_compare0"
+ [(set (reg:CC 24) (compare:CC (match_operand:SI 1 "s_register_operand" "0,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r,r") (match_dup 1))]
+ ""
+ "@
+ cmp%?\\t%0, #0
+ sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+;; Subroutine to store a half word from a register into memory.
+;; Operand 0 is the source register (HImode)
+;; Operand 1 is the destination address in a register (SImode)
+
+;; In both this routine and the next, we must be careful not to spill
+;; a memory address of reg+large_const into a separate PLUS insn, since this
+;; can generate unrecognizable rtl.
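+;; For illustration (little-endian case; names assumed): a halfword store
+;; of register R to the address in A expands to roughly
+;;     strb    R, [A]          @ low byte
+;;     mov     T, R, asr #8    @ T is a fresh SImode temporary
+;;     strb    T, [A, #1]      @ high byte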
+
+(define_expand "storehi"
+ [;; store the low byte
+ (set (match_operand 1 "" "") (match_dup 3))
+ ;; extract the high byte
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ ;; store the high byte
+ (set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+(define_expand "storehi_bigend"
+ [(set (match_dup 4) (match_dup 3))
+ (set (match_dup 2)
+ (ashiftrt:SI (match_operand 0 "" "") (const_int 8)))
+ (set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+}
+")
+
+;; Subroutine to store a half word integer constant into memory.
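+;; For illustration (little-endian; registers assumed): storing the
+;; constant 0x1234 comes out as roughly
+;;     mov     r2, #0x34
+;;     strb    r2, [A]
+;;     mov     r3, #0x12
+;;     strb    r3, [A, #1]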
+(define_expand "storeinthi"
+ [(set (match_operand 0 "" "")
+ (subreg:QI (match_operand 1 "" "") 0))
+ (set (match_dup 3) (subreg:QI (match_dup 2) 0))]
+ ""
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
+
+ operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+}
+")
+
+(define_expand "storehi_single_op"
+ [(set (match_operand:HI 0 "memory_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ "arm_arch4"
+ "
+ if (! s_register_operand (operands[1], HImode))
+ operands[1] = copy_to_mode_reg (HImode, operands[1]);
+")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (arm_arch4)
+ {
+ emit_insn (gen_storehi_single_op (operands[0], operands[1]));
+ DONE;
+ }
+ if (GET_CODE (operands[1]) == CONST_INT)
+ emit_insn (gen_storeinthi (operands[0], operands[1]));
+ else
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ if (BYTES_BIG_ENDIAN)
+ emit_insn (gen_storehi_bigend (operands[1], operands[0]));
+ else
+ emit_insn (gen_storehi (operands[1], operands[0]));
+ }
+ DONE;
+ }
+ /* Sign extend a constant, and keep it in an SImode reg. */
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
+
+ /* If the constant is already valid, leave it alone. */
+ if (! const_ok_for_arm (val))
+ {
+ /* If setting all the top bits will make the constant
+ loadable in a single instruction, then set them.
+ Otherwise, sign extend the number. */
+
+ if (const_ok_for_arm (~ (val | ~0xffff)))
+ val |= ~0xffff;
+ else if (val & 0x8000)
+ val |= ~0xffff;
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (val)));
+ operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
+ }
+ else if (! arm_arch4)
+ {
+ /* Note: We do not have to worry about TARGET_SHORT_BY_BYTES
+ for v4 and up architectures because LDRH instructions will
+ be used to access the HI values, and these cannot generate
+ unaligned word access faults in the MMU. */
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ if (TARGET_SHORT_BY_BYTES)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+ rtx reg = gen_reg_rtx (SImode);
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && ((INTVAL(offset) & 1) != 1)
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) & ~3;
+ rtx new;
+
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ if (((INTVAL (offset) & 2) != 0)
+ ^ (BYTES_BIG_ENDIAN ? 1 : 0))
+ {
+ rtx reg2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ reg = reg2;
+ }
+ }
+ else
+ emit_insn (gen_movhi_bytes (reg, operands[1]));
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else if (BYTES_BIG_ENDIAN)
+ {
+ rtx base;
+ rtx offset = const0_rtx;
+
+ if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
+ || (GET_CODE (base) == PLUS
+ && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && GET_CODE (base = XEXP (base, 0)) == REG))
+ && REGNO_POINTER_ALIGN (REGNO (base)) >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ rtx new;
+
+ if ((INTVAL (offset) & 2) == 2)
+ {
+ HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
+ new = gen_rtx_MEM (SImode,
+ plus_constant (base, new_offset));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new) = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_movsi (reg, new));
+ }
+ else
+ {
+ new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ MEM_COPY_ATTRIBUTES (new, operands[1]);
+ RTX_UNCHANGING_P (new)
+ = RTX_UNCHANGING_P (operands[1]);
+ emit_insn (gen_rotated_loadsi (reg, new));
+ }
+
+ operands[1] = gen_lowpart (HImode, reg);
+ }
+ else
+ {
+ emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! const_ok_for_arm (INTVAL (operands[1]))
+ && ! const_ok_for_arm (~INTVAL (operands[1])))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx_SUBREG (SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}
+")
+
+(define_insn "rotated_loadsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
+ (const_int 16)))]
+ "! TARGET_SHORT_BY_BYTES"
+ "*
+{
+ rtx ops[2];
+
+ ops[0] = operands[0];
+ ops[1] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
+}"
+[(set_attr "type" "load")])
+
+(define_expand "movhi_bytes"
+ [(set (match_dup 2) (zero_extend:SI (match_operand:HI 1 "" "")))
+ (set (match_dup 3)
+ (zero_extend:SI (match_dup 6)))
+ (set (match_operand:SI 0 "" "")
+ (ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
+ ""
+ "
+{
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx (MEM, QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ RTX_UNCHANGING_P (mem1) = RTX_UNCHANGING_P (operands[1]);
+ mem2 = gen_rtx (MEM, QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ RTX_UNCHANGING_P (mem2) = RTX_UNCHANGING_P (operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
+
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+}
+")
+
+(define_expand "movhi_bigend"
+ [(set (match_dup 2)
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "") 0)
+ (const_int 16)))
+ (set (match_dup 3)
+ (ashiftrt:SI (match_dup 2) (const_int 16)))
+ (set (match_operand:HI 0 "s_register_operand" "")
+ (subreg:HI (match_dup 3) 0))]
+ ""
+ "
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+")
+
+;; Pattern to recognise the insn generated by the default case above.
+;; CYGNUS LOCAL nickc: Store before load to avoid a problem with reload.
+(define_insn "*movhi_insn_arch4"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,m,r")
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ "arm_arch4
+ && ok_integer_or_other (operands[0])
+ && ok_integer_or_other (operands[1])" ;; CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ str%?h\\t%1, %0\\t%@ movhi ;; CYGNUS LOCAL nickc
+ ldr%?h\\t%0, %1\\t%@ movhi" ;; CYGNUS LOCAL nickc
+[(set_attr "type" "*,*,store1,load")]) ;; CYGNUS LOCAL nickc
+;; END CYGNUS LOCAL
+
+(define_insn "*movhi_insn_littleend"
+ [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && ! BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL nickc */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL nickc
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi"
+[(set_attr "type" "*,*,load")])
+
+(define_insn "*movhi_insn_bigend"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
+ (match_operand:HI 1 "general_operand" "rI,K,m"))]
+ "! arm_arch4
+ && BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES
+ /* CYGNUS LOCAL NICKC */
+ && ok_integer_or_other (operands[1])"
+ ;; END CYGNUS LOCAL
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi
+ ldr%?\\t%0, %1\\t%@ movhi_bigend\;mov%?\\t%0, %0, asr #16"
+[(set_attr "type" "*,*,load")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*loadhi_si_bigend"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (rotate:SI (subreg:SI (match_operand:HI 1 "memory_operand" "m") 0)
+ (const_int 16)))]
+ "BYTES_BIG_ENDIAN
+ && ! TARGET_SHORT_BY_BYTES"
+ "ldr%?\\t%0, %1\\t%@ movhi_bigend"
+[(set_attr "type" "load")])
+
+(define_insn "*movhi_bytes"
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r")
+ (match_operand:HI 1 "arm_rhs_operand" "rI,K"))]
+ "TARGET_SHORT_BY_BYTES"
+ "@
+ mov%?\\t%0, %1\\t%@ movhi
+ mvn%?\\t%0, #%B1\\t%@ movhi")
+
+
+(define_expand "reload_outhi"
+ [(parallel [(match_operand:HI 0 "reload_memory_operand" "=o")
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ ""
+ "
+ arm_reload_out_hi (operands);
+ DONE;
+")
+
+(define_expand "reload_inhi"
+ [(parallel [(match_operand:HI 0 "s_register_operand" "=r")
+ (match_operand:HI 1 "reload_memory_operand" "o")
+ (match_operand:SI 2 "s_register_operand" "=&r")])]
+ "TARGET_SHORT_BY_BYTES"
+ "
+ arm_reload_in_hi (operands);
+ DONE;
+")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+ /* Everything except mem = const or mem = mem can be done easily */
+
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ }
+")
+
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "general_operand" "=r,r,r,m")
+ (match_operand:QI 1 "general_operand" "rI,K,m,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov%?\\t%0, %1
+ mvn%?\\t%0, #%B1
+ ldr%?b\\t%0, %1
+ str%?b\\t%1, %0"
+[(set_attr "type" "*,*,load,store1")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+")
+
+(define_insn "*movsf_hard_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,f,f,m,f,r,r,r,m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mvf%?s\\t%0, %1
+ mnf%?s\\t%0, #%N1
+ ldf%?s\\t%0, %1
+ stf%?s\\t%1, %0
+ str%?\\t%1, [%|sp, #-4]!\;ldf%?s\\t%0, [%|sp], #4
+ stf%?s\\t%1, [%|sp, #-4]!\;ldr%?\\t%0, [%|sp], #4
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4,4,8,8,4,4,4")
+ (set_attr "type"
+ "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*,load,store1")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -msoft-float.
+
+(define_insn "*movsf_soft_insn"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,mE,r"))]
+ "TARGET_SOFT_FLOAT
+ && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ "@
+ mov%?\\t%0, %1
+ ldr%?\\t%0, %1\\t%@ float
+ str%?\\t%1, %0\\t%@ float"
+[(set_attr "length" "4,4,4")
+ (set_attr "type" "*,load,store1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+")
+
+;; Reloading a df mode value stored in integer regs to memory can require a
+;; scratch reg.
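+;; For illustration (a rough sketch; registers and offset assumed): storing
+;; a DFmode value held in r4/r5 at [fp, #-4096] is out of range for str,
+;; so the address is built in the scratch first:
+;;     sub     rS, fp, #4096
+;;     stmia   rS, {r4, r5}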
+(define_expand "reload_outdf"
+ [(match_operand:DF 0 "reload_memory_operand" "=o")
+ (match_operand:DF 1 "s_register_operand" "r")
+ (match_operand:SI 2 "s_register_operand" "=&r")]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx (SUBREG, DImode, operands[0], 0);
+ operands[1] = gen_rtx (SUBREG, DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
+
+ emit_insn (gen_rtx (SET, VOIDmode, gen_rtx (MEM, DFmode, operands[2]),
+ operands[1]));
+
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+}
+")
+
+(define_insn "*movdf_hard_insn"
+ [(set (match_operand:DF 0 "general_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
+ (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ "TARGET_HARD_FLOAT
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ rtx ops[3];
+
+ switch (which_alternative)
+ {
+ case 0: return \"ldm%?ia\\t%m1, %M0\\t%@ double\";
+ case 1: return \"stm%?ia\\t%m0, %M1\\t%@ double\";
+ case 2: case 3: case 4: return output_move_double (operands);
+ case 5: return \"mvf%?d\\t%0, %1\";
+ case 6: return \"mnf%?d\\t%0, #%N1\";
+ case 7: return \"ldf%?d\\t%0, %1\";
+ case 8: return \"stf%?d\\t%1, %0\";
+ case 9: return output_mov_double_fpu_from_arm (operands);
+ case 10: return output_mov_double_arm_from_fpu (operands);
+ }
+}
+"
+[(set_attr "length" "4,4,8,8,8,4,4,4,4,8,8")
+ (set_attr "type"
+"load,store2,*,store2,load,ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r")])
+
+;; Software floating point version. This is essentially the same as movdi.
+;; Do not use `f' as a constraint to prevent reload from ever trying to use
+;; an `f' reg.
+
+(define_insn "*movdf_soft_insn"
+ [(set (match_operand:DF 0 "soft_df_operand" "=r,r,m")
+ (match_operand:DF 1 "soft_df_operand" "r,mF,r"))]
+ "TARGET_SOFT_FLOAT"
+ "* return output_move_double (operands);"
+[(set_attr "length" "8,8,8")
+ (set_attr "type" "*,load,store2")])
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "")
+
+;; Even when the XFmode patterns aren't enabled, we enable this after
+;; reloading so that we can push floating point registers in the prologue.
+
+(define_insn "*movxf_hard_insn"
+ [(set (match_operand:XF 0 "general_operand" "=f,f,f,m,f,r,r")
+ (match_operand:XF 1 "general_operand" "fG,H,m,f,r,f,r"))]
+ "TARGET_HARD_FLOAT && (ENABLE_XF_PATTERNS || reload_completed)"
+ "*
+ switch (which_alternative)
+ {
+ case 0: return \"mvf%?e\\t%0, %1\";
+ case 1: return \"mnf%?e\\t%0, #%N1\";
+ case 2: return \"ldf%?e\\t%0, %1\";
+ case 3: return \"stf%?e\\t%1, %0\";
+ case 4: return output_mov_long_double_fpu_from_arm (operands);
+ case 5: return output_mov_long_double_arm_from_fpu (operands);
+ case 6: return output_mov_long_double_arm_from_arm (operands);
+ }
+"
+[(set_attr "length" "4,4,4,4,8,8,12")
+ (set_attr "type" "ffarith,ffarith,f_load,f_store,r_mem_f,f_mem_r,*")])
+
+
+;; load- and store-multiple insns
+;; The ARM can load/store any set of registers, provided that they are in
+;; ascending order, but that generality is beyond GCC, so stick with what
+;; it knows: consecutive ranges of registers.
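+;; e.g. loading r4, r5 and r6 from the address in r0 becomes
+;;     ldmia   r0, {r4-r6}     @ one insn instead of three ldr's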
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) > 14
+ || REGNO (operands[0]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_load_multiple (REGNO (operands[0]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[1], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
+ MEM_IN_STRUCT_P(operands[1]),
+ MEM_SCALAR_P (operands[1]));
+")
+
+;; Load multiple with write-back
+
+(define_insn "*ldmsi_postinc"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (match_dup 1)))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"ldm%?ia\\t%0!, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+;; Ordinary load multiple
+
+(define_insn "*ldmsi"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 2 "s_register_operand" "r")))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_DEST (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_DEST (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"ldm%?ia\\t%0, {%1-%2}\\t%@ load multiple\", ops);
+ return \"\";
+}
+"
+[(set_attr "type" "load")])
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+  /* Support only fixed-point registers */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 14
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != REG
+ || GET_CODE (operands[0]) != MEM
+ || REGNO (operands[1]) > 14
+ || REGNO (operands[1]) + INTVAL (operands[2]) > 15)
+ FAIL;
+
+ operands[3]
+ = arm_gen_store_multiple (REGNO (operands[1]), INTVAL (operands[2]),
+ force_reg (SImode, XEXP (operands[0], 0)),
+ TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
+ MEM_IN_STRUCT_P(operands[0]),
+ MEM_SCALAR_P (operands[0]));
+")
+
+;; Store multiple with write-back
+
+(define_insn "*stmsi_postinc"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (match_operand:SI 1 "s_register_operand" "+r")
+ (plus:SI (match_dup 1)
+ (match_operand:SI 2 "const_int_operand" "n")))
+ (set (mem:SI (match_dup 1))
+ (match_operand:SI 3 "s_register_operand" "r"))])]
+ "(INTVAL (operands[2]) == 4 * (XVECLEN (operands[0], 0) - 2))"
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_SRC (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 1));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 2));
+
+ output_asm_insn (\"stm%?ia\\t%0!, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 5))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Ordinary store multiple
+
+(define_insn "*stmsi"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "s_register_operand" "r"))
+ (match_operand:SI 1 "s_register_operand" "r"))])]
+ ""
+ "*
+{
+ rtx ops[3];
+ int count = XVECLEN (operands[0], 0);
+
+ ops[0] = XEXP (SET_DEST (XVECEXP (operands[0], 0, 0)), 0);
+ ops[1] = SET_SRC (XVECEXP (operands[0], 0, 0));
+ ops[2] = SET_SRC (XVECEXP (operands[0], 0, count - 1));
+
+ output_asm_insn (\"stm%?ia\\t%0, {%1-%2}\\t%@ str multiple\", ops);
+ return \"\";
+}
+"
+[(set (attr "type")
+ (cond [(eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 3))
+ (const_string "store2")
+ (eq (symbol_ref "XVECLEN (operands[0],0)") (const_int 4))
+ (const_string "store3")]
+ (const_string "store4")))])
+
+;; Move a block of memory if it is word aligned and MORE than 2 words long.
+;; We could let this apply to smaller blocks as well, but it clobbers so
+;; many registers that there is probably a better way for those.
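+;; For illustration (register choice assumed): copying 16 aligned bytes
+;; can come out as
+;;     ldmia   r1!, {r4-r7}
+;;     stmia   r0!, {r4-r7}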
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (arm_gen_movstrqi (operands))
+ DONE;
+ FAIL;
+")
+
+
+;; Comparison and test insns
+
+(define_expand "cmpsi"
+ [(match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "arm_add_operand" "")]
+ ""
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 0;
+ DONE;
+}
+")
+
+(define_expand "cmpsf"
+ [(match_operand:SF 0 "s_register_operand" "")
+ (match_operand:SF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpdf"
+ [(match_operand:DF 0 "s_register_operand" "")
+ (match_operand:DF 1 "fpu_rhs_operand" "")]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_expand "cmpxf"
+ [(match_operand:XF 0 "s_register_operand" "")
+ (match_operand:XF 1 "fpu_rhs_operand" "")]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "
+{
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ arm_compare_fp = 1;
+ DONE;
+}
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L")))]
+ ""
+ "@
+ cmp%?\\t%0, %1
+ cmn%?\\t%0, #%n1"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_shiftsi_swp"
+ [(set (reg:CC_SWP 24)
+ (compare:CC_SWP (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "reg_or_int_operand" "rM")])
+ (match_operand:SI 0 "s_register_operand" "r")))]
+ ""
+ "cmp%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsi_neg_shiftsi"
+ [(set (reg:CC 24)
+ (compare:CC (match_operand:SI 0 "s_register_operand" "r")
+ (neg:SI (match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")]))))]
+ ""
+ "cmn%?\\t%0, %1%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*cmpsf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpesfdf_df"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_esfdf"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_insn"
+ [(set (reg:CCFP 24)
+ (compare:CCFP (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?\\t%0, %1
+ cnf%?\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpsf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:SF 0 "s_register_operand" "f,f")
+ (match_operand:SF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f,f")
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_esfdf_df_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (float_extend:DF
+ (match_operand:SF 0 "s_register_operand" "f,f"))
+ (match_operand:DF 1 "fpu_add_operand" "fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmp_df_esfdf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:DF 0 "s_register_operand" "f")
+ (float_extend:DF
+ (match_operand:SF 1 "s_register_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "cmf%?e\\t%0, %1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+(define_insn "*cmpxf_trap"
+ [(set (reg:CCFPE 24)
+ (compare:CCFPE (match_operand:XF 0 "s_register_operand" "f,f")
+ (match_operand:XF 1 "fpu_add_operand" "fG,H")))]
+ "ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
+ "@
+ cmf%?e\\t%0, %1
+ cnf%?e\\t%0, #%N1"
+[(set_attr "conds" "set")
+ (set_attr "type" "f_2_r")])
+
+; This insn allows redundant compares to be removed by cse; nothing should
+; ever appear in the output file since (set (reg x) (reg x)) is a no-op that
+; is deleted later on. The match_dup will match the mode here, so that
+; mode changes of the condition codes aren't lost by this even though we don't
+; specify what they are.
+
+(define_insn "*deleted_compare"
+ [(set (match_operand 0 "cc_register" "") (match_dup 0))]
+ ""
+ "\\t%@ deleted compare"
+[(set_attr "conds" "set")
+ (set_attr "length" "0")])
+
+
+;; Conditional branch insns
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+;; patterns to match conditional branch insns
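+;; Note: arm_ccfsm_state is maintained by the prescan pass in arm.c; a
+;; value of 1 or 2 means the branch is being replaced by conditional
+;; execution of the following insns, so the templates below then output
+;; nothing for the branch itself.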
+
+(define_insn "*condbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%d1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+(define_insn "*condbranch_reversed"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%D1\\t%l0\";
+}"
+[(set_attr "conds" "use")])
+
+
+; scc insns
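+; For example (register assumed), once the compare has set the flags,
+; "r0 = (a == b)" comes out of *mov_scc below as
+;     movne   r0, #0
+;     moveq   r0, #1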
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (EQ, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (NE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (le:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GE, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LT, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (GEU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = gen_compare_reg (LTU, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+}
+")
+
+(define_insn "*mov_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)]))]
+ ""
+ "mov%D1\\t%0, #0\;mov%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*mov_notscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI (match_operator:SI 1 "comparison_operator"
+ [(match_operand 2 "cc_register" "") (const_int 0)])))]
+ ""
+ "mov%D1\\t%0, #0\;mvn%d1\\t%0, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+
+;; Conditional move insns
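+;; For example (registers assumed), "x = c ? a : b" becomes, after the
+;; compare,
+;;     moveq   r0, r1
+;;     movne   r0, r2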
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arm_not_operand" "")
+ (match_operand:SI 3 "arm_not_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "s_register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "s_register_operand" "")
+ (match_operand:SF 3 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
+
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand */
+ if ((! TARGET_HARD_FLOAT)
+ || (! fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
+
+ ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "s_register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "s_register_operand" "")
+ (match_operand:DF 3 "fpu_add_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = gen_compare_reg (code, arm_compare_op0, arm_compare_op1,
+ arm_compare_fp);
+
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+}")
+
+(define_insn "*movsicc_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,0,rI,K,rI,rI,K,K")
+ (match_operand:SI 2 "arm_not_operand" "rI,K,0,0,rI,K,rI,K")))]
+ ""
+ "@
+ mov%D3\\t%0, %2
+ mvn%D3\\t%0, #%B2
+ mov%d3\\t%0, %1
+ mvn%d3\\t%0, #%B1
+ mov%d3\\t%0, %1\;mov%D3\\t%0, %2
+ mov%d3\\t%0, %1\;mvn%D3\\t%0, #%B2
+ mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
+ mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_hard_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:SF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3s\\t%0, %2
+ mnf%D3s\\t%0, #%N2
+ mvf%d3s\\t%0, %1
+ mnf%d3s\\t%0, #%N1
+ mvf%d3s\\t%0, %1\;mvf%D3s\\t%0, %2
+ mvf%d3s\\t%0, %1\;mnf%D3s\\t%0, #%N2
+ mnf%d3s\\t%0, #%N1\;mvf%D3s\\t%0, %2
+ mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+(define_insn "*movsfcc_soft_insn"
+ [(set (match_operand:SF 0 "s_register_operand" "=r,r")
+ (if_then_else:SF (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:SF 1 "s_register_operand" "0,r")
+ (match_operand:SF 2 "s_register_operand" "r,0")))]
+ "TARGET_SOFT_FLOAT"
+ "@
+ mov%D3\\t%0, %2
+ mov%d3\\t%0, %1"
+ [(set_attr "conds" "use")])
+
+(define_insn "*movdfcc_insn"
+ [(set (match_operand:DF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
+ (if_then_else:DF
+ (match_operator 3 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operand:DF 1 "fpu_add_operand" "0,0,fG,H,fG,fG,H,H")
+ (match_operand:DF 2 "fpu_add_operand" "fG,H,0,0,fG,H,fG,H")))]
+ "TARGET_HARD_FLOAT"
+ "@
+ mvf%D3d\\t%0, %2
+ mnf%D3d\\t%0, #%N2
+ mvf%d3d\\t%0, %1
+ mnf%d3d\\t%0, #%N1
+ mvf%d3d\\t%0, %1\;mvf%D3d\\t%0, %2
+ mvf%d3d\\t%0, %1\;mnf%D3d\\t%0, #%N2
+ mnf%d3d\\t%0, #%N1\;mvf%D3d\\t%0, %2
+ mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
+ [(set_attr "length" "4,4,4,4,8,8,8,8")
+ (set_attr "type" "ffarith")
+ (set_attr "conds" "use")])
+
+;; Jump and linkage insns
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 1 || arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return \"b%?\\t%l0\";
+}")
+
+(define_expand "call"
+ [(parallel [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (reg:SI 14))])]
+ ""
+ "")
+
+(define_insn "*call_reg"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "r"))
+ (match_operand 1 "" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (operands);
+"
+;; Length is the worst case; normally only two instructions are needed.
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_mem"
+ [(call (mem:SI (match_operand 0 "memory_operand" "m"))
+ (match_operand 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call_mem (operands);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "=rf")
+ (call (match_operand 1 "memory_operand" "m")
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])]
+ ""
+ "")
+
+(define_insn "*call_value_reg"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+ return output_call (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+(define_insn "*call_value_mem"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand 1 "memory_operand" "m"))
+ (match_operand 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "! CONSTANT_ADDRESS_P (XEXP (operands[1], 0))"
+ "*
+ return output_call_mem (&operands[1]);
+"
+[(set_attr "length" "12")
+ (set_attr "type" "call")])
+
+;; Allow calls to SYMBOL_REFs specially, as they are not valid general addresses.
+;; The 'a' causes the operand to be treated as an address, i.e. no '#' output.
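+;; e.g. a direct call is simply "bl foo", a 24-bit pc-relative branch.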
+
+(define_insn "*call_symbol"
+ [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl%?\\t%a0"
+[(set_attr "type" "call")])
+
+(define_insn "*call_value_symbol"
+ [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))]
+ "GET_CODE(operands[1]) == SYMBOL_REF"
+ "bl%?\\t%a1"
+[(set_attr "type" "call")])
+
+;; Often the return insn will be the same as loading from memory, so set
+;; the "type" attribute to "load".
+(define_insn "return"
+ [(return)]
+ "USE_RETURN_INSN (FALSE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (NULL, TRUE, FALSE);
+}"
+[(set_attr "type" "load")])
+
+(define_insn "*cond_return"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (return)
+ (pc)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, FALSE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+(define_insn "*cond_return_inverted"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand 1 "cc_register" "") (const_int 0)])
+ (pc)
+ (return)))]
+ "USE_RETURN_INSN (TRUE)"
+ "*
+{
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state == 2)
+ {
+ arm_ccfsm_state += 2;
+ return \"\";
+ }
+ return output_return_instruction (operands[0], TRUE, TRUE);
+}"
+[(set_attr "conds" "use")
+ (set_attr "type" "load")])
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+[(set_attr "length" "0")
+ (set_attr "type" "block")])
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "s_register_operand" "") ; index to jump on
+ (match_operand:SI 1 "const_int_operand" "") ; lower bound
+ (match_operand:SI 2 "const_int_operand" "") ; total range
+ (match_operand:SI 3 "" "") ; table label
+   (match_operand:SI 4 "" "")]			; out-of-range label
+ ""
+ "
+{
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
+
+ if (! const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+}")
+
+;; The USE in this pattern is needed to tell flow analysis that this is
+;; a CASESI insn. It has no other purpose.
+(define_insn "casesi_internal"
+ [(parallel [(set (pc)
+ (if_then_else
+ (leu (match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "arm_rhs_operand" "rI"))
+ (mem:SI (plus:SI (mult:SI (match_dup 0) (const_int 4))
+ (label_ref (match_operand 2 "" ""))))
+ (label_ref (match_operand 3 "" ""))))
+ (use (label_ref (match_dup 2)))])]
+ ""
+ "*
+ if (flag_pic)
+ return \"cmp\\t%0, %1\;addls\\t%|pc, %|pc, %0, asl #2\;b\\t%l3\";
+ return \"cmp\\t%0, %1\;ldrls\\t%|pc, [%|pc, %0, asl #2]\;b\\t%l3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "s_register_operand" "r"))]
+ ""
+ "mov%?\\t%|pc, %0\\t%@ indirect jump")
+
+(define_insn "*load_indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "memory_operand" "m"))]
+ ""
+ "ldr%?\\t%|pc, %0\\t%@ indirect jump"
+[(set_attr "type" "load")])
+
+;; Misc insns
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "mov%?\\tr0, r0\\t%@ nop")
+
+;; Patterns to allow combination of arithmetic, cond code and shifts
+
+(define_insn "*arith_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")]))]
+ ""
+ "%i1%?\\t%0, %2, %4%S3")
+
+(define_insn "*arith_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
+ (match_dup 2)]))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*arith_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (match_operator:SI 1 "shiftable_operator"
+ [(match_operator:SI 3 "shift_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "reg_or_int_operand" "rI")])
+ (match_operand:SI 2 "s_register_operand" "r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "%i1%?s\\t%0, %2, %4%S3"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")])))]
+ ""
+ "sub%?\\t%0, %1, %3%S2")
+
+(define_insn "*sub_shiftsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+(define_insn "*sub_shiftsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "reg_or_int_operand" "rM")]))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=r"))]
+ ""
+ "sub%?s\\t%0, %1, %3%S2"
+[(set_attr "conds" "set")])
+
+;; These variants of the above insns can occur if the first operand is the
+;; frame pointer and we eliminate that.  This is a kludge, but there doesn't
+;; seem to be a way around it.  Most of the predicates have to be null
+;; because the pattern can be generated partway through reload, so if we
+;; don't match it as soon as it becomes available, reload doesn't know how
+;; to reload pseudos that haven't got hard registers; the constraints will
+;; sort everything out.
+
+(define_insn "*reload_mulsi3"
+ [(set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 2 "" "r"))
+ (match_operand:SI 1 "const_int_operand" "n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"add%?\\t%0, %2, %3%S5\", operands);
+ operands[2] = operands[1];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+; We have no idea how long the add_immediate sequence is; it could be up to
+; four instructions.
+[(set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
+ (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+(define_insn "*reload_mulsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI
+ (plus:SI
+ (match_operator:SI 5 "shift_operator"
+ [(match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "rM")])
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"add%?s\\t%0, %0, %3%S5\";
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "20")])
+
+;; These are similar, but are needed when the mla pattern contains the
+;; eliminated register as operand 3.
+
+(define_insn "*reload_muladdsi"
+ [(set (match_operand:SI 0 "" "=&r,&r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "" "%0,r")
+ (match_operand:SI 2 "" "r,r"))
+ (match_operand:SI 3 "" "r,r"))
+ (match_operand:SI 4 "const_int_operand" "n,n")))]
+ "reload_in_progress"
+ "*
+ output_asm_insn (\"mla%?\\t%0, %2, %1, %3\", operands);
+ operands[2] = operands[4];
+ operands[1] = operands[0];
+ return output_add_immediate (operands);
+"
+[(set_attr "length" "20")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "" "=&r")
+ (plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
+ (match_dup 2)))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
+ return \"\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+(define_insn "*reload_muladdsi_compare0_scratch"
+ [(set (reg:CC_NOOV 24)
+ (compare:CC_NOOV (plus:SI (plus:SI (mult:SI
+ (match_operand:SI 3 "" "r")
+ (match_operand:SI 4 "" "r"))
+ (match_operand:SI 1 "" "r"))
+ (match_operand:SI 2 "const_int_operand" "n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 0 "=&r"))]
+ "reload_in_progress"
+ "*
+ output_add_immediate (operands);
+ return \"mla%?s\\t%0, %3, %4, %0\";
+"
+[(set_attr "length" "20")
+ (set_attr "conds" "set")
+ (set_attr "type" "mult")])
+
+
+
+(define_insn "*and_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (and:SI (match_operator 1 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 2 "s_register_operand" "r")))]
+ ""
+ "mov%D1\\t%0, #0\;and%d1\\t%0, %2, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ior_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (ior:SI (match_operator 2 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "s_register_operand" "0,?r")))]
+ ""
+ "@
+ orr%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;orr%d2\\t%0, %1, #1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+
+(define_insn "*compare_scc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[1]) == LT && operands[3] == const0_rtx)
+ return \"mov\\t%0, %2, lsr #31\";
+
+ if (GET_CODE (operands[1]) == GE && operands[3] == const0_rtx)
+ return \"mvn\\t%0, %2\;mov\\t%0, %0, lsr #31\";
+
+ if (GET_CODE (operands[1]) == NE)
+ {
+ if (which_alternative == 1)
+ return \"adds\\t%0, %2, #%n3\;movne\\t%0, #1\";
+ return \"subs\\t%0, %2, %3\;movne\\t%0, #1\";
+ }
+ if (which_alternative == 1)
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ return \"mov%D1\\t%0, #0\;mov%d1\\t%0, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 3 "equality_operator"
+ [(match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))]
+ ""
+ "*
+ if (GET_CODE (operands[3]) == NE)
+ {
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D4\\t%0, %2\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d4\\t%0, %1\", operands);
+ return \"\";
+ }
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%d4\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8")])
+
+(define_insn "*cond_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "s_register_operand" "0,?r")]))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[4]) == LT && operands[3] == const0_rtx)
+ return \"%i5\\t%0, %1, %2, lsr #31\";
+
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (GET_CODE (operands[5]) == AND)
+ output_asm_insn (\"mov%D4\\t%0, #0\", operands);
+ else if (GET_CODE (operands[5]) == MINUS)
+ output_asm_insn (\"rsb%D4\\t%0, %1, #0\", operands);
+ else if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"%i5%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*cond_sub"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "0,?r")
+ (match_operator:SI 4 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%D4\\t%0, %1\", operands);
+ return \"sub%d4\\t%0, %1, #1\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*cmp_ite0"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 0))
+ (const_int 0)))]
+ ""
+ "*
+{
+  static char * opcodes[4][2] =
+ {
+ {\"cmp\\t%2, %3\;cmp%d5\\t%0, %1\",\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\"},
+ {\"cmp\\t%2, %3\;cmn%d5\\t%0, #%n1\", \"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\"},
+ {\"cmn\\t%2, #%n3\;cmp%d5\\t%0, %1\", \"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\"},
+ {\"cmn\\t%2, #%n3\;cmn%d5\\t%0, #%n1\",
+ \"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]), GET_CODE (operands[4]));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*cmp_ite1"
+ [(set (match_operand 6 "dominant_cc_register" "")
+ (compare
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand:SI 0 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L,rI,L")])
+ (match_operator:SI 5 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,rI,L,L")])
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "*
+{
+  static char * opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+}
+"
+[(set_attr "conds" "set")
+ (set_attr "length" "8")])
+
+(define_insn "*negscc"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (neg:SI (match_operator 3 "comparison_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  if (GET_CODE (operands[3]) == LT && operands[2] == const0_rtx)
+ return \"mov\\t%0, %1, asr #31\";
+
+ if (GET_CODE (operands[3]) == NE)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, #0\";
+
+ if (GET_CODE (operands[3]) == GT)
+ return \"subs\\t%0, %1, %2\;mvnne\\t%0, %0, asr #31\";
+
+ output_asm_insn (\"cmp\\t%1, %2\", operands);
+ output_asm_insn (\"mov%D3\\t%0, #0\", operands);
+ return \"mvn%d3\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "movcond"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL,rIL")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,rI,?rI")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,0,rI")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+ if (GET_CODE (operands[5]) == LT
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"and\\t%0, %1, %3, asr #31\";
+ return \"ands\\t%0, %1, %3, asr #32\;movcc\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"bic\\t%0, %2, %3, asr #31\";
+ return \"bics\\t%0, %2, %3, asr #32\;movcs\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+
+ if (GET_CODE (operands[5]) == GE
+ && (operands[4] == const0_rtx))
+ {
+ if (which_alternative != 1 && GET_CODE (operands[1]) == REG)
+ {
+ if (operands[2] == const0_rtx)
+ return \"bic\\t%0, %1, %3, asr #31\";
+ return \"bics\\t%0, %1, %3, asr #32\;movcs\\t%0, %2\";
+ }
+ else if (which_alternative != 0 && GET_CODE (operands[2]) == REG)
+ {
+ if (operands[1] == const0_rtx)
+ return \"and\\t%0, %2, %3, asr #31\";
+ return \"ands\\t%0, %2, %3, asr #32\;movcc\\t%0, %1\";
+ }
+      /* The only case that falls through to here is when both ops 1 & 2
+	 are constants.  */
+ }
+ if (GET_CODE (operands[4]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[4])))
+ output_asm_insn (\"cmn\\t%3, #%n4\", operands);
+ else
+ output_asm_insn (\"cmp\\t%3, %4\", operands);
+ if (which_alternative != 0)
+ output_asm_insn (\"mov%d5\\t%0, %1\", operands);
+ if (which_alternative != 1)
+ output_asm_insn (\"mov%D5\\t%0, %2\", operands);
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,8,12")])
+
+(define_insn "*ifcompare_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_plus_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ ""
+ "@
+ add%d4\\t%0, %2, %3
+ sub%d4\\t%0, %2, #%n3
+ add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
+ add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
+ sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_plus"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 5 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (plus:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ ""
+ "@
+ add%D4\\t%0, %2, %3
+ sub%D4\\t%0, %2, #%n3
+ add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
+ add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
+ sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,4,8,8,8,8")
+ (set_attr "type" "*,*,*,*,load,load")])
+
+(define_insn "*ifcompare_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 9 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rI")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rI")])))]
+ ""
+ "%I6%d5\\t%0, %1, %2\;%I7%D5\\t%0, %3, %4"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[3] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[5]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[4])
+ && REGNO (operands[4]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == LT)
+ return \"and\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ else if (GET_CODE (operands[6]) == GE)
+ return \"bic\\t%0, %5, %2, asr #31\;%I7\\t%0, %4, %0\";
+ }
+ if (GET_CODE (operands[3]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[3])))
+ output_asm_insn (\"cmn\\t%2, #%n3\", operands);
+ else
+ output_asm_insn (\"cmp\\t%2, %3\", operands);
+ output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ return \"ldr%D6\\t%0, %1\";
+ else
+ return \"mov%D6\\t%0, %1\";
+ }
+ return \"\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_arith_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ ""
+ "@
+ %I5%d4\\t%0, %2, %3
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
+ %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "*
+  /* If we have an operation where (op x 0) is the identity operation, the
+     conditional operator is LT or GE, we are comparing against zero, and
+     everything is in registers, then we can do this in two instructions.  */
+ if (operands[5] == const0_rtx
+ && GET_CODE (operands[7]) != AND
+ && GET_CODE (operands[3]) == REG
+ && GET_CODE (operands[1]) == REG
+ && REGNO (operands[1]) == REGNO (operands[2])
+ && REGNO (operands[2]) != REGNO (operands[0]))
+ {
+ if (GET_CODE (operands[6]) == GE)
+ return \"and\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ else if (GET_CODE (operands[6]) == LT)
+ return \"bic\\t%0, %3, %4, asr #31\;%I7\\t%0, %2, %0\";
+ }
+
+ if (GET_CODE (operands[5]) == CONST_INT
+ && !const_ok_for_arm (INTVAL (operands[5])))
+ output_asm_insn (\"cmn\\t%4, #%n5\", operands);
+ else
+ output_asm_insn (\"cmp\\t%4, %5\", operands);
+
+ if (which_alternative != 0)
+ {
+ if (GET_CODE (operands[1]) == MEM)
+ output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
+ else
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
+ }
+ return \"%I7%D6\\t%0, %2, %3\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operator:SI 5 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ ""
+ "@
+ %I5%D4\\t%0, %2, %3
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
+ %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")
+ (set_attr "type" "*,*,load")])
+
+(define_insn "*ifcompare_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2
+ mvn%d4\\t%0, #%B1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_not_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mvn%d4\\t%0, %2
+ mov%D4\\t%0, %1\;mvn%d4\\t%0, %2
+ mvn%D4\\t%0, #%B1\;mvn%d4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_shift_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ mov%d5\\t%0, %2%S4
+ mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
+ mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r,r")
+ (match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 6 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 2 "s_register_operand" "r,r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rM,rM,rM")])))]
+ ""
+ "@
+ mov%D5\\t%0, %2%S4
+ mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
+ mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 7 "comparison_operator"
+ [(match_operand:SI 5 "s_register_operand" "r")
+ (match_operand:SI 6 "arm_add_operand" "rIL")])
+ (match_operator:SI 8 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 9 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_shift_shift"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 8 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shift_operator"
+ [(match_operand:SI 1 "s_register_operand" "r")
+ (match_operand:SI 2 "arm_rhs_operand" "rM")])
+ (match_operator:SI 7 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "arm_rhs_operand" "rM")])))]
+ ""
+ "mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_not_arith"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])))]
+ ""
+ "mvn%d5\\t%0, %1\;%I6%D5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 6 "comparison_operator"
+ [(match_operand:SI 4 "s_register_operand" "r")
+ (match_operand:SI 5 "arm_add_operand" "rIL")])
+ (match_operator:SI 7 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+(define_insn "*if_arith_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand 4 "cc_register" "") (const_int 0)])
+ (match_operator:SI 6 "shiftable_operator"
+ [(match_operand:SI 2 "s_register_operand" "r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI")])
+ (not:SI (match_operand:SI 1 "s_register_operand" "r"))))]
+ ""
+ "mvn%D5\\t%0, %1\;%I6%d5\\t%0, %2, %3"
+[(set_attr "conds" "use")
+ (set_attr "length" "8")])
+
+(define_insn "*ifcompare_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_neg_move"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")))]
+ ""
+ "@
+ rsb%d4\\t%0, %2, #0
+ mov%D4\\t%0, %1\;rsb%d4\\t%0, %2, #0
+ mvn%D4\\t%0, #%B1\;rsb%d4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*ifcompare_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI
+ (match_operator 5 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "r,r")
+ (match_operand:SI 4 "arm_add_operand" "rIL,rIL")])
+ (match_operand:SI 1 "arm_not_operand" "0,?rIK")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r"))))
+ (clobber (reg:CC 24))]
+ ""
+ "#"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8,12")])
+
+(define_insn "*if_move_neg"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ (if_then_else:SI
+ (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_not_operand" "0,?rI,K")
+ (neg:SI (match_operand:SI 2 "s_register_operand" "r,r,r"))))]
+ ""
+ "@
+ rsb%D4\\t%0, %2, #0
+ mov%d4\\t%0, %1\;rsb%D4\\t%0, %2, #0
+ mvn%d4\\t%0, #%B1\;rsb%D4\\t%0, %2, #0"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8,8")])
+
+(define_insn "*arith_adjacentmem"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operator:SI 1 "shiftable_operator"
+ [(match_operand:SI 2 "memory_operand" "m")
+ (match_operand:SI 3 "memory_operand" "m")]))
+ (clobber (match_scratch:SI 4 "=r"))]
+ "adjacent_mem_locations (operands[2], operands[3])"
+ "*
+{
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
+
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+}
+"
+[(set_attr "length" "12")
+ (set_attr "type" "load")])
+
+;; The ARM can support extended pre-increment instructions.
+
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+
+;; We reject the frame pointer if it occurs anywhere in these patterns since
+;; elimination will cause too many headaches.
+
+(define_insn "*strqi_preinc"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_preinc"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_predec"
+ [(set (match_operand:QI 3 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, %2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqisi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (zero_extend:SI
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?b\\t%3, [%0, -%2]!\\t%@ z_extendqisi"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_preinc"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, %2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_predec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r")))
+ (match_operand:SI 3 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "str%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadsi_preinc"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadsi_predec"
+ [(set (match_operand:SI 3 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_preinc"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "index_operand" "rJ"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, %2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_predec"
+ [(set (match_operand:HI 3 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "r"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "(!BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[2]) != FRAME_POINTER_REGNUM)"
+ "ldr%?\\t%3, [%0, -%2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*strqi_shiftpreinc"
+ [(set (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strqi_shiftpredec"
+ [(set (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:QI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:QI 5 "s_register_operand" "=r")
+ (mem:QI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?b\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*strsi_shiftpreinc"
+ [(set (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0")))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*strsi_shiftpredec"
+ [(set (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])))
+ (match_operand:SI 5 "s_register_operand" "r"))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "str%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "store1")])
+
+(define_insn "*loadqi_shiftpreinc"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadqi_shiftpredec"
+ [(set (match_operand:SI 5 "s_register_operand" "=r")
+ (mem:SI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpreinc"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (plus:SI (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")])
+ (match_operand:SI 1 "s_register_operand" "0"))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 1)))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, %3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+(define_insn "*loadhi_shiftpredec"
+ [(set (match_operand:HI 5 "s_register_operand" "=r")
+ (mem:HI (minus:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operator:SI 2 "shift_operator"
+ [(match_operand:SI 3 "s_register_operand" "r")
+ (match_operand:SI 4 "const_shift_operand" "n")]))))
+ (set (match_operand:SI 0 "s_register_operand" "=r")
+ (minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
+ (match_dup 4)])))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+ && REGNO (operands[0]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[1]) != FRAME_POINTER_REGNUM
+ && REGNO (operands[3]) != FRAME_POINTER_REGNUM"
+ "ldr%?\\t%5, [%0, -%3%S2]!\\t%@ loadhi"
+[(set_attr "type" "load")])
+
+; The ARM can also support extended post-increment expressions, but combine
+; doesn't try these.
+; It doesn't seem worth adding peepholes for anything but the most common
+; cases since, unlike combine, the increment must immediately follow the load
+; for the pattern to match.
+; When loading, we must check that the base register isn't clobbered by the
+; load; in such cases this isn't a post-increment expression.
+
+(define_peephole
+ [(set (mem:QI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?b\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:QI 0 "s_register_operand" "=r")
+ (mem:QI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
+ ""
+ "str%?\\t%2, [%0], %1")
+
+(define_peephole
+ [(set (match_operand:HI 0 "s_register_operand" "=r")
+ (mem:HI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "(! BYTES_BIG_ENDIAN)
+ && ! TARGET_SHORT_BY_BYTES
+   && REGNO (operands[0]) != REGNO (operands[1])
+   && (GET_CODE (operands[2]) != REG
+       || REGNO (operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mem:SI (match_operand:SI 1 "s_register_operand" "+r")))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
+ "REGNO(operands[0]) != REGNO(operands[1])
+ && (GET_CODE (operands[2]) != REG
+ || REGNO(operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
+ (match_operand:SI 1 "index_operand" "rJ")))
+ (match_operand:QI 2 "s_register_operand" "r"))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
+ ""
+ "str%?b\\t%2, [%0, %1]!")
+
+(define_peephole
+ [(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
+ [(match_operand:SI 0 "s_register_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "n")])
+ (match_operand:SI 2 "s_register_operand" "+r")))
+ (match_operand:QI 3 "s_register_operand" "r"))
+ (set (match_dup 2) (plus:SI (match_op_dup 4 [(match_dup 0) (match_dup 1)])
+ (match_dup 2)))]
+ ""
+ "str%?b\\t%3, [%2, %0%S4]!")
+
+; This pattern is never tried by combine, so do it as a peephole
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (reg:CC 24)
+ (compare:CC (match_dup 1) (const_int 0)))]
+ ""
+ "sub%?s\\t%0, %1, #0"
+[(set_attr "conds" "set")])
+
+; Peepholes to spot possible load- and store-multiples.  If the ordering is
+; reversed, check that the memory references aren't volatile.
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 6 "memory_operand" "m"))
+ (set (match_operand:SI 3 "s_register_operand" "=r")
+ (match_operand:SI 7 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 4 "memory_operand" "m"))
+ (set (match_operand:SI 2 "s_register_operand" "=r")
+ (match_operand:SI 5 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))
+ (set (match_operand:SI 1 "s_register_operand" "=r")
+ (match_operand:SI 3 "memory_operand" "m"))]
+ "load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_ldm_seq (operands, 2);
+")
+
+(define_peephole
+ [(set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 6 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))
+ (set (match_operand:SI 7 "memory_operand" "=m")
+ (match_operand:SI 3 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 4);
+")
+
+(define_peephole
+ [(set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 4 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))
+ (set (match_operand:SI 5 "memory_operand" "=m")
+ (match_operand:SI 2 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 3);
+")
+
+(define_peephole
+ [(set (match_operand:SI 2 "memory_operand" "=m")
+ (match_operand:SI 0 "s_register_operand" "r"))
+ (set (match_operand:SI 3 "memory_operand" "=m")
+ (match_operand:SI 1 "s_register_operand" "r"))]
+ "store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
+ "*
+ return emit_stm_seq (operands, 2);
+")
+
+;; A call followed by return can be replaced by restoring the regs and
+;; jumping to the subroutine, provided we aren't passing the address of
+;; any of our local variables. If we call alloca then this is unsafe
+;; since restoring the frame frees the memory, which is not what we want.
+;; Sometimes the return might have been targeted by the final prescan:
+;; if so then emit a proper return insn as well.
+;; Unfortunately, if the frame pointer is required, we don't know if the
+;; current function has any implicit stack pointer adjustments that will
+;; be restored by the return: we can't therefore do a tail call.
+;; Another unfortunate case that we can't handle is when
+;; current_function_args_size is non-zero: in this case elimination of the
+;; argument pointer assumed that lr was pushed onto the stack, so
+;; eliminating it upsets the offset calculations.
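+;; For example, with an empty frame a function tail such as (foo is a
+;; stand-in name)
+;;	bl	foo
+;;	mov	pc, lr
+;; becomes the direct branch
+;;	b	foo
+;; so that foo returns straight to our caller.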
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; As above but when this function is not void, we must be returning the
+;; result of the called subroutine.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "s_register_operand" "=rf")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (use (match_dup 0))
+ (return)]
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN (FALSE)
+ && !get_frame_size () && !current_function_calls_alloca
+ && !frame_pointer_needed && !current_function_args_size)"
+ "*
+{
+ extern rtx arm_target_insn;
+ extern int arm_ccfsm_state;
+
+ if (arm_ccfsm_state && arm_target_insn && INSN_DELETED_P (arm_target_insn))
+ {
+ arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
+ output_return_instruction (NULL, TRUE, FALSE);
+ arm_ccfsm_state = 0;
+ arm_target_insn = NULL;
+ }
+
+ output_return_instruction (NULL, FALSE, FALSE);
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set_attr "length" "8")])
+
+;; CYGNUS LOCAL
+;; If calling a subroutine and then jumping back to somewhere else, but not
+;; too far away, then we can set the link register with the branch address
+;; and jump direct to the subroutine. On return from the subroutine
+;; execution continues at the branch; this avoids a prefetch stall.
+;; We use the length attribute (via short_branch ()) to establish whether or
+;; not this is possible; this is the same approach that the SPARC port uses.
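+;; For example, for a short forward branch to a label L2 (hypothetical),
+;; the emitted sequence is approximately:
+;;	mov	lr, pc			@ protect cc
+;;	add	lr, lr, #(L2 - . - 4)
+;;	b	subroutine
+;; so the subroutine's return lands at L2 without the prefetch stall.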
+
+(define_peephole
+ [(parallel[(call (mem:SI (match_operand:SI 0 "" "X"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 2 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[2]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[2]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l2 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l2)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l2 - . -4)\", operands);
+ }
+ return \"b%?\\t%a0\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+
+(define_peephole
+ [(parallel[(set (match_operand:SI 0 "s_register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "" "X"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (clobber (reg:SI 14))])
+ (set (pc)
+ (label_ref (match_operand 3 "" "")))]
+ "0 && GET_CODE (operands[0]) == SYMBOL_REF
+ && short_branch (INSN_UID (insn), INSN_UID (operands[3]))
+ && arm_insn_not_targeted (insn)"
+ "*
+{
+ int backward = arm_backwards_branch (INSN_UID (insn),
+ INSN_UID (operands[3]));
+
+#if 0
+  /* Putting this in means that TARGET_6 code will ONLY run on an arm6 or
+   * above; leaving it out means that the code will still run on an arm 2 or 3.
+   */
+ if (TARGET_6)
+ {
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|pc, #(8 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|pc, #(%l3 - . -8)\", operands);
+ }
+ else
+#endif
+ {
+ output_asm_insn (\"mov%?\\t%|lr, %|pc\\t%@ protect cc\", operands);
+ if (backward)
+ output_asm_insn (\"sub%?\\t%|lr, %|lr, #(4 + . -%l3)\", operands);
+ else
+ output_asm_insn (\"add%?\\t%|lr, %|lr, #(%l3 - . -4)\", operands);
+ }
+ return \"b%?\\t%a1\";
+}"
+[(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq_attr "prog_mode" "prog32")
+ (const_int 8)
+ (const_int 12)))])
+;; END CYGNUS LOCAL
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (ge:SI (match_operand:SI 1 "s_register_operand" "")
+ (const_int 0))
+ (neg:SI (match_operator:SI 2 "comparison_operator"
+ [(match_operand:SI 3 "s_register_operand" "")
+ (match_operand:SI 4 "arm_rhs_operand" "")]))))
+ (clobber (match_operand:SI 5 "s_register_operand" ""))]
+ ""
+ [(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
+ (set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
+ (match_dup 5)))]
+ "")
+
+;; This split can be used because CC_Z mode implies that the following
+;; branch will be an equality, or an unsigned inequality, so the sign
+;; extension is not needed.
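+;; For instance, testing (x << 24) == 0x2a000000 (a constant whose low
+;; 24 bits are zero) is equivalent to testing zero_extend (x) == 0x2a,
+;; which needs neither the shift nor a sign extension.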
+
+(define_split
+ [(set (reg:CC_Z 24)
+ (compare:CC_Z
+ (ashift:SI (subreg:SI (match_operand:QI 0 "memory_operand" "") 0)
+ (const_int 24))
+ (match_operand 1 "const_int_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "((unsigned HOST_WIDE_INT) INTVAL (operands[1]))
+ == (((unsigned HOST_WIDE_INT) INTVAL (operands[1])) >> 24) << 24"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 0)))
+ (set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
+ "
+ operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
+")
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "
+ arm_expand_prologue ();
+ DONE;
+")
+
+;; This split is only used during output to reduce the number of patterns
+;; that need assembler instructions added to them.  We allowed the setting
+;; of the conditions to be implicit during rtl generation so that
+;; the conditional compare patterns would work.  However, this conflicts to
+;; some extent with the conditional data operations, so we have to split them
+;; up again here.
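+;; For example, r0 = (r1 < r2) ? r3 : 0 is split into a compare that sets
+;; the condition register, followed by the conditionally executed pair
+;; (registers are illustrative):
+;;	cmp	r1, r2
+;;	movlt	r0, r3
+;;	movge	r0, #0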
+
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "" "") (match_operand 3 "" "")])
+ (match_operand 4 "" "")
+ (match_operand 5 "" "")))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (match_dup 5)))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+;; CYGNUS LOCAL
+(define_split
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "s_register_operand" "")
+ (match_operand:SI 3 "arm_add_operand" "")])
+ (match_operand:SI 4 "arm_rhs_operand" "")
+ (not:SI
+ (match_operand:SI 5 "s_register_operand" ""))))
+ (clobber (reg:CC 24))]
+ "reload_completed"
+ [(set (match_dup 6) (match_dup 7))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(match_dup 6) (const_int 0)])
+ (match_dup 4)
+ (not:SI (match_dup 5))))]
+ "
+{
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
+ operands[3]);
+
+ operands[6] = gen_rtx (REG, mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+}
+")
+
+(define_insn "*cond_move_not"
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
+ (if_then_else:SI (match_operator 4 "comparison_operator"
+ [(match_operand 3 "cc_register" "") (const_int 0)])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
+ (not:SI
+ (match_operand:SI 2 "s_register_operand" "r,r"))))]
+ ""
+ "@
+ mvn%D4\\t%0, %2
+ mov%d4\\t%0, %1\;mvn%D4\\t%0, %2"
+[(set_attr "conds" "use")
+ (set_attr "length" "4,8")])
+;; END CYGNUS LOCAL
+
+;; The next two patterns occur when an AND operation is followed by a
+;; scc insn sequence.
+
+(define_insn "*sign_extract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n")))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"ands\\t%0, %1, %2\", operands);
+ return \"mvnne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "8")])
+
+(define_insn "*not_signextract_onebit"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (not:SI
+ (sign_extract:SI (match_operand:SI 1 "s_register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 2 "const_int_operand" "n"))))]
+ ""
+ "*
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ output_asm_insn (\"tst\\t%1, %2\", operands);
+ output_asm_insn (\"mvneq\\t%0, #0\", operands);
+ return \"movne\\t%0, #0\";
+"
+[(set_attr "conds" "clob")
+ (set_attr "length" "12")])
+
+;; Push multiple registers to the stack. The first register is in the
+;; unspec part of the insn; subsequent registers are in parallel (use ...)
+;; expressions.
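+;; For example, pushing r4, r5 and lr emits (sketch):
+;;	stmfd	sp!, {r4, r5, lr}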
+(define_insn "*push_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+ extern int lr_save_eliminated;
+
+ if (lr_save_eliminated)
+ {
+ if (XVECLEN (operands[2], 0) > 1)
+ abort ();
+ return \"\";
+ }
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+ for (i = 1; i < XVECLEN (operands[2], 0); i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
+ 0))]);
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "store4")])
+
+;; Similarly for the floating point registers
+(define_insn "*push_fp_multi"
+ [(match_parallel 2 "multi_register_push"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
+ ""
+ "*
+{
+ char pattern[100];
+ int i;
+
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+}"
+[(set_attr "type" "f_store")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/coff.h b/gcc_arm/config/arm/coff.h
new file mode 100755
index 0000000..13703ca
--- /dev/null
+++ b/gcc_arm/config/arm/coff.h
@@ -0,0 +1,211 @@
+/* Definitions of target machine for GNU compiler,
+ for ARM with COFF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/semi.h"
+#include "arm/aout.h"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/coff)", stderr)
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* CYGNUS LOCAL nickc/interworking */
+#define MULTILIB_DEFAULTS { "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork" }
+/* END CYGNUS LOCAL nickc */
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
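+/* As an illustration: with a boundary of 8 a structure of three chars
+   occupies 3 bytes, while a boundary of 32 rounds the same structure up
+   to 4 bytes.  */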
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_valid_machine_decl_attribute ();
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+/* Define this to NULL so we don't get anything.
+ We have ASM_IDENTIFY_LANGUAGE.
+ Also, when using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+/* This outputs a lot of .req's to define aliases for various registers.
+ Let's try to avoid this. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/coff\n", \
+ ASM_COMMENT_START, version_string); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/ecos-elf.h b/gcc_arm/config/arm/ecos-elf.h
new file mode 100755
index 0000000..9fdc64a
--- /dev/null
+++ b/gcc_arm/config/arm/ecos-elf.h
@@ -0,0 +1,29 @@
+/* Definitions for ecos based ARM systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM/ELF Ecos)", stderr);
+
+#define HAS_INIT_SECTION
+
+#include "unknown-elf.h"
+
+#undef INVOKE_main
+
diff --git a/gcc_arm/config/arm/elf.h b/gcc_arm/config/arm/elf.h
new file mode 100755
index 0000000..c78b68a
--- /dev/null
+++ b/gcc_arm/config/arm/elf.h
@@ -0,0 +1,374 @@
+/* Definitions of target machine for GNU compiler,
+ for ARM with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org> and
+ Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#define OBJECT_FORMAT_ELF
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Darm_elf -Acpu(arm) -Amachine(arm) -D__ELF__"
+#endif
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } while (0)
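+/* For a function named "foo" (hypothetical) this emits, roughly:
+	.Lfe1:
+	.size	foo, .Lfe1-foo  */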
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "%{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+#endif
+
+#ifndef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+#endif
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/elf)", stderr)
+#endif
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork", "fno-leading-underscore" }
+#endif
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_valid_machine_decl_attribute ();
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+/* Define this to NULL so we don't get anything.
+ We have ASM_IDENTIFY_LANGUAGE.
+ Also, when using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+#ifndef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+#endif
+
+/* This outputs a lot of .req's to define aliases for various registers.
+ Let's try to avoid this. */
+#ifndef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/elf\n", \
+ ASM_COMMENT_START, version_string); \
+} while (0)
+#endif
+
+/* Output an internal label definition. */
+#ifndef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM, PREFIX, NUM) \
+ do \
+ { \
+ char *s = (char *) alloca (40 + strlen (PREFIX)); \
+ extern int arm_target_label, arm_ccfsm_state; \
+ extern rtx arm_target_insn; \
+ \
+ if (arm_ccfsm_state == 3 && arm_target_label == (NUM) \
+ && !strcmp (PREFIX, "L")) \
+ { \
+ arm_ccfsm_state = 0; \
+ arm_target_insn = NULL; \
+ } \
+ ASM_GENERATE_INTERNAL_LABEL (s, (PREFIX), (NUM)); \
+ /* CYGNUS LOCAL nickc */ \
+ arm_asm_output_label (STREAM, s); \
+ /* END CYGNUS LOCAL */ \
+ } while (0)
+#endif
+
+/* Support the ctors/dtors and other sections. */
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+#ifndef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section\t.ctors,\"aw\""
+#endif
+
+#ifndef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section\t.dtors,\"aw\""
+#endif
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#ifndef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS
+#endif
+
+#ifndef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_ctors, in_dtors
+#endif
+
+/* A list of extra section function definitions. */
+#ifndef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#endif
+
+#ifndef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+#endif
+
+#ifndef CTORS_SECTION_FUNCTION
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+#endif
+
+#ifndef DTORS_SECTION_FUNCTION
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+#endif
+
+/* Support the ctors/dtors sections for g++. */
+#ifndef INT_ASM_OP
+#define INT_ASM_OP ".word"
+#endif
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#ifndef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+#endif
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#ifndef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+#endif
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+#include "arm/aout.h"
+
+#define ASM_OUTPUT_UNIQUE_BSS(file, decl, name, size) \
+ { \
+ int len = strlen (name) + 5; \
+ char * string; \
+ \
+ string = alloca (len + 1); \
+ sprintf (string, ".bss.%s", name); \
+ \
+ named_section (NULL, string, 0); \
+ \
+ ASM_GLOBALIZE_LABEL (file, name); \
+ \
+ ASM_OUTPUT_ALIGN (file, \
+ floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT)); \
+ \
+ last_assemble_variable_decl = decl; \
+ ASM_DECLARE_OBJECT_NAME (file, name, decl); \
+ ASM_OUTPUT_SKIP (file, size ? size : 1); \
+ }
+
+#define ASM_OUTPUT_UNIQUE_LOCAL(file, decl, name, size) \
+ do \
+ { \
+ int len = strlen (name) + 5; \
+ char * string; \
+ \
+ string = alloca (len + 1); \
+ sprintf (string, ".bss.%s", name); \
+ \
+ named_section (NULL, string, 0); \
+ \
+ ASM_OUTPUT_ALIGN (file, \
+ floor_log2 (DECL_ALIGN (decl) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (file, name); \
+ fprintf (file, "\t.space\t%d\n", size); \
+ } \
+ while (0)
diff --git a/gcc_arm/config/arm/lib1funcs.asm b/gcc_arm/config/arm/lib1funcs.asm
new file mode 100755
index 0000000..2b1ac8c
--- /dev/null
+++ b/gcc_arm/config/arm/lib1funcs.asm
@@ -0,0 +1,580 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#ifdef __APCS_26__
+#define RET movs
+#define RETc(x) mov##x##s
+#define RETCOND ^
+#else
+#define RET mov
+#define RETc(x) mov##x
+#define RETCOND
+#endif
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#ifdef __elf__
+#define __PLT__ (PLT)
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ TYPE (__udivsi3)
+ .align 0
+
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
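+	@ (Illustration: dividing 100 by 3 shifts the divisor 3 -> 48 -> 768,
+	@ stopping once it is no longer below the dividend; Loop3 below then
+	@ subtracts shifted copies of it while accumulating result bits.)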
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
	@ For a very big divisor, we must shift it one bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ orrcs result, result, curbit
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs result, result, curbit, lsr #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs result, result, curbit, lsr #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs result, result, curbit, lsr #3
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+Lgot_result:
+ mov r0, result
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__udivsi3)
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__umodsi3)
+ TYPE (__umodsi3)
+ .align 0
+
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ RETc(cc) pc, lr
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
	@ For a very big divisor, we must shift it one bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs overdone, overdone, curbit, ror #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs overdone, overdone, curbit, ror #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs overdone, overdone, curbit, ror #3
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+
+ @ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which ones were not
	@ needed is governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ ands overdone, overdone, #0xe0000000
+ RETc(eq) pc, lr @ No fixups needed
+ tst overdone, ip, ror #3
+ addne dividend, dividend, divisor, lsr #3
+ tst overdone, ip, ror #2
+ addne dividend, dividend, divisor, lsr #2
+ tst overdone, ip, ror #1
+ addne dividend, dividend, divisor, lsr #1
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__umodsi3)
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__divsi3)
+ TYPE (__divsi3)
+ .align 0
+
+SYM (__divsi3):
+ eor ip, dividend, divisor @ Save the sign of the result.
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ rsbmi divisor, divisor, #0 @ Loops below use unsigned.
+ beq Ldiv0
+ cmp dividend, #0
+ rsbmi dividend, dividend, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
	@ For a very big divisor, we must shift it one bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ orrcs result, result, curbit
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs result, result, curbit, lsr #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs result, result, curbit, lsr #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs result, result, curbit, lsr #3
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+Lgot_result:
+ mov r0, result
+ cmp ip, #0
+ rsbmi r0, r0, #0
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__divsi3)
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__modsi3)
+ TYPE (__modsi3)
+ .align 0
+
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ rsbmi divisor, divisor, #0 @ Loops below use unsigned.
+ beq Ldiv0
	@ Need to save the sign of the dividend; unfortunately, we need
	@ ip later on, and this is faster than pushing lr and using that.
+ str dividend, [sp, #-4]!
+ cmp dividend, #0
+ rsbmi dividend, dividend, #0
+ cmp dividend, divisor
+ bcc Lgot_result
+
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, #0x10000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #4
+ movcc curbit, curbit, lsl #4
+ bcc Loop1
+
+Lbignum:
	@ For a very big divisor, we must shift it one bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, #0x80000000
+ cmpcc divisor, dividend
+ movcc divisor, divisor, lsl #1
+ movcc curbit, curbit, lsl #1
+ bcc Lbignum
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ subcs dividend, dividend, divisor
+ cmp dividend, divisor, lsr #1
+ subcs dividend, dividend, divisor, lsr #1
+ orrcs overdone, overdone, curbit, ror #1
+ cmp dividend, divisor, lsr #2
+ subcs dividend, dividend, divisor, lsr #2
+ orrcs overdone, overdone, curbit, ror #2
+ cmp dividend, divisor, lsr #3
+ subcs dividend, dividend, divisor, lsr #3
+ orrcs overdone, overdone, curbit, ror #3
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ movnes curbit, curbit, lsr #4 @ No, any more bits to do?
+ movne divisor, divisor, lsr #4
+ bne Loop3
+
+ @ Any subtractions that we should not have done will be recorded in
	@ the top three bits of "overdone".  Exactly which ones were not
	@ needed is governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ ands overdone, overdone, #0xe0000000
+ beq Lgot_result
+ tst overdone, ip, ror #3
+ addne dividend, dividend, divisor, lsr #3
+ tst overdone, ip, ror #2
+ addne dividend, dividend, divisor, lsr #2
+ tst overdone, ip, ror #1
+ addne dividend, dividend, divisor, lsr #1
+Lgot_result:
+ ldr ip, [sp], #4
+ cmp ip, #0
+ rsbmi dividend, dividend, #0
+ RET pc, lr
+
+Ldiv0:
+ str lr, [sp, #-4]!
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ ldmia sp!, {pc}RETCOND
+
+ SIZE (__modsi3)
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+SYM (__div0):
+ RET pc, lr
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
+
+#include <asm/unistd.h>
+
+#define SIGFPE	8			@ can't use <asm/signal.h> as it
+					@ contains too much C rubbish
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+SYM (__div0):
+ stmfd sp!, {r1, lr}
+ swi __NR_getpid
+ cmn r0, #1000
+ ldmhsfd sp!, {r1, pc}RETCOND @ not much we can do
+ mov r1, #SIGFPE
+ swi __NR_kill
+ ldmfd sp!, {r1, pc}RETCOND
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_lnx */
+
+/* These next two sections are here, despite the fact that they contain Thumb
+   assembler, because their presence allows interworked code to be linked even
+   when this copy of the GCC library is the one being used.  */
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+   The address of the function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
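+/* For example, a Thumb-mode caller making an indirect call through r3
+   might do (sketch, label prefix omitted):
+	ldr	r3, =target
+	bl	_call_via_r3
+   and the stub below simply executes "bx r3".  */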
+
+ .text
+ .align 0
+ .force_thumb
+.macro call_via register
+ .globl SYM (_call_via_\register)
+ TYPE (_call_via_\register)
+ .thumb_func
+SYM (_call_via_\register):
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+#ifdef L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+   of the function to be called is loaded into a register, and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
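+/* Sketch of the resulting flow for a call through ip to ARM code that
+   returns with "mov pc, lr": the real return address is pushed, lr is
+   pointed at _arm_return, and _arm_return pops that address into r12
+   and executes "bx r12" to resume in the caller's instruction set.  */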
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+.macro interwork register
+ .code 16
+ .globl SYM (_interwork_call_via_\register)
+ TYPE (_interwork_call_via_\register)
+ .thumb_func
+SYM (_interwork_call_via_\register):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The lr case has to be handled a little differently...*/
+ .code 16
+ .globl SYM (_interwork_call_via_lr)
+ TYPE (_interwork_call_via_lr)
+ .thumb_func
+SYM (_interwork_call_via_lr):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
diff --git a/gcc_arm/config/arm/lib1thumb.asm b/gcc_arm/config/arm/lib1thumb.asm
new file mode 100755
index 0000000..1789356
--- /dev/null
+++ b/gcc_arm/config/arm/lib1thumb.asm
@@ -0,0 +1,572 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+#define __PLT__
+
+#ifdef __ELF__
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+/* Function end macros. Variants for interworking. */
+
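+/* Interworking is forced on in this copy of the library; the bx-based
+   return sequences below are always used.  */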
+# define __THUMB_INTERWORK__
+# ifdef __THUMB_INTERWORK__
+# define RET bx lr
+# define RETc(x) bx##x lr
+.macro THUMB_LDIV0
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+ pop { r1 }
+ bx r1
+.endm
+# else
+# define RET mov pc, lr
+# define RETc(x) mov##x pc, lr
+.macro THUMB_LDIV0
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ About as wrong as it could be.
+ pop { pc }
+.endm
+# endif
+# define RETCOND
+
+.macro FUNC_END name
+.Ldiv0:
+ THUMB_LDIV0
+ SIZE (__\name)
+.endm
+
+.macro THUMB_FUNC_START name
+ .globl SYM (\name)
+ TYPE (\name)
+ .thumb_func
+SYM (\name):
+.endm
+
+/* Function start macros. */
+
+#define THUMB_FUNC .thumb_func
+#define THUMB_CODE .force_thumb
+
+.macro FUNC_START name
+ .text
+ .globl SYM (__\name)
+ TYPE (__\name)
+ .align 0
+ THUMB_CODE
+ THUMB_FUNC
+SYM (__\name):
+.endm
+
+/* Register aliases. */
+
+work .req r4 @ XXXX is this safe ?
+dividend .req r0
+divisor .req r1
+overdone .req r2
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+/* ------------------------------------------------------------------------ */
+/* Bodies of the division and modulo routines. */
+/* ------------------------------------------------------------------------ */
+.macro THUMB_DIV_MOD_BODY modulo
+ @ Load the constant 0x10000000 into our work register.
+ mov work, #1
+ lsl work, #28
+.Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bhs .Lbignum
+ cmp divisor, dividend
+ bhs .Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b .Loop1
+.Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+.Loop2:
	@ For a very big divisor, we must shift it one bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bhs .Loop3
+ cmp divisor, dividend
+ bhs .Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b .Loop2
+.Loop3:
+ @ Test for possible subtractions ...
+ .if \modulo
+ @ ... On the final pass, this may subtract too much from the dividend,
+ @ so keep track of which subtractions are done, we can fix them up
+ @ afterwards.
+ mov overdone, #0
+ cmp dividend, divisor
+ blo .Lover1
+ sub dividend, dividend, divisor
+.Lover1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo .Lover2
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo .Lover3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo .Lover4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+.Lover4:
+ mov ip, curbit
+ .else
+ @ ... and note which bits are done in the result. On the final pass,
+ @ this may subtract too much from the dividend, but the result will be ok,
+ @ since the "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ blo .Lover1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+.Lover1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ blo .Lover2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+.Lover2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ blo .Lover3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+.Lover3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ blo .Lover4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+.Lover4:
+ .endif
+
+ cmp dividend, #0 @ Early termination?
+ beq .Lover5
+ lsr curbit, #4 @ No, any more bits to do?
+ beq .Lover5
+ lsr divisor, #4
+ b .Loop3
+.Lover5:
+ .if \modulo
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq .Lgot_result
+
+ @ If we terminated early, because dividend became zero, then the
+ @ bit in ip will not be in the bottom nibble, and we should not
+ @ perform the additions below. We must test for this though
+ @ (rather than relying upon the TSTs to prevent the additions) since
+ @ the bit in ip could be in the top two bits which might then match
+ @ with one of the smaller RORs.
+ mov curbit, ip
+ mov work, #0x7
+ tst curbit, work
+ beq .Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lover6
+ lsr work, divisor, #3
+ add dividend, work
+.Lover6:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lover7
+ lsr work, divisor, #2
+ add dividend, work
+.Lover7:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq .Lgot_result
+ lsr work, divisor, #1
+ add dividend, work
+ .endif
+.Lgot_result:
+.endm
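+
+/* A worked example (illustrative): for __udivsi3 with dividend = 100 and
+   divisor = 3, .Loop1 scales divisor up to 768 and curbit up to 256; the
+   unrolled passes of .Loop3 then subtract scaled-down copies of the
+   divisor, or-ing the matching scaled-down curbit into result, ending with
+   result = 33 (100 / 3). The final pass also subtracts divisor >> 1 = 1,
+   whose curbit >> 1 has been shifted out at the bottom; that is harmless
+   for the quotient, but it is exactly the over-subtraction that the
+   "overdone" bookkeeping must undo when \modulo is set. */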
+/* ------------------------------------------------------------------------ */
+/* Start of the Real Functions */
+/* ------------------------------------------------------------------------ */
+#ifdef L_udivsi3
+
+ FUNC_START udivsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ pop { work }
+ RET
+
+ FUNC_END udivsi3
+
+#endif /* L_udivsi3 */
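+
+/* For reference (illustrative): compiler-generated code calls this helper
+   with the dividend in r0 and the divisor in r1, so an unsigned `a / b'
+   becomes roughly
+       mov r0, <reg holding a>
+       mov r1, <reg holding b>
+       bl __udivsi3 @ quotient is returned in r0
+*/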
+/* ------------------------------------------------------------------------ */
+#ifdef L_umodsi3
+
+ FUNC_START umodsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ bhs .Lover10
+ RET
+
+.Lover10:
+ push { work }
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ RET
+
+ FUNC_END umodsi3
+
+#endif /* L_umodsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_divsi3
+
+ FUNC_START divsi3
+
+ cmp divisor, #0
+ beq .Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl .Lover10
+ neg divisor, divisor @ Loops below use unsigned.
+.Lover10:
+ cmp dividend, #0
+ bpl .Lover11
+ neg dividend, dividend
+.Lover11:
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 0
+
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ bpl .Lover12
+ neg r0, r0
+.Lover12:
+ pop { work }
+ RET
+
+ FUNC_END divsi3
+
+#endif /* L_divsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_modsi3
+
+ FUNC_START modsi3
+
+ mov curbit, #1
+ cmp divisor, #0
+ beq .Ldiv0
+ bpl .Lover10
+ neg divisor, divisor @ Loops below use unsigned.
+
+.Lover10:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ work later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ bpl .Lover11
+ neg dividend, dividend
+.Lover11:
+ cmp dividend, divisor
+ blo .Lgot_result
+
+ THUMB_DIV_MOD_BODY 1
+
+ pop { work }
+ cmp work, #0
+ bpl .Lover12
+ neg dividend, dividend
+.Lover12:
+ pop { work }
+ RET
+
+ FUNC_END modsi3
+
+#endif /* L_modsi3 */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_tls
+
+ FUNC_START div0
+
+ RET
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+/* ------------------------------------------------------------------------ */
+#ifdef L_dvmd_lnx
+@ GNU/Linux division-by-zero handler. Used in place of L_dvmd_tls
+
+#include <asm/unistd.h>
+
+#define SIGFPE 8 @ can't use <asm/signal.h> as it
+ @ contains too much C rubbish
+ FUNC_START div0
+
+ stmfd sp!, {r1, lr}
+ swi __NR_getpid
+ cmn r0, #1000
+ ldmhsfd sp!, {r1, pc}RETCOND @ not much we can do
+ mov r1, #SIGFPE
+ swi __NR_kill
+#ifdef __THUMB_INTERWORK__
+ ldmfd sp!, {r1, lr}
+ bx lr
+#else
+ ldmfd sp!, {r1, pc}RETCOND
+#endif
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_lnx */
+/* ------------------------------------------------------------------------ */
+/* These next two sections are here despite the fact that they contain Thumb
+ assembler because their presence allows interworked code to be linked even
+ when the GCC library is this one. */
+
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
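+
+/* For example (illustrative), Thumb code calls a function pointer held
+   in r4 with:
+       bl _call_via_r4
+   The stub's BX enters the target in ARM or Thumb state according to
+   bit 0 of r4, and the target returns directly to the caller via lr. */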
+
+ .text
+ .align 0
+ .force_thumb
+
+.macro call_via register
+ THUMB_FUNC_START _call_via_\register
+
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+/* ------------------------------------------------------------------------ */
+/* Do not build the interworking functions when the target architecture does
+ not support Thumb instructions. (This can be a multilib option). */
+#if defined L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+ instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
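+
+/* Roughly (illustrative): after `bl _interwork_call_via_r4', a target with
+   bit 0 of r4 clear has the real return address pushed and lr pointed at
+   _arm_return, so its final `mov pc, lr' lands in _arm_return, which pops
+   the saved address and BXes back into Thumb state. */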
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+ .code 16
+
+.macro interwork register
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_\register
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The LR case has to be handled a little differently... */
+ .code 16
+
+ THUMB_FUNC_START _interwork_call_via_lr
+
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
diff --git a/gcc_arm/config/arm/lib1thumb_981111.asm b/gcc_arm/config/arm/lib1thumb_981111.asm
new file mode 100755
index 0000000..dcabcf4
--- /dev/null
+++ b/gcc_arm/config/arm/lib1thumb_981111.asm
@@ -0,0 +1,747 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+#ifdef __ELF__
+#define __PLT__ (PLT)
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+#define RET mov pc, lr
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+
+work .req r4 @ XXXX is this safe ?
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ TYPE (__udivsi3)
+ .align 0
+ .thumb_func
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ @ Load the constant 0x10000000 into our work register
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ bcc Over1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+Over3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+Over4:
+ cmp dividend, #0 @ Early termination?
+ beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+Lgot_result:
+ mov r0, result
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__udivsi3)
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__umodsi3)
+ TYPE (__umodsi3)
+ .align 0
+ .thumb_func
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ bcs Over1
+ RET
+
+Over1:
+ @ Load the constant 0x10000000 into our work register
+ push { work }
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over2
+ sub dividend, dividend, divisor
+Over2:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over3:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over6
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over6
+ lsr divisor, #4
+ b Loop3
+
+Over6:
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ bne Over7
+ pop { work }
+ RET @ No fixups needed
+Over7:
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Over10
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__umodsi3)
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__divsi3)
+ TYPE (__divsi3)
+ .align 0
+ .thumb_func
+SYM (__divsi3):
+ cmp divisor, #0
+ beq Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ cmp dividend, #0
+ bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+
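+ @ Load the constant 0x10000000 into our work register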
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ bcc Over3
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, result, work
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over6
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, result, work
+Over6:
+ cmp dividend, #0 @ Early termination?
+ beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+
+Lgot_result:
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+ bpl Over7
+ neg r0, r0
+Over7:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__divsi3)
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__modsi3)
+ TYPE (__modsi3)
+ .align 0
+ .thumb_func
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ beq Ldiv0
+ bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ ip later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+ bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions. On the final pass, this may
+ @ subtract too much from the dividend, so keep track of which
+ @ subtractions are done, we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over3
+ sub dividend, dividend, divisor
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over6
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over6:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over7
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over7
+ lsr divisor, #4
+ b Loop3
+
+Over7:
+ @ Any subtractions that we should not have done will be recorded in
+ @ the top three bits of "overdone". Exactly which were not needed
+ @ are governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Lgot_result
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Lgot_result:
+ pop { work }
+ cmp work, #0
+ bpl Over10
+ neg dividend, dividend
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__modsi3)
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+ .thumb_func
+SYM (__div0):
+ RET
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
+
+ .text
+ .align 0
+
+.macro call_via register
+ .globl SYM (_call_via_\register)
+ TYPE (_call_via_\register)
+ .thumb_func
+SYM (_call_via_\register):
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+#ifdef L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+ instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+
+.macro interwork register
+ .code 16
+
+ .globl SYM (_interwork_call_via_\register)
+ TYPE (_interwork_call_via_\register)
+ .thumb_func
+SYM (_interwork_call_via_\register):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The lr case has to be handled a little differently...*/
+ .code 16
+ .globl SYM (_interwork_call_via_lr)
+ TYPE (_interwork_call_via_lr)
+ .thumb_func
+SYM (_interwork_call_via_lr):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
diff --git a/gcc_arm/config/arm/linux-aout.h b/gcc_arm/config/arm/linux-aout.h
new file mode 100755
index 0000000..3a853bd
--- /dev/null
+++ b/gcc_arm/config/arm/linux-aout.h
@@ -0,0 +1,58 @@
+/* Definitions for ARM running Linux-based GNU systems using a.out.
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <linux-aout.h>
+
+/* These definitions differ from the generic <linux-aout.h> ones. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+"%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#undef COMMENT_BEGIN
+
+/* We default to ARM3. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm3
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) -Amachine(arm)"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+
+#define HANDLE_SYSV_PRAGMA
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with a.out)", stderr);
+
+/*
+ * Maths operation domain error number, EDOM
+ * We don't really want this for libc6. However, taking it out would be
+ * too much of a pain for now and it doesn't hurt much.
+ */
+#define TARGET_EDOM 33
+
+#include "arm/aout.h"
+
+#include "arm/linux-gas.h"
diff --git a/gcc_arm/config/arm/linux-elf.h b/gcc_arm/config/arm/linux-elf.h
new file mode 100755
index 0000000..d906093
--- /dev/null
+++ b/gcc_arm/config/arm/linux-elf.h
@@ -0,0 +1,204 @@
+/* Definitions for ARM running Linux-based GNU systems using ELF
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with ELF)", stderr);
+
+/* We have libgcc2. */
+#define HAVE_ATEXIT
+
+/* Default is to use APCS-32 mode. */
+#ifndef SUBTARGET_DEFAULT_APCS26
+#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_SHORT_BYTE)
+#define SUBTARGET_EXTRA_LINK_SPEC \
+ " %{mapcs-26:-m elf32arm26} %{!mapcs-26:-m elf32arm}"
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ " %{mapcs-26:-mapcs-26} %(!mapcs-26:-mapcs-32}"
+#endif
+
+/* Now we define the strings used to build the spec file. */
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
+
+/* Add the compiler's crtend, and the library's crtn. */
+#define ENDFILE_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+ %{pg:gcrtn.o%s}%{!pg:crtn.o%s}"
+
+#define STARTFILE_SPEC "%{!shared:crt1.o%s} \
+ crti.o%s \
+ %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define LINK_SPEC "%{h*} %{version:-v} \
+ %{b} %{Wl,*:%*} \
+ %{static:-Bstatic} \
+ %{shared:-shared} \
+ %{symbolic:-Bsymbolic} \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2} \
+ -X \
+ %{mbig-endian:-EB}" \
+ SUBTARGET_EXTRA_LINK_SPEC
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) \
+-Amachine(arm) -D__ELF__ -Darm_elf"
+
+#ifndef SUBTARGET_DEFAULT_APCS26
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#endif
+
+/* Allow #sccs in preprocessor. */
+#define SCCS_DIRECTIVE
+
+#define USER_LABEL_PREFIX "" /* For ELF the default is no underscores */
+#define LOCAL_LABEL_PREFIX "."
+
+/* Attach a special .ident directive to the end of the file to identify
+ the version of GCC which compiled this code. */
+#define IDENT_ASM_OP ".ident"
+
+/* Output #ident as a .ident. */
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+#ifdef IDENTIFY_WITH_IDENT
+#define ASM_IDENTIFY_GCC(FILE) /* nothing */
+#define ASM_IDENTIFY_LANGUAGE(FILE) \
+ fprintf (FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
+ lang_identify (), version_string)
+#else
+#define ASM_FILE_END(FILE) \
+do { \
+ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
+ } while (0)
+#endif
+
+/* Support const sections and the ctors and dtors sections for g++.
+ Note that there appear to be two different ways to support const
+ sections at the moment. You can either #define the symbol
+ READONLY_DATA_SECTION (giving it some code which switches to the
+ readonly data section) or else you can #define the symbols
+ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
+ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
+#define USE_CONST_SECTION 1
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION() const_section ()
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+#define SUBTARGET_EXTRA_SECTIONS in_const,
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS CONST_SECTION_FUNCTION
+
+extern void text_section ();
+
+#define CONST_SECTION_ASM_OP ".section\t.rodata"
+
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ if (!USE_CONST_SECTION) \
+ text_section (); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+/* Switch into a generic section.
+ This is currently only used to support section attributes.
+
+ We make the section read-only and executable for a function decl,
+ read-only for a const data decl, and writable for a non-const data decl. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+ fprintf (FILE, ".section\t%s,\"%s\",%%progbits\n", NAME, \
+ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
+ (DECL) && DECL_READONLY_SECTION (DECL, RELOC) ? "a" : "aw")
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+#define SELECT_SECTION(DECL,RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ const_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((flag_pic && RELOC) \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ data_section (); \
+ else \
+ const_section (); \
+ } \
+ else \
+ const_section (); \
+}
+
+/* A C statement or statements to switch to the appropriate
+ section for output of RTX in mode MODE. RTX is some kind
+ of constant in RTL. The argument MODE is redundant except
+ in the case of a `const_int' rtx. Currently, these always
+ go into the const section. */
+#define SELECT_RTX_SECTION(MODE,RTX) const_section ()
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+#define INIT_SECTION_ASM_OP ".section\t.init"
+#define FINI_SECTION_ASM_OP ".section\t.fini"
+
+
+/* This is how we tell the assembler that a symbol is weak. */
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+
+#define ASM_OUTPUT_DEF(FILE,NAME1,NAME2) \
+ do { assemble_name (FILE, NAME1); \
+ fputs (" = ", FILE); \
+ assemble_name (FILE, NAME2); \
+ fputc ('\n', FILE); } while (0)
+
+#include "arm/elf.h"
+#include "arm/linux-gas.h"
diff --git a/gcc_arm/config/arm/linux-elf26.h b/gcc_arm/config/arm/linux-elf26.h
new file mode 100755
index 0000000..aa65ae7
--- /dev/null
+++ b/gcc_arm/config/arm/linux-elf26.h
@@ -0,0 +1,32 @@
+/* Definitions for 26-bit ARM running Linux-based GNU systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Philip Blundell <philb@gnu.org>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define SUBTARGET_DEFAULT_APCS26
+
+#define SUBTARGET_LINK_SPEC \
+ " %{mapcs-32:-m elf32arm} %{!mapcs-32:-m elf32arm26}"
+
+#define SUBTARGET_EXTRA_ASM_SPEC \
+ " %{mapcs-32:-mapcs-32} %(!mapcs-32:-mapcs-26}"
+
+#define TARGET_DEFAULT (ARM_FLAG_SHORT_BYTE)
+
+#include "arm/linux-elf.h"
diff --git a/gcc_arm/config/arm/linux-gas.h b/gcc_arm/config/arm/linux-gas.h
new file mode 100755
index 0000000..72567f3
--- /dev/null
+++ b/gcc_arm/config/arm/linux-gas.h
@@ -0,0 +1,87 @@
+/* Definitions of target machine for GNU compiler.
+ ARM Linux-based GNU systems version.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/*
+ * We are using GAS, so stabs should work.
+ */
+
+#ifndef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO 1
+#endif
+
+/*
+ * This is how we tell the assembler that a symbol is weak. GAS always
+ * supports weak symbols.
+ */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* This is used in ASM_FILE_START */
+#undef ARM_OS_NAME
+#define ARM_OS_NAME "Linux"
+
+/* Unsigned chars produce much better code than signed. */
+#define DEFAULT_SIGNED_CHAR 0
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#if 0 /* not yet */
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. The arguments are as
+ follows:
+
+ cacheflush (start, end, flags)
+
+*/
+
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("a1") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("a2") = (unsigned long) (END); \
+ register unsigned long _flg __asm ("a3") = 0; \
+ __asm __volatile ("swi 0x9000b8"); \
+}
+
+#endif
+
+/* If cross-compiling, don't require stdio.h etc to build libgcc.a. */
+#ifdef CROSS_COMPILE
+#ifndef inhibit_libc
+#define inhibit_libc
+#endif
+#endif
diff --git a/gcc_arm/config/arm/linux.h b/gcc_arm/config/arm/linux.h
new file mode 100755
index 0000000..fa8fef1
--- /dev/null
+++ b/gcc_arm/config/arm/linux.h
@@ -0,0 +1,72 @@
+/* Definitions for ARM running Linux-based GNU systems.
+ Copyright (C) 1993, 1994, 1997 Free Software Foundation, Inc.
+ Contributed by Russell King <rmk92@ecs.soton.ac.uk>.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <linux-aout.h>
+
+/* These definitions differ from the generic <linux-aout.h> ones. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+"%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef ASM_APP_ON
+#undef ASM_APP_OFF
+#undef COMMENT_BEGIN
+
+/* We default to ARM3. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm3
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dunix -Darm -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(arm) -Amachine(arm)"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#define HANDLE_SYSV_PRAGMA
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM GNU/Linux with a.out)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "Linux"
+
+/* Unsigned chars produce much better code than signed. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Maths operation domain error number, EDOM */
+#define TARGET_EDOM 33
+#include "arm/aout.h"
+
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
diff --git a/gcc_arm/config/arm/netbsd.h b/gcc_arm/config/arm/netbsd.h
new file mode 100755
index 0000000..7b03d4a
--- /dev/null
+++ b/gcc_arm/config/arm/netbsd.h
@@ -0,0 +1,161 @@
+/* NetBSD/arm (RiscBSD) version.
+ Copyright (C) 1993, 1994, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Mark Brinicombe (amb@physig.ph.kcl.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (ARM/NetBSD)", stderr);
+
+/* This is used in ASM_FILE_START. */
+#define ARM_OS_NAME "NetBSD"
+
+/* Unsigned chars produce much better code than signed. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Since we always use GAS as our assembler we support stabs. */
+#define DBX_DEBUGGING_INFO 1
+
+/*#undef ASM_DECLARE_FUNCTION_NAME*/
+
+/* ARM6 family default cpu. */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
+
+/* Default is to use APCS-32 mode. */
+#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_SOFT_FLOAT)
+
+#include "arm/aout.h"
+
+/* This gets redefined in config/netbsd.h. */
+#undef TARGET_MEM_FUNCTIONS
+
+#include <netbsd.h>
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#undef DWARF2_UNWIND_INFO
+
+/* Some defines for CPP.
+ arm32 is the NetBSD port name, so we always define arm32 and __arm32__. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Dunix -Driscbsd -Darm32 -D__arm32__ -D__arm__ -D__NetBSD__ \
+-Asystem(unix) -Asystem(NetBSD) -Acpu(arm) -Amachine(arm)"
+
+/* Define _POSIX_SOURCE if necessary. */
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) %(cpp_endian) \
+%{posix:-D_POSIX_SOURCE} \
+"
+
+/* Because TARGET_DEFAULT sets ARM_FLAG_APCS_32 */
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+
+/* Because TARGET_DEFAULT sets ARM_FLAG_SOFT_FLOAT */
+#undef CPP_FLOAT_DEFAULT_SPEC
+#define CPP_FLOAT_DEFAULT_SPEC "-D__SOFTFP__"
+
+/* Pass -X to the linker so that it will strip symbols starting with 'L' */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+-X %{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{R*} \
+%{static:-Bstatic} %{assert*} \
+"
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#define HANDLE_SYSV_PRAGMA
+
+/* We don't have any limit on the length as our debugger is GDB. */
+#undef DBX_CONTIN_LENGTH
+
+/* NetBSD does its profiling differently to the Acorn compiler. We
+ don't need a word following the mcount call; and skipping it
+ requires either an assembly stub or use of -fomit-frame-pointer when
+ compiling the profiling functions. Since we break Acorn CC
+ compatibility below, a little more won't hurt. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf(STREAM, "\tmov\t%sip, %slr\n", REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf(STREAM, "\tbl\tmcount\n"); \
+}
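+
+/* The hook above thus emits, for each profiled function (illustrative):
+       mov ip, lr
+       bl mcount
+   with no data word following the call, as described. */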
+
+/* On the ARM `@' introduces a comment, so we must use something else
+ for .type directives. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "%%%s"
+
+/* NetBSD uses the old PCC style aggregate returning conventions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* Although not normally relevant (since by default, all aggregates
+ are returned in memory) compiling some parts of libc requires
+ non-APCS style struct returns. */
+#undef RETURN_IN_MEMORY
+
+/* VERY BIG NOTE : Change of structure alignment for RiscBSD.
+ There are consequences you should be aware of...
+
+ Normally GCC/arm uses a structure alignment of 32 for compatibility
+ with armcc. This means that structures are padded to a word
+ boundary. However this causes problems with bugged NetBSD kernel
+ code (possibly userland code as well - I have not checked every
+ binary). The nature of this bugged code is to rely on sizeof()
+ returning the correct size of various structures rounded to the
+ nearest byte (SCSI and ether code are two examples, the vm system
+ is another). This code breaks when the structure alignment is 32
+ as sizeof() will report a word-rounded size. By changing the
+ structure alignment to 8, GCC will conform to what is expected by
+ NetBSD.
+
+ This has several side effects that should be considered.
+ 1. Structures will only be aligned to the size of the largest member.
+ i.e. structures containing only bytes will be byte aligned.
+ structures containing shorts will be half-word aligned.
+ structures containing ints will be word aligned.
+
+ This means structures should be padded to a word boundary if
+ alignment of 32 is required for byte structures etc.
+
+ 2. A potential performance penalty may exist if strings are no longer
+ word aligned. GCC will not be able to use word load/stores to copy
+ short strings.
+
+ This modification is not encouraged but with the present state of the
+ NetBSD source tree it is currently the only solution that meets the
+ requirements. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY 8
diff --git a/gcc_arm/config/arm/pe.c b/gcc_arm/config/arm/pe.c
new file mode 100755
index 0000000..491f505
--- /dev/null
+++ b/gcc_arm/config/arm/pe.c
@@ -0,0 +1,521 @@
+/* CYGNUS LOCAL dje/pe, entire file */
+/* Routines for GCC for ARM/pe.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "output.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+extern int current_function_anonymous_args;
+
+/* ARM/PE specific attribute support.
+
+ ARM/PE has three new attributes:
+ naked - for interrupt functions
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times.
+*/
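+
+/* For example (illustrative):
+
+     __declspec (dllexport) __declspec (naked) void handler (void);
+
+   rather than the Microsoft form `__declspec (dllexport naked)'. */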
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR. */
+
+int
+arm_pe_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("dllexport", attr))
+ return 1;
+ if (is_attribute_p ("dllimport", attr))
+ return 1;
+
+ return arm_valid_machine_decl_attribute (decl, attributes, attr, args);
+}
+
+#if 0 /* needed when we tried type attributes */
+/* Return zero if TYPE1 and TYPE2 are incompatible, one if they are compatible,
+ and two if they are nearly compatible (which causes a warning to be
+ generated). */
+
+int
+arm_pe_comp_type_attributes (type1, type2)
+ tree type1, type2;
+{
+ type1 = TYPE_ATTRIBUTES (type1);
+ type2 = TYPE_ATTRIBUTES (type2);
+
+ if (lookup_attribute ("dllimport", type1)
+ && lookup_attribute ("dllexport", type2))
+ return 0;
+
+ if (lookup_attribute ("dllimport", type2)
+ && lookup_attribute ("dllexport", type1))
+ return 0;
+
+ return 1;
+}
+#endif
+
+/* Merge attributes in decls OLD and NEW.
+
+ This handles the following situation:
+
+ __declspec (dllimport) int foo;
+ int foo;
+
+ The second instance of `foo' nullifies the dllimport. */
+
+tree
+arm_pe_merge_machine_decl_attributes (old, new)
+ tree old, new;
+{
+ tree a;
+ int delete_dllimport_p;
+
+ old = DECL_MACHINE_ATTRIBUTES (old);
+ new = DECL_MACHINE_ATTRIBUTES (new);
+
+ /* What we need to do here is remove from `old' dllimport if it doesn't
+ appear in `new'. dllimport behaves like extern: if a declaration is
+ marked dllimport and a definition appears later, then the object
+ is not dllimport'd. */
+
+ if (lookup_attribute ("dllimport", old) != NULL_TREE
+ && lookup_attribute ("dllimport", new) == NULL_TREE)
+ delete_dllimport_p = 1;
+ else
+ delete_dllimport_p = 0;
+
+ a = merge_attributes (old, new);
+
+ if (delete_dllimport_p)
+ {
+ tree prev,t;
+
+ /* Scan the list for dllimport and delete it. */
+ for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
+ {
+ if (is_attribute_p ("dllimport", TREE_PURPOSE (t)))
+ {
+ if (prev == NULL_TREE)
+ a = TREE_CHAIN (a);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (t);
+ break;
+ }
+ }
+ }
+
+ return a;
+}
+
+/* Check a type that has a virtual table, and see if any virtual methods are
+ marked for import or export, and if so, arrange for the vtable to
+ be imported or exported. */
+
+static int
+arm_check_vtable_importexport (type)
+ tree type;
+{
+ tree methods = TYPE_METHODS (type);
+ tree fndecl;
+
+ if (TREE_CODE (methods) == FUNCTION_DECL)
+ fndecl = methods;
+ else if (TREE_VEC_ELT (methods, 0) != NULL_TREE)
+ fndecl = TREE_VEC_ELT (methods, 0);
+ else
+ fndecl = TREE_VEC_ELT (methods, 1);
+
+ while (fndecl)
+ {
+ if (DECL_VIRTUAL_P (fndecl) || DECL_VINDEX (fndecl) != NULL_TREE)
+ {
+ tree exp = lookup_attribute ("dllimport",
+ DECL_MACHINE_ATTRIBUTES (fndecl));
+ if (exp == 0)
+ exp = lookup_attribute ("dllexport",
+ DECL_MACHINE_ATTRIBUTES (fndecl));
+ if (exp)
+ return 1;
+ }
+
+ fndecl = TREE_CHAIN (fndecl);
+ }
+
+ return 0;
+}
+
+/* Return non-zero if DECL is a dllexport'd object. */
+
+tree current_class_type; /* FIXME */
+
+int
+arm_dllexport_p (decl)
+ tree decl;
+{
+ tree exp;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ exp = lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (exp)
+ return 1;
+
+#if 0 /* This was a hack to get vtables exported or imported since only one
+ copy of them is ever output. Disabled pending better solution. */
+ /* For C++, the vtables might have to be marked. */
+ if (TREE_CODE (decl) == VAR_DECL && DECL_VIRTUAL_P (decl))
+ {
+ if (TREE_PUBLIC (decl)
+ && DECL_EXTERNAL (decl) == 0
+ && (DECL_CONTEXT (decl)
+ ? arm_check_vtable_importexport (DECL_CONTEXT (decl))
+ : current_class_type
+ ? arm_check_vtable_importexport (current_class_type)
+ : 0)
+ )
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* Return non-zero if DECL is a dllimport'd object. */
+
+int
+arm_dllimport_p (decl)
+ tree decl;
+{
+ tree imp;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TARGET_NOP_FUN_DLLIMPORT)
+ return 0;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ imp = lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (imp)
+ return 1;
+
+#if 0 /* This was a hack to get vtables exported or imported since only one
+ copy of them is ever output. Disabled pending better solution. */
+ /* For C++, the vtables might have to be marked. */
+ if (TREE_CODE (decl) == VAR_DECL && DECL_VIRTUAL_P (decl))
+ {
+ if (TREE_PUBLIC (decl)
+ && DECL_EXTERNAL (decl)
+ && (DECL_CONTEXT (decl)
+ ? arm_check_vtable_importexport (DECL_CONTEXT (decl))
+ : current_class_type
+ ? arm_check_vtable_importexport (current_class_type)
+ : 0)
+ )
+ return 1;
+ }
+#endif
+
+ return 0;
+}
+
+/* Return non-zero if SYMBOL is marked as being dllexport'd. */
+
+int
+arm_dllexport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
+}
+
+/* Return non-zero if SYMBOL is marked as being dllimport'd. */
+
+int
+arm_dllimport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
+}
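+
+/* These are the encodings created by arm_mark_dllexport and
+   arm_mark_dllimport below: `foo' becomes `@e.foo' when exported and
+   `@i.__imp_foo' when imported; the `oldname += 9' in arm_mark_dllexport
+   skips past "@i.__imp_". */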
+
+/* Mark a DECL as being dllexport'd.
+ Note that we override the previous setting (eg: dllimport). */
+
+void
+arm_mark_dllexport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ rtx rtlname;
+ tree idp;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (arm_dllimport_name_p (oldname))
+ oldname += 9;
+ else if (arm_dllexport_name_p (oldname))
+ return; /* already done */
+
+ newname = alloca (strlen (oldname) + 4);
+ sprintf (newname, "@e.%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+}
+
+/* Mark a DECL as being dllimport'd. */
+
+void
+arm_mark_dllimport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ tree idp;
+ rtx rtlname, newrtl;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (arm_dllexport_name_p (oldname))
+ abort (); /* this shouldn't happen */
+ else if (arm_dllimport_name_p (oldname))
+ return; /* already done */
+
+ /* ??? One can well ask why we're making these checks here,
+ and that would be a good question. */
+
+ /* Imported variables can't be initialized. */
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_VIRTUAL_P (decl)
+ && DECL_INITIAL (decl))
+ {
+ error_with_decl (decl, "initialized variable `%s' is marked dllimport");
+ return;
+ }
+ /* Nor can they be static. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl)
+ && 0 /*???*/)
+ {
+ error_with_decl (decl, "static variable `%s' is marked dllimport");
+ return;
+ }
+
+ /* `extern' needn't be specified with dllimport.
+ Specify `extern' now and hope for the best. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+
+ newname = alloca (strlen (oldname) + 11);
+ sprintf (newname, "@i.__imp_%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ newrtl = gen_rtx (MEM, Pmode,
+ gen_rtx (SYMBOL_REF, Pmode,
+ IDENTIFIER_POINTER (idp)));
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+}
+
+/* Cover function to implement ENCODE_SECTION_INFO. */
+
+void
+arm_pe_encode_section_info (decl)
+ tree decl;
+{
+ /* This bit is copied from arm.h. */
+ if (optimize > 0 && TREE_CONSTANT (decl)
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
+ {
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl));
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+ }
+
+ /* Mark the decl so we can tell from the rtl whether the object is
+ dllexport'd or dllimport'd. */
+
+ if (arm_dllexport_p (decl))
+ arm_mark_dllexport (decl);
+ else if (arm_dllimport_p (decl))
+ arm_mark_dllimport (decl);
+ /* It might be that DECL has already been marked as dllimport, but a
+ subsequent definition nullified that. The attribute is gone but
+ DECL_RTL still has @i.__imp_foo. We need to remove that. */
+ else if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != NULL_RTX
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
+ && arm_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
+ {
+ char *oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
+ tree idp = get_identifier (oldname + 9);
+ rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
+ ??? We leave these alone for now. */
+ }
+}
+
+/* Cover function for UNIQUE_SECTION. */
+
+void
+arm_pe_unique_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ int len;
+  char *name, *string, *prefix;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+  /* Strip off any encoding in the name.  */
+ STRIP_NAME_ENCODING (name, name);
+
+ /* The object is put in, for example, section .text$foo.
+     The linker will then ultimately place it in .text
+ (everything from the $ on is stripped). */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ prefix = ".text$";
+ else if (DECL_READONLY_SECTION (decl, reloc))
+ prefix = ".rdata$";
+ else
+ prefix = ".data$";
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+ sprintf (string, "%s%s", prefix, name);
+
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+}
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+
+int
+arm_pe_return_in_memory (type)
+ tree type;
+{
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+ int num_fields = 0;
+
+ /* For a record containing just a single element, we can be a little
+ less restrictive. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && ! TREE_STATIC (field))
+ {
+ if ((AGGREGATE_TYPE_P (TREE_TYPE (field))
+ && RETURN_IN_MEMORY (TREE_TYPE (field)))
+ || FLOAT_TYPE_P (TREE_TYPE (field)))
+ return 1;
+ num_fields++;
+ }
+ }
+
+ if (num_fields == 1)
+ return 0;
+
+      /* For a struct, we can return in a register if every element is a
+	 bit-field and the whole struct fits in one word.  */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL
+ && ! TREE_STATIC (field)
+ && (! DECL_BIT_FIELD_TYPE (field)
+ || (TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field))
+ + TREE_INT_CST_LOW (DECL_SIZE (field))) > 32))
+ return 1;
+ }
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE
+ || TREE_CODE (type) == QUAL_UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL
+ && ! TREE_STATIC (field)
+ && ((AGGREGATE_TYPE_P (TREE_TYPE (field))
+ && RETURN_IN_MEMORY (TREE_TYPE (field)))
+ || FLOAT_TYPE_P (TREE_TYPE (field))))
+ return 1;
+ }
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
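
As a worked illustration of arm_pe_return_in_memory, here is how a few
common types come out under the rules above (hypothetical declarations,
added for this listing; they are not part of the patch):

    struct one_int   { int i; };             /* single integral field: register */
    struct has_float { float f; };           /* FP member forces memory         */
    struct two_ints  { int a, b; };          /* two non-bit-field words: memory */
    struct all_bits  { int a : 8, b : 8; };  /* bit-fields within 32 bits: register */
    union  int_bytes { int a; char c; };     /* all-integral union: register    */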
diff --git a/gcc_arm/config/arm/pe.h b/gcc_arm/config/arm/pe.h
new file mode 100755
index 0000000..dcc2042
--- /dev/null
+++ b/gcc_arm/config/arm/pe.h
@@ -0,0 +1,295 @@
+/* CYGNUS LOCAL entire file */
+/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/coff.h"
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/pe)", stderr)
+
+/* Support the __declspec keyword by turning its uses into attributes.
+ We currently only support: naked, dllimport, and dllexport.
+ Note that the current way we do this may result in a collision with
+ predefined attributes later on. This can be solved by using one attribute,
+ say __declspec__, and passing args to it. The problem with that approach
+ is that args are not accumulated: each new appearance would clobber any
+ existing args. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Darm -D__pe__ -Acpu(arm) -Amachine(arm) \
+-D__declspec(x)=__attribute__((x)) \
+"
+
+/* Experimental addition for pr 7885.
+ Ignore dllimport for functions. */
+#define TARGET_NOP_FUN_DLLIMPORT (target_flags & 0x20000)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{ "nop-fun-dllimport", 0x20000 }, \
+{ "no-nop-fun-dllimport", -0x20000 },
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT + 0x20000)
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* Same as arm.h except r10 is call-saved, not fixed. */
+#undef FIXED_REGISTERS
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0,0,0,0,0, \
+ 0,0,0,1,0,1,0,1, \
+ 0,0,0,0,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* Same as arm.h except r10 is call-saved, not fixed. */
+#undef CALL_USED_REGISTERS
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1,0,0,0,0, \
+ 0,0,0,1,1,1,1,1, \
+ 1,1,1,1,0,0,0,0, \
+ 1,1,1 \
+}
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ ((TYPE_MODE ((TYPE)) == BLKmode && ! TYPE_NO_FORCE_BLK (TYPE)) \
+ || (AGGREGATE_TYPE_P ((TYPE)) && arm_pe_return_in_memory ((TYPE))))
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_pe_valid_machine_decl_attribute ();
+#undef VALID_MACHINE_DECL_ATTRIBUTE
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_pe_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+#if 0 /* Needed when we tried type attributes. */
+/* A C expression whose value is zero if the attributes on
+ TYPE1 and TYPE2 are incompatible, one if they are compatible,
+ and two if they are nearly compatible (which causes a warning to be
+ generated). */
+extern int arm_pe_comp_type_attributes ();
+#define COMP_TYPE_ATTRIBUTES(TYPE1, TYPE2) \
+arm_pe_comp_type_attributes ((TYPE1), (TYPE2))
+#endif
+
+extern union tree_node *arm_pe_merge_machine_decl_attributes ();
+#define MERGE_MACHINE_DECL_ATTRIBUTES(OLD, NEW) \
+arm_pe_merge_machine_decl_attributes ((OLD), (NEW))
+
+/* In addition to the stuff done in arm.h, we must mark dll symbols specially.
+ Definitions of dllexport'd objects install some info in the .drectve
+ section. References to dllimport'd objects are fetched indirectly via
+ __imp_. If both are declared, dllexport overrides.
+ This is also needed to implement one-only vtables: they go into their own
+ section and we need to set DECL_SECTION_NAME so we do that here.
+ Note that we can be called twice on the same decl. */
+extern void arm_pe_encode_section_info ();
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) \
+arm_pe_encode_section_info (DECL)
+
+/* Used to implement dllexport overriding dllimport semantics.  It's also
+   used to handle vtables - the first pass won't do anything because
+   DECL_CONTEXT (DECL) will be 0, so arm_dll{ex,im}port_p will return 0.  */
+#if 0
+#define REDO_SECTION_INFO_P(DECL) \
+((DECL_MACHINE_ATTRIBUTES (DECL) != NULL_TREE) \
+ || (TREE_CODE (DECL) == VAR_DECL && DECL_VIRTUAL_P (DECL)))
+#else
+#define REDO_SECTION_INFO_P(DECL) 1
+#endif
+
+/* Utility used only in this file. */
+#define ARM_STRIP_NAME_ENCODING(SYM_NAME) \
+((SYM_NAME) + ((SYM_NAME)[0] == '@' ? 3 : 0))
+
+/* Strip any text from SYM_NAME added by ENCODE_SECTION_INFO and store
+ the result in VAR. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYM_NAME) \
+(VAR) = ARM_STRIP_NAME_ENCODING (SYM_NAME)
+
+/* Define this macro if in some cases global symbols from one translation
+ unit may not be bound to undefined symbols in another translation unit
+ without user intervention. For instance, under Microsoft Windows
+ symbols must be explicitly imported from shared libraries (DLLs). */
+#define MULTIPLE_SYMBOL_SPACES
+
+#define UNIQUE_SECTION_P(DECL) DECL_ONE_ONLY (DECL)
+extern void arm_pe_unique_section ();
+#define UNIQUE_SECTION(DECL,RELOC) arm_pe_unique_section (DECL, RELOC)
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+ /* Functions may have been compiled at various levels of \
+ optimization so we can't use `same_size' here. Instead, \
+ have the linker pick one. */ \
+ if ((DECL) && DECL_ONE_ONLY (DECL)) \
+ fprintf (STREAM, "\t.linkonce %s\n", \
+ TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "discard" : "same_size"); \
+} while (0)
+
+/* The default version of this outputs a lot of .req's to define aliases
+   for various registers.  Let's try to avoid this.  */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/pe\n", \
+ ASM_COMMENT_START, version_string); \
+ output_file_directive ((STREAM), main_input_filename); \
+} while (0)
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, ARM_STRIP_NAME_ENCODING (NAME))
+
+/* Output a function definition label. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ function_section (DECL); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Output a common block. */
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf ((STREAM), "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ if (! arm_dllimport_name_p (NAME)) \
+ { \
+ fprintf ((STREAM), "\t.comm\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ", %d\t%s %d\n", \
+ (ROUNDED), ASM_COMMENT_START, (SIZE)); \
+ } \
+} while (0)
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS in_drectve,
+
+/* A list of extra section function definitions. */
+
+#undef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ DRECTVE_SECTION_FUNCTION \
+ SWITCH_TO_SECTION_FUNCTION
+
+#define DRECTVE_SECTION_FUNCTION \
+void \
+drectve_section () \
+{ \
+ if (in_section != in_drectve) \
+ { \
+ fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP); \
+ in_section = in_drectve; \
+ } \
+}
+
+/* Switch to SECTION (an `enum in_section').
+
+ ??? This facility should be provided by GCC proper.
+ The problem is that we want to temporarily switch sections in
+ ASM_DECLARE_OBJECT_NAME and then switch back to the original section
+ afterwards. */
+#define SWITCH_TO_SECTION_FUNCTION \
+void \
+switch_to_section (section, decl) \
+ enum in_section section; \
+ tree decl; \
+{ \
+ switch (section) \
+ { \
+ case in_text: text_section (); break; \
+ case in_data: data_section (); break; \
+ case in_named: named_section (decl, NULL, 0); break; \
+ case in_rdata: rdata_section (); break; \
+ case in_ctors: ctors_section (); break; \
+ case in_dtors: dtors_section (); break; \
+ case in_drectve: drectve_section (); break; \
+ default: abort (); break; \
+ } \
+}
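
To make the machinery in pe.h concrete, a short sketch of user code under
this target (invented names, illustrative only):

    __declspec (dllexport) int answer (void) { return 42; }
    __declspec (dllimport) extern int shared_counter;

    int read_counter (void) { return shared_counter; }

CPP_PREDEFINES rewrites __declspec (x) into __attribute__ ((x)); encoding
then renames answer to "@e.answer", so ASM_DECLARE_FUNCTION_NAME emits
" -export:answer" into the .drectve section, while shared_counter becomes
"@i.__imp_shared_counter" wrapped in a MEM, i.e. an indirect load through
__imp_shared_counter.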
diff --git a/gcc_arm/config/arm/riscix.h b/gcc_arm/config/arm/riscix.h
new file mode 100755
index 0000000..a96e784
--- /dev/null
+++ b/gcc_arm/config/arm/riscix.h
@@ -0,0 +1,151 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX version.
+ Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Translation to find startup files.  On RISC iX boxes,
+   crt0.o, mcrt0.o and gcrt0.o are in /usr/lib.  */
+#define STARTFILE_SPEC "\
+ %{pg:/usr/lib/gcrt0.o%s}\
+ %{!pg:%{p:/usr/lib/mcrt0.o%s}\
+ %{!p:/usr/lib/crt0.o%s}}"
+
+/* RISC iX has no concept of -lg */
+/* If -static is specified then link with -lc_n */
+
+#ifndef LIB_SPEC
+#define LIB_SPEC "\
+ %{g*:-lg}\
+ %{!p:%{!pg:%{!static:-lc}%{static:-lc_n}}}\
+ %{p:-lc_p}\
+ %{pg:-lc_p}"
+#endif
+
+/* The RISC iX assembler never deletes any symbols from the object module,
+   and by default ld doesn't either.  -X causes local symbols starting
+   with 'L' to be deleted, which is what we want.  */
+#ifndef LINK_SPEC
+#define LINK_SPEC "-X"
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-Darm -Driscix -Dunix -Asystem(unix) -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} \
+ %{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible} -D_BSD_C} \
+ %{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible} \
+ %{pedantic:%e-mxopen and -pedantic incompatible} -D_XOPEN_C} \
+ %{!mbsd:%{!mxopen:%{!ansi: -D_BSD_C}}}"
+#endif /* END CYGNUS LOCAL */
+
+/* RISCiX has some weird symbol-name munging that is done to the object module
+   after assembly, which enables multiple libraries to be supported within
+   one (possibly shared) library.  It basically changes the names of certain
+   symbols (for example, _bcopy is converted to _$bcopy if using BSD).
+   Symrename's parameters are determined as follows:
+ -mno-symrename Don't run symrename
+ -mbsd symrename -BSD <file>
+ -mxopen symrename -XOPEN <file>
+ -ansi symrename - <file>
+ <none> symrename -BSD <file>
+ */
+
+#ifndef ASM_FINAL_SPEC
+#if !defined (CROSS_COMPILE)
+#define ASM_FINAL_SPEC "\
+%{!mno-symrename: \
+ \n /usr/bin/symrename \
+ -%{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible}BSD}\
+%{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible}\
+%{pedantic:%e-mxopen and -pedantic incompatible}XOPEN}\
+%{!mbsd:%{!mxopen:%{!ansi:BSD}}} %{c:%{o*:%*}%{!o*:%b.o}}%{!c:%U.o}}"
+#endif
+#endif
+
+/* None of these is actually used in cc1, but if we don't define them in
+   target switches cc1 complains about them.  For the sake of argument let's
+   allocate bit 31 of the target flags for such options.  */
+#define SUBTARGET_SWITCHES \
+{"bsd", 0x80000000}, {"xopen", 0x80000000}, {"no-symrename", 0x80000000},
+
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION \
+ fputs (" (ARM/RISCiX)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "RISCiX"
+
+/* Unsigned chars produce much better code than signed ones.  */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Define this if the target system supports the function atexit from the
+ ANSI C standard. If this is not defined, and INIT_SECTION_ASM_OP is not
+ defined, a default exit function will be provided to support C++.
+ The man page only describes on_exit, but atexit is also there. */
+#define HAVE_ATEXIT 1
+
+/* Some systems use __main in a way incompatible with its use in gcc, in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+#endif
+
+/* size_t is "unsigned int" in RISCiX */
+#define SIZE_TYPE "unsigned int"
+
+/* ptrdiff_t is "int" in RISCiX */
+#define PTRDIFF_TYPE "int"
+
+/* Maths operation domain error number, EDOM */
+#define TARGET_EDOM 33
+
+/* Override the normal default CPU */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm2
+
+#include "arm/aout.h"
+
+/* The RISCiX assembler does not understand .set */
+#undef SET_ASM_OP
+
+/* Override CPP_SPEC: there's no point handling endianness (and probably
+   not much point handling apcs_pc), and we want to add the right #defines
+   when using the include files.  */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) \
+ %{mbsd:%{pedantic:%e-mbsd and -pedantic incompatible} -D_BSD_C} \
+ %{mxopen:%{mbsd:%e-mbsd and -mxopen incompatible} \
+ %{pedantic:%e-mxopen and -pedantic incompatible} -D_XOPEN_C} \
+ %{!mbsd:%{!mxopen:%{!ansi: -D_BSD_C}}}"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in! */
+
+#ifdef DBX_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#endif
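
One visible consequence of DEFAULT_SIGNED_CHAR 0 above, as a tiny
stand-alone fragment (illustrative only):

    char c = '\377';   /* plain char is unsigned on this target, so c == 255 */
    int  i = (c > 0);  /* i == 1 here; on a signed-char target c would be -1 */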
diff --git a/gcc_arm/config/arm/riscix1-1.h b/gcc_arm/config/arm/riscix1-1.h
new file mode 100755
index 0000000..aa27965
--- /dev/null
+++ b/gcc_arm/config/arm/riscix1-1.h
@@ -0,0 +1,100 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX 1.1x version.
+ Copyright (C) 1993, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* RISCiX 1.1x is basically the same as 1.2x except that it doesn't have
+ symrename or atexit. */
+
+/* Translation to find startup files. On RISCiX boxes, gcrt0.o is in
+ /usr/lib. */
+#define STARTFILE_SPEC \
+ "%{pg:/usr/lib/gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}"
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Darm -Driscix -Dunix -Asystem(unix) -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} %{!ansi: -D_BSD_C}"
+#endif /* END CYGNUS LOCAL */
+
+/* RISCiX 1.1 doesn't have X/OPEN support, so only accept -mbsd (but ignore
+   it).
+   By not having -mxopen and -mno-symrename, we get warning messages,
+   but everything still compiles.  */
+/* None of these is actually used in cc1, so they modify bit 31 */
+#define SUBTARGET_SWITCHES \
+{"bsd", 0x80000000},
+
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION \
+ fputs (" (ARM/RISCiX)", stderr);
+
+/* This is used in ASM_FILE_START */
+#define ARM_OS_NAME "RISCiX"
+
+#ifdef riscos
+#define TARGET_WHEN_DEBUGGING 3
+#else
+#define TARGET_WHEN_DEBUGGING 1
+#endif
+
+/* 'char' is signed by default on RISCiX, unsigned on RISCOS. */
+#ifdef riscos
+#define DEFAULT_SIGNED_CHAR 0
+#else
+#define DEFAULT_SIGNED_CHAR 1
+#endif
+
+/* Define this if the target system supports the function atexit from the
+ ANSI C standard. If this is not defined, and INIT_SECTION_ASM_OP is not
+ defined, a default exit function will be provided to support C++.
+ The man page only describes on_exit, but atexit is also there.
+ This seems to be missing in early versions. */
+/*#define HAVE_ATEXIT 1 */
+/* Some systems use __main in a way incompatible with its use in gcc, in these
+ cases use the macros NAME__MAIN to give a quoted symbol and SYMBOL__MAIN to
+ give the same symbol without quotes for an alternative entry point. You
+ must define both, or neither. */
+#ifndef NAME__MAIN
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+#endif
+
+/* Override the normal default CPU */
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm2
+
+#include "arm/aout.h"
+
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%(cpp_cpu_arch) %(cpp_apcs_pc) %(cpp_float) %{!ansi: -D_BSD_C} \
+"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in! */
+
+#ifdef DBX_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#endif
diff --git a/gcc_arm/config/arm/rix-gas.h b/gcc_arm/config/arm/rix-gas.h
new file mode 100755
index 0000000..dae16d0
--- /dev/null
+++ b/gcc_arm/config/arm/rix-gas.h
@@ -0,0 +1,43 @@
+/* Definitions of target machine for GNU compiler. ARM RISCiX(stabs) version.
+ Copyright (C) 1993 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (rwe11@cl.cam.ac.uk), based on original
+ work by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Limit the length of a stabs entry (for the broken Acorn assembler) */
+#define DBX_CONTIN_LENGTH 80
+
+#include "arm/riscix.h"
+
+/* The native RISCiX assembler does not support stabs of any kind; because
+ the native assembler is not used by the compiler, Acorn didn't feel it was
+ necessary to put them in!
+ However, this file assumes that we have an assembler that does have stabs,
+ so we put them back in. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Unfortunately dbx doesn't understand these */
+/* Dbx on RISCiX is so broken that I've given up trying to support it.
+   Let's just support gdb.  */
+/* #define DEFAULT_GDB_EXTENSIONS 0 */
+/* RISCiX dbx doesn't accept xrefs */
+/* #define DBX_NO_XREFS 1 */
+
diff --git a/gcc_arm/config/arm/semi.h b/gcc_arm/config/arm/semi.h
new file mode 100755
index 0000000..98f26ce
--- /dev/null
+++ b/gcc_arm/config/arm/semi.h
@@ -0,0 +1,55 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ Copyright (C) 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* CYGNUS LOCAL */
+/* Note: The definitions LOCAL_LABEL_PREFIX and USER_LABEL_PREFIX here
+ *must* match the definitions in bfd/coff-arm.c */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+/* #define LOCAL_LABEL_PREFIX "" */
+/* #define NO_DOT_IN_LABEL */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+/* END CYGNUS LOCAL */
+
+/* CYGNUS LOCAL */
+#define STARTFILE_SPEC "%scrt0.o"
+/* END CYGNUS LOCAL */
+
+#define LIB_SPEC "-lc"
+
+#define CPP_PREDEFINES \
+ "-Darm -D__semi__ -Acpu(arm) -Amachine(arm)"
+
+/* CYGNUS LOCAL */
+#define ASM_SPEC "%{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+/* END CYGNUS LOCAL */
+
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+
+#define TARGET_DEFAULT ARM_FLAG_APCS_32
+
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
diff --git a/gcc_arm/config/arm/semiaof.h b/gcc_arm/config/arm/semiaof.h
new file mode 100755
index 0000000..14de3b2
--- /dev/null
+++ b/gcc_arm/config/arm/semiaof.h
@@ -0,0 +1,59 @@
+/* Definitions of target machine for GNU compiler. ARM on semi-hosted platform
+ AOF Syntax assembler.
+ Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Earnshaw (richard.earnshaw@armltd.co.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define CPP_PREDEFINES \
+ "-Darm -Dsemi -Acpu(arm) -Amachine(arm)"
+
+ /* CYGNUS LOCAL */
+#define CPP_SPEC "%{m6:-D__arm6__} \
+%{mcpu-*:-D__%*} \
+%{mcpu=*:-D__%*} \
+%{mapcs-32:-D__APCS_32__ -U__APCS_26__} \
+%{mapcs-26:-D__APCS_26__ -U__APCS_32__} \
+%{!mapcs-32: %{!mapcs-26:-D__APCS_32__}} \
+%{msoft-float:-D__SOFTFP__} \
+%{mhard-float:-U__SOFTFP__} \
+%{!mhard-float: %{!msoft-float:-U__SOFTFP__}} \
+%{mbig-endian:-D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{mbe:-D__ARMEB__ %{mwords-little-endian:-D__ARMWEL__}} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__}} \
+"
+ /* END CYGNUS LOCAL */
+
+#define ASM_SPEC "%{g -g} -arch 4 \
+-apcs 3%{mapcs-32:/32bit}%{mapcs-26:/26bit}%{!mapcs-26:%{!mapcs-32:/32bit}}"
+
+#define LIB_SPEC "%{Eb: armlib_h.32b%s}%{!Eb: armlib_h.32l%s}"
+
+#define TARGET_VERSION fputs (" (ARM/semi-hosted)", stderr);
+
+#define TARGET_DEFAULT ARM_FLAG_APCS_32
+
+/* The Norcroft C library defines size_t as "unsigned int" */
+#define SIZE_TYPE "unsigned int"
+
+#include "arm/aof.h"
+
+#undef CPP_APCS_PC_DEFAULT_SPEC
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+
+
diff --git a/gcc_arm/config/arm/t-arm-elf b/gcc_arm/config/arm/t-arm-elf
new file mode 100755
index 0000000..b57eeca
--- /dev/null
+++ b/gcc_arm/config/arm/t-arm-elf
@@ -0,0 +1,35 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+# CYGNUS LOCAL interworking
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# END CYGNUS LOCAL interworking
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# CYGNUS LOCAL
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork fno-leading-underscore/fleading-underscore mcpu=arm7
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork elf under nofmult
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork* *mthumb-interwork*/*mcpu=arm7*
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle mcpu?arm7=mcpu?arm7d mcpu?arm7=mcpu?arm7di mcpu?arm7=mcpu?arm70 mcpu?arm7=mcpu?arm700 mcpu?arm7=mcpu?arm700i mcpu?arm7=mcpu?arm710 mcpu?arm7=mcpu?arm710c mcpu?arm7=mcpu?arm7100 mcpu?arm7=mcpu?arm7500 mcpu?arm7=mcpu?arm7500fe mcpu?arm7=mcpu?arm6 mcpu?arm7=mcpu?arm60 mcpu?arm7=mcpu?arm600 mcpu?arm7=mcpu?arm610 mcpu?arm7=mcpu?arm620
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+# END CYGNUS LOCAL
+
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc
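
For reference, the fp-bit.c rule above synthesizes a wrapper file whose
first lines are the following, with the shared config/fp-bit.c source
appended afterwards (dp-bit.c is analogous: it drops #define FLOAT and
adds FLOAT_WORD_ORDER_MISMATCH):

    #define FLOAT
    #ifndef __ARMEB__
    #define FLOAT_BIT_ORDER_MISMATCH
    #endif
    /* ...contents of $(srcdir)/config/fp-bit.c follow... */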
diff --git a/gcc_arm/config/arm/t-bare b/gcc_arm/config/arm/t-bare
new file mode 100755
index 0000000..21e4dd6
--- /dev/null
+++ b/gcc_arm/config/arm/t-bare
@@ -0,0 +1,34 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+# CYGNUS LOCAL interworking
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# END CYGNUS LOCAL interworking
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# CYGNUS LOCAL
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork*
+# END CYGNUS LOCAL
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-linux b/gcc_arm/config/arm/t-linux
new file mode 100755
index 0000000..0160ee6
--- /dev/null
+++ b/gcc_arm/config/arm/t-linux
@@ -0,0 +1,42 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+
+# Don't build enquire
+ENQUIRE=
+
+# Since libgcc1 is an assembler file, we can build it automatically for the
+# cross-compiler.
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx
+
+# CYGNUS LOCAL
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float mapcs-32/mapcs-26 mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be fpu soft 32bit 26bit normal interwork
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS = *mapcs-26/*mthumb-interwork*
+# END CYGNUS LOCAL
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-netbsd b/gcc_arm/config/arm/t-netbsd
new file mode 100755
index 0000000..cc2f658
--- /dev/null
+++ b/gcc_arm/config/arm/t-netbsd
@@ -0,0 +1,7 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+# -Dinhibit_libc
+
+# Don't build enquire
+ENQUIRE=
diff --git a/gcc_arm/config/arm/t-pe b/gcc_arm/config/arm/t-pe
new file mode 100755
index 0000000..e68b3c9
--- /dev/null
+++ b/gcc_arm/config/arm/t-pe
@@ -0,0 +1,31 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float
+MULTILIB_DIRNAMES = fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-pe-thumb b/gcc_arm/config/arm/t-pe-thumb
new file mode 100755
index 0000000..253c814
--- /dev/null
+++ b/gcc_arm/config/arm/t-pe-thumb
@@ -0,0 +1,37 @@
+# Makefile fragment
+# Copyright (c) 1998 Free Software Foundation
+# CYGNUS LOCAL (entire file) nickc/thumb-pe
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Rule to build Psion specific GCC functions.
+pe.o: $(srcdir)/config/arm/pe.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mthumb-interwork
+MULTILIB_DIRNAMES = interwork
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-riscix b/gcc_arm/config/arm/t-riscix
new file mode 100755
index 0000000..e5a2213
--- /dev/null
+++ b/gcc_arm/config/arm/t-riscix
@@ -0,0 +1,3 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
diff --git a/gcc_arm/config/arm/t-semi b/gcc_arm/config/arm/t-semi
new file mode 100755
index 0000000..61c1c37
--- /dev/null
+++ b/gcc_arm/config/arm/t-semi
@@ -0,0 +1,47 @@
+# Just for these, we omit the frame pointer since it makes such a big
+# difference. It is then pointless adding debugging.
+LIBGCC2_CFLAGS=-O2 -fomit-frame-pointer $(LIBGCC2_INCLUDES) $(GCC_CFLAGS) -g0
+
+# Don't build enquire
+ENQUIRE=
+
+# Can't test libgcc1 since it tries to bring in things like malloc, and
+# there probably isn't a libc to link against until we have a compiler.
+LIBGCC1_TEST =
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+
+#Don't try to run fixproto
+STMP_FIXPROTO =
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > fp-bit.c
+ echo '#define FLOAT' >> fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifdef __SOFTFP__' > dp-bit.c
+ echo '#ifndef __ARMEB__' >> dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+
+MULTILIB_OPTIONS = msoft-float mapcs-26 mbig-endian mwords-little-endian
+MULTILIB_DIRNAMES = soft apcs26 big wlittle
+MULTILIB_EXCEPTIONS = *mapcs-26/*mbig-endian* mwords-little-endian *mapcs-26/mwords-little-endian msoft-float/mwords-little-endian
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-semiaof b/gcc_arm/config/arm/t-semiaof
new file mode 100755
index 0000000..1017543
--- /dev/null
+++ b/gcc_arm/config/arm/t-semiaof
@@ -0,0 +1,64 @@
+OLDCC = armcc -w
+# Don't build enquire
+ENQUIRE=
+CROSS_LIBGCC1 = libgcc1-aof.a
+LIBGCC2 = libgcc2-aof.a
+LIBGCC = libgcc-aof.a
+LIBGCC2_CFLAGS = -O2 -fomit-frame-pointer
+LIBGCC1_TEST = #libgcc1-atest
+EXTRA_PARTS = crtbegin.o crtend.o
+STMP_FIXPROTO =
+
+# Rule to build libgcc1.a and libgcc2.a and libgcc.a, since the librarian
+# for the ARM tools is somewhat quirky, and needs a special rule to use it.
+libgcc1-aof.a: libgcc1.c $(CONFIG_H) config.status
+ -rm -rf tmplib libgcc1.a libgcc1-aof.a tmplibgcc1.a
+ mkdir tmplib
+ for name in $(LIB1FUNCS); \
+ do \
+ echo $${name}; \
+ rm -f $${name}$(objext); \
+ $(OLDCC) $(CCLIBFLAGS) $(INCLUDES) -c -DL$${name} $(srcdir)/libgcc1.c; \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ mv libgcc1$(objext) tmplib/$${name}$(objext); \
+ done
+ (cd tmplib; \
+ armlib -c tmplibgcc1.a *; \
+ mv tmplibgcc1.a ..)
+ mv tmplibgcc1.a libgcc1-aof.a
+ rm -rf tmplib
+
+libgcc2-aof.a: libgcc2.c libgcc2.ready $(CONFIG_H) $(LIB2FUNCS_EXTRA) \
+ machmode.h longlong.h gbl-ctors.h config.status
+ -rm -f tmplibgcc2.a
+ -rm -rf tmplib
+ mkdir tmplib
+ for name in $(LIB2FUNCS); \
+ do \
+ echo $${name}; \
+ $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) $(INCLUDES) -c -DL$${name} \
+ $(srcdir)/libgcc2.c -o tmplib/$${name}$(objext); \
+ if [ $$? -eq 0 ] ; then true; else exit 1; fi; \
+ done
+ (cd tmplib; \
+ armlib -c tmplibgcc2.a *; \
+ mv tmplibgcc2.a ..)
+ mv tmplibgcc2.a libgcc2-aof.a
+ rm -rf tmplib
+
+# Combine the various libraries into a single library, libgcc.a.
+libgcc-aof.a: $(CROSS_LIBGCC1) $(LIBGCC2)
+ -rm -rf tmplibgcc.a libgcc.a tmpcopy libgcc-aof.a
+ mkdir tmpcopy
+ (cd tmpcopy; armlib -e ../$(LIBGCC1) \*)
+ -(cd tmpcopy; chmod +w * > /dev/null 2>&1)
+ (cd tmpcopy; armlib -e ../$(LIBGCC2) \*)
+ (cd tmpcopy; armlib -co ../tmplibgcc.a *$(objext))
+ rm -rf tmpcopy
+ mv tmplibgcc.a libgcc.a
+ ln libgcc.a libgcc-aof.a
+
+libgcc1-atest: libgcc1-test.o native $(GCC_PARTS) $(EXTRA_PARTS)
+ @echo "Testing libgcc1. Ignore linker warning messages."
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) libgcc1-test.o -o libgcc1-test \
+ -v
diff --git a/gcc_arm/config/arm/t-thumb b/gcc_arm/config/arm/t-thumb
new file mode 100755
index 0000000..6cd8a13
--- /dev/null
+++ b/gcc_arm/config/arm/t-thumb
@@ -0,0 +1,31 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+# adddi3/subdi3 added to machine description
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be normal interwork
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/t-thumb-elf b/gcc_arm/config/arm/t-thumb-elf
new file mode 100755
index 0000000..2f5054d
--- /dev/null
+++ b/gcc_arm/config/arm/t-thumb-elf
@@ -0,0 +1,32 @@
+# CYGNUS LOCAL (entire file) clm/arm-elf
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX
+# adddi3/subdi3 added to machine description
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mno-thumb-interwork/mthumb-interwork fno-leading-underscore/fleading-underscore
+MULTILIB_DIRNAMES = le be normal interwork elf under
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc_arm/config/arm/tcoff.h b/gcc_arm/config/arm/tcoff.h
new file mode 100755
index 0000000..6fa4705
--- /dev/null
+++ b/gcc_arm/config/arm/tcoff.h
@@ -0,0 +1,192 @@
+/* Definitions of target machine for GNU compiler,
+ for Thumb with COFF obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Derived from arm/coff.h originally by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/coff)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/coff\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/telf-oabi.h b/gcc_arm/config/arm/telf-oabi.h
new file mode 100755
index 0000000..17e85e2
--- /dev/null
+++ b/gcc_arm/config/arm/telf-oabi.h
@@ -0,0 +1,244 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Darm_oabi -Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#define ASM_SPEC "-moabi -marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",@progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* Don't know how to order these.  UNALIGNED_WORD_ASM_OP is in
+   dwarf2out.c.  */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ if (((ADDR)[0] == '.') && ((ADDR)[1] == 'L')) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR)); \
+ else \
+ fprintf ((FILE), "\t%s\t%s", \
+ UNALIGNED_WORD_ASM_OP, (ADDR))
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
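+
+/* Editorial sketch, not part of the original header: the tables
+ declared above are bracketed by a -1 sentinel at __CTOR_LIST__[0]
+ and a 0 terminator at __CTOR_END__[0], so startup code of roughly
+ this shape can run the constructors, walking backwards from the
+ terminator until it reaches the sentinel (run_ctors is a
+ hypothetical name):
+
+ typedef void (*func_ptr) (void);
+ extern func_ptr __CTOR_LIST__[], __CTOR_END__[];
+
+ static void
+ run_ctors (void)
+ {
+ func_ptr * p;
+
+ for (p = __CTOR_END__ - 1; *p != (func_ptr) -1; p--)
+ (*p) ();
+ }
+*/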
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/telf-oabi_020422.h b/gcc_arm/config/arm/telf-oabi_020422.h
new file mode 100755
index 0000000..9b7d6c7
--- /dev/null
+++ b/gcc_arm/config/arm/telf-oabi_020422.h
@@ -0,0 +1,237 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Darm_oabi -Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#define ASM_SPEC "-moabi -marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",@progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* It is not clear how these should be ordered. UNALIGNED_WORD_ASM_OP
+ is defined in dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc_arm/config/arm/telf.h b/gcc_arm/config/arm/telf.h
new file mode 100755
index 0000000..29297b0
--- /dev/null
+++ b/gcc_arm/config/arm/telf.h
@@ -0,0 +1,450 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as
+ uninitialized global data. If not defined, and neither
+ `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
+ uninitialized global data will be output in the data section if
+ `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
+ used. */
+#ifndef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+#endif
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* It is not clear how these should be ordered. UNALIGNED_WORD_ASM_OP
+ is defined in dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR))
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \
+ do \
+ { \
+ char * LABEL1 = XSTR (XEXP (DECL_RTL (DECL1), 0), 0); \
+ char * LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
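+
+/* Editorial illustration (hypothetical names): for a function alias
+ the macro above emits
+
+ .thumb_set alias,target
+
+ which behaves like `.set' but also marks the alias as a Thumb entry
+ point; non-function aliases fall through to the plain
+ `alias = target' form of ASM_OUTPUT_DEF. */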
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
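+
+/* Editorial illustration (hypothetical symbol): under the scheme above
+ an ordinary function `foo' is placed in section `.text.foo', while a
+ one-only copy goes in `.gnu.linkonce.t.foo', a prefix which tells the
+ linker that duplicate definitions from other object files may be
+ discarded. */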
+
+/* This is how we tell the assembler that a symbol is weak. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", FILE); \
+ assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+#endif
+
+#ifndef TYPE_ASM_OP
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ if (! is_called_in_ARM_mode (DECL)) \
+ fprintf (FILE, "\t.thumb_func\n") ; \
+ else \
+ fprintf (FILE, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
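+/* Editorial illustration (hypothetical name): for a function `foo' the
+ macro above emits an internal label after the body and then
+
+ .size foo,.Lfe1-foo
+
+ so the assembler computes the function's size as the end label minus
+ the entry point. */
+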
+#endif /* TYPE_ASM_OP */
diff --git a/gcc_arm/config/arm/telf_020422.h b/gcc_arm/config/arm/telf_020422.h
new file mode 100755
index 0000000..6e59404
--- /dev/null
+++ b/gcc_arm/config/arm/telf_020422.h
@@ -0,0 +1,443 @@
+/* CYGNUS LOCAL (entire file) clm/arm-elf */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with ELF obj format.
+ Copyright (C) 1995, 1996, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OBJECT_FORMAT_ELF
+
+#define CPP_PREDEFINES "-Dthumb -Dthumbelf -D__thumb -Acpu(arm) -Amachine(arm)"
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/elf)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* Debug */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"aw\""
+
+#define USER_LABEL_PREFIX ""
+
+/* If defined, a C expression whose value is a string containing the
+ assembler operation to identify the following data as
+ uninitialized global data. If not defined, and neither
+ `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined,
+ uninitialized global data will be output in the data section if
+ `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be
+ used. */
+#ifndef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+#endif
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used
+ in place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ Try to use function `asm_output_aligned_bss' defined in file
+ `varasm.c' when defining this macro. */
+#ifndef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+#endif
+
+/* It is not clear how these should be ordered. UNALIGNED_WORD_ASM_OP
+ is defined in dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* For aliases of functions we use .thumb_set instead. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \
+ do \
+ { \
+ char * LABEL1 = XSTR (XEXP (DECL_RTL (DECL1), 0), 0); \
+ char * LABEL2 = IDENTIFIER_POINTER (DECL2); \
+ \
+ if (TREE_CODE (DECL1) == FUNCTION_DECL) \
+ { \
+ fprintf (FILE, "\t.thumb_set "); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } \
+ else \
+ ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \
+ } \
+ while (0)
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+#define INVOKE__main
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
+/* This is how we tell the assembler that a symbol is weak. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak\t", FILE); \
+ assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ } \
+ while (0)
+#endif
+
+#ifndef TYPE_ASM_OP
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+#define TYPE_OPERAND_FMT "%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ if (! is_called_in_ARM_mode (DECL)) \
+ fprintf (FILE, "\t.thumb_func\n") ; \
+ else \
+ fprintf (FILE, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+/* This is how to declare the size of a function. */
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno ++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ while (0)
+
+#endif /* TYPE_ASM_OP */
diff --git a/gcc_arm/config/arm/thumb.c b/gcc_arm/config/arm/thumb.c
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
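+
+/* Editorial note: the test above accepts any constant whose set bits
+ fit in a single 8-bit window, e.g. 0x00ff0000 (0xff << 16), and
+ rejects values such as 0x00ff00ff, whose bits span two windows. */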
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+ constants have to come from a pc-relative load, and the constant
+ referenced by such a load must lie less than 1k in front of the
+ instruction. This means that we often have to dump a constant inside
+ a function, and generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ Scan to find an instruction which needs a pcrel move. Look forward
+ for the last barrier within MAX_COUNT bytes of that instruction;
+ if there isn't one, make one. Then process all the instructions
+ between the found instruction and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool: the
+ pc-relative range is 0...1020 bytes and constants are at least 4
+ bytes long. */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
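+
+/* Editorial note: next_offset records the running end of the pool
+ after each entry, so the hit case above recovers an entry's start as
+ next_offset - GET_MODE_SIZE (mode), while a miss returns the old
+ pool end as the new entry's offset. */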
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up. */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ count += 2;
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
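+
+/* Editorial note: BLOCK_CHAIN and BLOCK_SUBBLOCKS traverse the
+ function's lexical scope tree, so the recursion above visits every
+ used scope exactly once and patches each matching variable's
+ DECL_RTL in place. */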
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+ then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+ refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
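+
+/* Editorial example: a 23 byte copy expands to one 12 byte and one
+ 8 byte block move (no offset is added for these, so they are assumed
+ to advance the pointer registers themselves), followed by a halfword
+ and a byte copied at explicit offsets 0 and 2 from the updated
+ pointers. */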
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ /* Ignore the problem of functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn (insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop (FILE *, int, int); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
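+
+/* Editorial example: number_of_first_bit_set (0x18) returns 3, the
+ index of the lowest set bit. The loop never terminates for a zero
+ mask, so callers are expected to pass a non-empty one. */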
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+ /* Where possible, deduce the registers used from the function's
+ return value. This is more reliable than examining regs_ever_live[]
+ because that will be set if the register is ever used in the
+ function, not just if the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4)
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8)
+ regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+	 the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well, find the register that
+	     contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+	     it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
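+
+/* For example (illustrative, not output quoted from a real run): a
+   call such as
+
+       thumb_pushpop (f, 0x40F0, 1);
+
+   with bits 4-7 and 14 set in the mask, emits
+
+       push	{r4, r5, r6, r7, lr}
+
+   while a pop whose mask includes the PC is diverted through
+   thumb_exit () whenever interworking or backtracing is in force. */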
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME. */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
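+
+      /* The resulting 16-byte structure, as implied by the stores below
+	 (offsets relative to its base on the stack), is:
+
+	     [base + 12]  PC  (start of the creation code plus 12)
+	     [base +  8]  LR  (the return address)
+	     [base +  4]  SP  (value before the structure was created)
+	     [base +  0]  FP  (the caller's frame pointer)
+
+	 and FP is left pointing at the PC slot, i.e. base + 12. */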
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+	  /* Desperation time -- this will probably never happen. */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
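+
+/* An illustrative trace of the high register handling above: if r8 and
+   r9 are live and r4-r7 were already pushed (mask = 0xF0), one pass of
+   the while loop emits
+
+       mov	r7, r9
+       mov	r6, r8
+       push	{r6, r7}
+
+   which is exactly the prologue sequence quoted in the comment in
+   thumb_unexpanded_epilogue below. */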
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
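+
+/* A rough sketch of the large-frame path above: for AMOUNT == 1024 with
+   r4 in live_regs_mask, the emitted RTL corresponds to
+
+       ldr	r4, =-1024	; constant loaded via the literal pool
+       add	sp, sp, r4
+
+   clobbering r4 is safe on the assumption that, being in
+   live_regs_mask, it has already been saved by the register pushes
+   emitted in thumb_function_prologue. */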
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue.  We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+      /* Where possible, deduce the registers used from the function's return
+	 value.  This is more reliable than examining regs_ever_live[] because
+	 that will be set if the register is ever used in the function, not
+	 just if the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+	 PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
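+
+/* For instance (illustrative): a DImode load whose base register is
+   also the low half of the destination, say r0/r1 <- [r0], must load
+   the high word first:
+
+       ldr	r1, [r0, #4]
+       ldr	r0, [r0, #0]
+
+   since loading r0 first would destroy the address needed by the
+   second load.  This is the ordering implemented in the REG and PLUS
+   cases above. */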
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
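+
+/* The compare-and-swap steps above exist because ldmia/stmia transfer
+   their register list in ascending register order, so the operand
+   registers must be sorted first; the three exchanges in the n == 3
+   case form a minimal sorting network.  E.g. operands (r5, r2, r4)
+   become the list {r2, r4, r5}. */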
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
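+
+/* The table above is arranged so that each even/odd pair of condition
+   codes are logical inverses, hence inverting a code is a single XOR:
+
+       thumb_condition_code (EQ, 0)  ->  conds[0]       == "eq"
+       thumb_condition_code (EQ, 1)  ->  conds[0 ^ 1]   == "ne"
+       thumb_condition_code (GE, 1)  ->  conds[10 ^ 1]  == "lt"
+*/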
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	     the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	     same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
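+
+/* Some illustrative consequences of the rules above (derived from this
+   code, not a full statement of the APCS):
+
+       int                        -> registers (not an aggregate)
+       struct { int x; }          -> registers (one word, one field)
+       struct { short a, b; }     -> memory    (second addressable field)
+       struct { char c[8]; }      -> memory    (bigger than one word)
+       union  { int i; char c; }  -> registers (all members integral)
+*/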
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb.c.orig b/gcc_arm/config/arm/thumb.c.orig
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c.orig
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
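+
+/* E.g. (const_int 255) is acceptable here but (const_int 256) is not,
+   reflecting the 8-bit immediate field of the Thumb CMP instruction. */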
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
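+
+/* For example (illustrative): 0x000FF000 is shiftable (0xFF << 12), as
+   is 0x34 (0x34 << 0), whereas 0x00100100 is not, since no 8-bit field
+   shifted left can cover bits 8 and 20 at once.  Such constants can be
+   synthesised as a move of the 8-bit value followed by a left shift. */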
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+   relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
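+
+/* Note on the value returned above: the offset handed back is the
+   distance of the constant from pool_vector_label.  E.g. the first
+   SImode constant added returns offset 0 (with next_offset 4), the
+   second returns 4, and a constant already in the pool returns the
+   offset at which it was first placed. */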
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non-zero if the src operand needs to be fixed up. */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Non-zero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+		     then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+		     refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
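+
+/* A sketch of the expansion for a 23-byte copy (illustrative): the
+   12- and 8-byte block moves post-increment the pointer registers,
+   which is why OFFSET stays zero for them; the tail is copied at
+   explicit offsets:
+
+       movmem12b (out!, in!)			; len 23 -> 11
+       movmem8b  (out!, in!)			; len 11 -> 3
+       movhi     [out, #0] <- [in, #0]		; len  3 -> 1
+       movqi     [out, #2] <- [in, #2]		; done
+*/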
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem about functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
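+
+/* For example (illustrative):
+
+       number_of_first_bit_set (0x10) == 4
+       number_of_first_bit_set (0x06) == 1
+
+   Note that the loop never terminates for a zero mask, so callers must
+   guarantee that at least one bit is set. */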
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+  /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* Where possible, deduce the registers used from the function's return
+     value.  This is more reliable than examining regs_ever_live[] because
+     that will be set if the register is ever used in the function, not just
+     if the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+	 the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+ /* We popped the stack pointer as well, find the register that
+	     contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
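+
+/* For illustration (a sketch of the simplest case only, assuming nothing
+   is left on the stack to pop): when output_return calls
+   thumb_exit (f, 14) for a leaf function with no saved registers, none
+   of the popping paths above are taken and the exit sequence reduces to
+   a single instruction:
+
+       bx      lr
+*/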
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+      /* Special case.  Do not generate a POP PC statement here; do it in
+	 thumb_exit().  */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+	     it is popped into r3 and then BX is used.  */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
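+
+/* For illustration (a sketch): thumb_pushpop (f, 0x40F0, 1), i.e. r4-r7
+   plus bit 14 for LR, emits
+
+       push    {r4, r5, r6, r7, lr}
+
+   and thumb_pushpop (f, 0x80F0, 0), i.e. r4-r7 plus bit 15 for PC, emits
+   pop {r4, r5, r6, r7, pc} -- unless interworking or backtracing is
+   enabled, in which case the code above defers to thumb_exit so that the
+   return is done with a BX.  */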
+
+/* Returns non-zero if the current function contains a far jump.  */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME.  */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers.  E.g. the testsuite file:
+     gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+     compiles to produce:
+	push	{r4, r5, r6, r7, lr}
+	mov	r7, r9
+	mov	r6, r8
+	push	{r6, r7}
+     as part of the prologue.  We have to undo that pushing here
+     (the matching pops are sketched after this function).  */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+      /* Deduce the registers used from the function's return value.
+	 This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available.  */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+      /* We have either just popped the return address into the
+	 PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
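+
+/* Continuing the complex-2.c example quoted above (a sketch): with r8 and
+   r9 saved via r6/r7 by the prologue and r4-r7 also live, the loop above
+   restores the high registers through free low registers (r3 and r4)
+   before the remaining pops:
+
+       pop     {r3, r4}
+       mov     r8, r3
+       mov     r9, r4
+*/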
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
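+
+/* For illustration (a sketch): for a DImode load whose base register is
+   also the low destination register, say operands[0] = r0 and
+   operands[1] = (mem (reg r0)), the REG case above loads the high word
+   first so that the base address is not clobbered too early:
+
+       ldr     r1, [r0, #4]
+       ldr     r0, [r0]
+*/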
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
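+
+/* For illustration (a sketch): with n == 3 and scratch registers r4, r2,
+   r3 in operands[2..4], the compare-and-swap sequence above sorts them
+   into ascending order (ldm/stm register lists must ascend), giving
+
+       ldmia   r1!, {r2, r3, r4}
+       stmia   r0!, {r2, r3, r4}
+
+   which copies 12 bytes and leaves both pointers past the copied block.  */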
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
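+
+/* For illustration: conds[] pairs each condition with its inverse at the
+   adjacent index, which is why inversion is a simple XOR with 1.  For
+   example, a GE comparison yields "ge" with invert == 0 and "lt" with
+   invert == 1; GEU yields "cs" and "cc" respectively.  */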
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	 the least significant part of the value.  The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	 same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+	 being overwritten by the load.  (A worked example is sketched
+	 after this function.)  */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
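+
+/* For illustration (a sketch): for a DImode value held in the pair r2/r3
+   on a little-endian target (WORDS_BIG_ENDIAN false), '%Q' prints r2 (the
+   least significant word), '%R' prints r3 (the most significant word),
+   and '%H' prints r3, the higher-numbered register, whatever the
+   endianness.  */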
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+ function as imported near the begining of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
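+
+/* For illustration (a sketch of the rules above):
+
+     struct { int x; }            returned in a register (a single
+                                  non-bit-field member at offset zero)
+     struct { short a; short b; } returned in memory (a second
+                                  addressable member)
+     struct { int x; int y; }     returned in memory (bigger than a word)
+     union  { int i; float f; }   returned in a register (every member
+                                  can itself be returned in a register)
+*/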
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb.c.rej b/gcc_arm/config/arm/thumb.c.rej
new file mode 100755
index 0000000..2b5e409
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.c.rej
@@ -0,0 +1,168 @@
+***************
+*** 2103,2105 ****
+ }
+ #endif /* THUMB_PE */
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+--- 2103,2264 ----
+ }
+ #endif /* THUMB_PE */
+ /* END CYGNUS LOCAL nickc/thumb-pe */
++
++ /* Return nonzero if ATTR is a valid attribute for TYPE.
++ ATTRIBUTES are any existing attributes and ARGS are the arguments
++ supplied with ATTR.
++
++ Supported attributes:
++
++ short_call: assume the offset from the caller to the callee is small.
++
++ long_call: don't assume the offset is small. */
++
++ int
++ arm_valid_machine_type_attribute (type, attributes, attr, args)
++ tree type;
++ tree attributes;
++ tree attr;
++ tree args;
++ {
++ if (args != NULL_TREE)
++ return 0;
++
++ if (is_attribute_p ("long_call", attr))
++ return 1;
++
++ if (is_attribute_p ("short_call", attr))
++ return 1;
++
++ return 0;
++ }
++
++ /* Encode long_call or short_call attribute by prefixing
++ symbol name in DECL with a special character FLAG. */
++
++ void
++ arm_encode_call_attribute (decl, flag)
++ tree decl;
++ int flag;
++ {
++ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
++ int len = strlen (str);
++ char * newstr;
++
++ /* Do not allow weak functions to be treated as short call. */
++ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
++ return;
++
++ if (ENCODED_SHORT_CALL_ATTR_P (str)
++ || ENCODED_LONG_CALL_ATTR_P (str))
++ return;
++
++ newstr = malloc (len + 2);
++ newstr[0] = flag;
++ strcpy (newstr + 1, str);
++
++ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
++ }
++
++ /* Return the length of a function name prefix
++ that starts with the character 'c'. */
++
++ static int
++ arm_get_strip_length (char c)
++ {
++ switch (c)
++ {
++ ARM_NAME_ENCODING_LENGTHS
++ default: return 0;
++ }
++ }
++
++ /* Return a pointer to a function's name with any
++ and all prefix encodings stripped from it. */
++
++ char *
++ arm_strip_name_encoding (char * name)
++ {
++ int skip;
++
++ while ((skip = arm_get_strip_length (* name)))
++ name += skip;
++
++ return name;
++ }
++
++ /* Return 1 if the operand is a SYMBOL_REF for a function known to be
++   defined within the current compilation unit.  If this cannot be
++ determined, then 0 is returned. */
++
++ static int
++ current_file_function_operand (sym_ref)
++ rtx sym_ref;
++ {
++ /* This is a bit of a fib. A function will have a short call flag
++ applied to its name if it has the short call attribute, or it has
++ already been defined within the current compilation unit. */
++ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
++ return 1;
++
++ /* The current function is always defined within the current compilation
++   unit.  If it is a weak definition, however, then this may not be the real
++ definition of the function, and so we have to say no. */
++ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
++ && !DECL_WEAK (current_function_decl))
++ return 1;
++
++ /* We cannot make the determination - default to returning 0. */
++ return 0;
++ }
++
++ /* Return non-zero if a 32 bit "long_call" should be generated for
++ this call. We generate a long_call if the function:
++
++        a.  has an __attribute__ ((long_call))
++ or b. the -mlong-calls command line switch has been specified
++
++ However we do not generate a long call if the function:
++
++ c. has an __attribute__ ((short_call))
++ or d. has an __attribute__ ((section))
++ or e. is defined within the current compilation unit.
++
++ This function will be called by C fragments contained in the machine
++ description file. CALL_REF and CALL_COOKIE correspond to the matched
++ rtl operands. CALL_SYMBOL is used to distinguish between
++ two different callers of the function. It is set to 1 in the
++ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
++ and "call_value" patterns. This is because of the difference in the
++ SYM_REFs passed by these patterns. */
++
++ int
++ arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
++ rtx sym_ref;
++ int call_cookie;
++ int call_symbol;
++ {
++ if (!call_symbol)
++ {
++ if (GET_CODE (sym_ref) != MEM)
++ return 0;
++
++ sym_ref = XEXP (sym_ref, 0);
++ }
++
++ if (GET_CODE (sym_ref) != SYMBOL_REF)
++ return 0;
++
++ if (call_cookie & CALL_SHORT)
++ return 0;
++
++ if (TARGET_LONG_CALLS && flag_function_sections)
++ return 1;
++
++ if (current_file_function_operand (sym_ref))
++ return 0;
++
++ return (call_cookie & CALL_LONG)
++ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
++ || TARGET_LONG_CALLS;
++ }
diff --git a/gcc_arm/config/arm/thumb.h b/gcc_arm/config/arm/thumb.h
new file mode 100755
index 0000000..9cd719a
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.h
@@ -0,0 +1,1195 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code.  */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+   the linker).  */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+ instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the processor's predefined macros when compiling libgcc2.c.  */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
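+
+/* For illustration (a sketch): under PROMOTE_MODE a QImode or HImode
+   integer is widened to SImode and treated as unsigned, which matches
+   the zero-extending ldrb/ldrh loads that the Thumb provides.  */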
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary=.  */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NONARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
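+
+/* For illustration (a sketch): #200 satisfies 'I' (an 8-bit immediate),
+   #-4 satisfies both 'J' and 'L', and #512 satisfies 'M', being
+   word-aligned and below 1024.  */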
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
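+
+/* Worked example (an editorial sketch, assuming a non-leaf function with
+   a 16 byte frame, r4 and r5 saved, no high registers saved, no TPCS
+   backtrace and 8 bytes of outgoing arguments):
+     AP -> FP: 16 (frame) + 4 * (2 + 1) (r4, r5 and lr) = 28
+     AP -> SP: 28 + 8 (outgoing args)                   = 36
+     FP -> SP: 8 (outgoing args)                        = 8   */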
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  ((REGNO) >= 0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+   Why?  Because it is easier.  This code will always be branched to via
+   a BX instruction, and since the compiler magically generates the address
+   of the function, the linker has no opportunity to ensure that the
+   bottom bit is set.  Thus the processor will be in ARM mode when it
+   reaches this code.  So we duplicate the ARM trampoline code and add
+   a switch into Thumb mode as well.
+
+   On the ARM (if r8 is the static chain regnum, and remembering that
+   referencing pc adds an offset of 8), the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
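+
+/* The resulting trampoline, for illustration (byte offsets on the left;
+   r9 is STATIC_CHAIN_REGNUM):
+      0:  ldr  r9, [pc, #8]   @ pc reads as 8 here, so this loads offset 16
+      4:  ldr  ip, [pc, #8]   @ loads offset 20
+      8:  orr  ip, ip, #1     @ set the Thumb bit of the target address
+     12:  bx   ip
+     16:  .word <static chain, stored by INITIALIZE_TRAMPOLINE>
+     20:  .word <function address, stored by INITIALIZE_TRAMPOLINE>
+   which accounts for TRAMPOLINE_SIZE == 24.  */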
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
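+
+/* For illustration, the offsets accepted per access size:
+     byte access:      0 .. 31
+     halfword access:  0 .. 62, even
+     word or larger:   0 .. 128 - size, a multiple of 4
+                       (so 0 .. 124 for SImode, 0 .. 120 for DImode)  */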
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+   least common denominator, i.e. SImode, and offsets from 0 to 64.  */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it would not be incorrect to accept them, we do not accept
+   QImode and HImode addresses based on the frame pointer or arg pointer
+   until the reload pass starts.  This is so that eliminating such
+   addresses into stack based ones will not produce impossible code.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	      reload work, but I do not have enough time to attempt such	\
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+	  /* REG+const has 10 bit offset for SP, but only SImode and	\
+	     larger are supported.  */					\
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+   cope with the differences between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
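+
+/* Example (editorial): an HImode reference to SP+100 fails
+   LEGITIMATE_OFFSET, since halfword offsets must be below 64, so the
+   whole SP+100 address is reloaded into a base register instead of
+   reloading just the SP.  */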
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
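+
+/* Sample values of the formula above, for illustration:
+     QImode/HImode:  8 via LO_REGS, 16 via other classes
+     SImode:         8 via LO_REGS, 16 via other classes
+     DImode:        16 via LO_REGS, 32 via other classes  */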
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+		    + (GET_CODE (SET_DEST (X)) == MEM)))
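+
+/* Example (editorial): a multiply by 100 (0x64) runs the loop above
+   4 times (0x64 -> 0x19 -> 0x6 -> 0x1 -> 0), i.e. one cycle for every
+   two bits of the constant, giving a cost of COSTS_N_INSNS (2) + 4.  */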
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
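+
+/* Example outputs, for illustration:
+     (reg r3)                      ->  [r3]
+     (post_inc (reg r3))           ->  r3!
+     (plus (reg r3) (const_int 4)) ->  [r3, #4]
+     (plus (reg r2) (reg r3))      ->  [r2, r3]  */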
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking.  */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb.h.orig b/gcc_arm/config/arm/thumb.h.orig
new file mode 100755
index 0000000..9cd719a
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.h.orig
@@ -0,0 +1,1195 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code.  */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., one not visible
+   to the linker).  */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+   } while (0)
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+  register int i, len = (LEN), cur_pos = 17;				\
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is the lowest numbered in
+   instructions that operate on numbered bit-fields.  */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+   numbered.  */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the processor predefines when compiling libgcc2.c.  */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
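+
+/* For illustration: with this definition a QImode or HImode value is
+   widened to SImode and treated as unsigned, so e.g. a 'short' argument
+   travels as a zero-extended 32-bit quantity, matching the
+   LOAD_EXTEND_OP of ZERO_EXTEND below.  */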
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command-line option -mstructure-size-boundary=.  */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
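+
+/* Decoded, for illustration: r0-r3 (the argument registers) and r12-r15
+   are clobbered by calls, r4-r10 are callee-saved, r11 is fixed anyway,
+   and the final entry is the fake argument pointer.  */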
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NONARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
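+
+/* Decoded, for illustration (bit N = register N, bit 16 = the fake
+   argument pointer):
+     NONARG_LO_REGS  0x000f0  r4-r7
+     LO_REGS         0x000ff  r0-r7
+     STACK_REG       0x02000  sp
+     BASE_REGS       0x020ff  r0-r7 plus sp
+     HI_REGS         0x0ff00  r8-r15
+     ALL_REGS        0x1ffff  everything, including the argument pointer  */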
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+   change would have.  */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone; see the comment
+   above.  */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations, as we
+   really only have one register pointing to the stashed variables:
+   the stack pointer.  We never use the frame pointer.  */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  ((REGNO) >= 0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+   Why?  Because it is easier.  This code will always be branched to via
+   a BX instruction, and since the compiler magically generates the address
+   of the function, the linker has no opportunity to ensure that the
+   bottom bit is set.  Thus the processor will be in ARM mode when it
+   reaches this code.  So we duplicate the ARM trampoline code and add
+   a switch into Thumb mode as well.
+
+   On the ARM (if r8 is the static chain regnum, and remembering that
+   referencing pc adds an offset of 8), the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+   least common denominator, i.e. SImode, and offsets from 0 to 64.  */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it would not be incorrect to accept them, we do not accept
+   QImode and HImode addresses based on the frame pointer or arg pointer
+   until the reload pass starts.  This is so that eliminating such
+   addresses into stack based ones will not produce impossible code.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	      reload work, but I do not have enough time to attempt such	\
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+	  /* REG+const has 10 bit offset for SP, but only SImode and	\
+	     larger are supported.  */					\
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+   cope with the differences between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+		    + (GET_CODE (SET_DEST (X)) == MEM)))
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking.  */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb.md b/gcc_arm/config/arm/thumb.md
new file mode 100755
index 0000000..dd86008
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md
@@ -0,0 +1,1174 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
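+
+;; For illustration: in the constraints above 'l' is LO_REGS and 'h' is
+;; HI_REGS (see REG_CLASS_FROM_LETTER in thumb.h), '>' is a
+;; post-increment memory operand, and 'I', 'J' and 'K' are the constant
+;; ranges defined by CONST_OK_FOR_LETTER_P.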
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
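+
+;; For example, loading 0x4400 (not a valid 8-bit immediate) splits into
+;; roughly:
+;;	mov	rD, #0x44
+;;	lsl	rD, rD, #8
+;; which avoids a literal-pool entry for the constant.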
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
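+
+;; Similarly, a small negative constant such as -200 (matching 'J') splits
+;; into a positive load followed by a negate:
+;;	mov	rD, #200
+;;	neg	rD, rD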
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+      /* ??? We shouldn't really get invalid addresses here, but this can
+	 happen if we are passed an SP-relative address (never OK for
+	 HImode/QImode) or a virtual-register-relative address (rejected
+	 by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode).  */
+      /* ??? This should perhaps be fixed elsewhere, for instance, in
+	 fixup_stack_1, by checking for other kinds of invalid addresses,
+	 e.g. a bare reference to a virtual register.  This may confuse the
+	 Alpha port though, which must handle this case differently.  */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+      /* ??? We shouldn't really get invalid addresses here, but this can
+	 happen if we are passed an SP-relative address (never OK for
+	 HImode/QImode) or a virtual-register-relative address (rejected
+	 by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode).  */
+      /* ??? This should perhaps be fixed elsewhere, for instance, in
+	 fixup_stack_1, by checking for other kinds of invalid addresses,
+	 e.g. a bare reference to a virtual register.  This may confuse the
+	 Alpha port though, which must handle this case differently.  */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but thumb_reorg should always
+;;; replace it with a memory reference before final output.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
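+
+;; In the register-to-register cases above, the REGNO test chooses which
+;; half to move first so that an overlapping destination never clobbers a
+;; source word before it has been read.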
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but thumb_reorg should always
+;;; replace it with a memory reference before final output.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
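+
+;; For a register source the expander above emits a shift pair, e.g.:
+;;	lsl	rT, rS, #16	; move the halfword to the top
+;;	lsr	rD, rT, #16	; shift back down, filling with zeros
+;; since Thumb has no register-to-register extend instruction.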
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
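+
+;; Thumb ldrsb (and ldrsh) only support the [reg, reg] addressing form,
+;; so the code above either moves a constant offset into the destination
+;; register and uses it as the index, or, when the destination overlaps
+;; the base register, falls back to ldrb followed by lsl #24 / asr #24
+;; to replicate the sign bit.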
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
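+
+;; For example, extracting an 8-bit field at bit 4 (operand 2 = 8,
+;; operand 3 = 4) becomes:
+;;	lsl	rT, rS, #20	; 32 - 8 - 4
+;;	lsr	rD, rT, #24	; 32 - 8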
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
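+
+;; Only small aligned copies are expanded inline: the length must be a
+;; compile-time constant of at most 48 bytes with word alignment.
+;; Anything else FAILs, leaving the generic code to fall back to a
+;; library call.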
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
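+
+;; output_move_mem_multiple copies through the scratch registers with a
+;; load/store multiple pair, roughly:
+;;	ldmia	r1!, {r2, r3, r4}
+;;	stmia	r0!, {r2, r3, r4}
+;; The writeback accounts for the pointer increments in the pattern.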
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' contains only the stack pointer.  Trying to reload
+;; it will always fail catastrophically, so never allow those alternatives
+;; to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; Reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
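+
+;; For example:
+;;	mov	r0, #40
+;;	add	r0, r0, sp
+;; collapses into the single instruction
+;;	add	r0, sp, #40
+;; (valid for word-aligned offsets up to 1020).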
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
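+
+;; Illustrating the constant cases above (computing rD = rS & C):
+;;	C = 0xffffff00 (~C < 256):   mov rT, #0xff; bic rD, rD, rT
+;;	C = 0x0000ffff (low mask):   lsl rT, rS, #16; lsr rD, rT, #16
+;;	C = 0xfffffe00 (high mask):  lsr rT, rS, #9; lsl rD, rT, #9
+;; Any other constant is simply forced into a register.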
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
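+
+;; Thumb cmp immediates are limited to 0-255, so a comparison against a
+;; small negative constant is rewritten as cmn on the negated value; e.g.
+;; cmp r0, #-5 becomes roughly:
+;;	mov	r1, #5
+;;	cmn	r0, r1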
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
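+
+;; An unconditional b only reaches about +/-2K bytes; beyond that the
+;; branch is emitted as bl and marked far_jump, and far_jump_used_p
+;; tells the prologue that lr will be clobbered and must be saved.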
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
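+
+;; A conditional branch is thus emitted in one of three forms, chosen by
+;; distance to the target:
+;;	bCC   .Ltarget			(within about +/-256 bytes, 2 bytes)
+;;	b!CC  .LCBn ; b  .Ltarget	(within about +/-2K, 4 bytes)
+;;	b!CC  .LCBn ; bl .Ltarget	(anywhere, 6 bytes, clobbers lr)
+;; where .LCBn labels the fall-through instruction.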
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
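+
+;; The length of 18 is a worst-case estimate for the sequence emitted by
+;; output_return, which may need to pop saved registers and handle an
+;; interworking return.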
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non-THUMB_INTERWORK, non-TARGET_CALLER_INTERWORKING version
+;; used to be "mov\\tlr,pc\;bx\\t%0", but the mov does not set the
+;; bottom bit of lr, so a function return (using bx) would switch
+;; back into ARM mode.
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; An untyped call pattern is not required, since all functions return in r0.
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
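+
+;; mov r8, r8 is the conventional Thumb no-op: a high-register move,
+;; which (unlike the low-register move forms) does not touch the
+;; condition codes.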
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
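+
+;; thumb_reorg places these consttable_* insns after the function body
+;; (see MACHINE_DEPENDENT_REORG in thumb.h), keeping every pool entry
+;; within the PC-relative range of the ldr instructions that use it.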
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb.md.orig b/gcc_arm/config/arm/thumb.md.orig
new file mode 100755
index 0000000..dd86008
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md.orig
@@ -0,0 +1,1174 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
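+;; An unconditional Thumb b only reaches about +/-2k, so beyond that the
+;; pattern above falls back to bl, used purely for its larger range; the
+;; far_jump attribute records this so that the prologue code knows lr
+;; gets clobbered.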
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
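+;; A Thumb conditional branch spans only about -252..+254 bytes, so the
+;; patterns above and below synthesize longer branches from an
+;; inverted-condition branch around an unconditional b (about +/-2k) or,
+;; beyond that, a bl far jump.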
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr, so a function return (using bx)
+;; would switch back into ARM mode...
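+;; A Thumb-state bl, by contrast, leaves the Thumb bit set in lr, so the
+;; call is routed through the __call_via_rX veneers (see lib1thumb.asm),
+;; which simply bx to the target after the bl has set up a valid return
+;; address.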
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "
+{
+ if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb.md.rej b/gcc_arm/config/arm/thumb.md.rej
new file mode 100755
index 0000000..745d220
--- /dev/null
+++ b/gcc_arm/config/arm/thumb.md.rej
@@ -0,0 +1,168 @@
+***************
+*** 1002,1019 ****
+ ;; Call insns
+
+ (define_expand "call"
+- [(call (match_operand:SI 0 "memory_operand" "")
+- (match_operand 1 "" ""))]
+ ""
+ "
+ {
+- if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG)
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+ }")
+
+ (define_insn "*call_indirect"
+- [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+- (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+ [(set_attr "length" "4")])
+--- 1002,1024 ----
+ ;; Call insns
+
+ (define_expand "call"
++ [(parallel
++ [(call (match_operand:SI 0 "memory_operand" "")
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ ""
+ "
+ {
++ if (GET_CODE (XEXP (operands[0], 0)) != REG
++ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+ }")
+
+ (define_insn "*call_indirect"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+ [(set_attr "length" "4")])
+***************
+*** 1023,1075 ****
+ ;; would switch back into ARM mode...
+
+ (define_insn "*call_indirect_interwork"
+- [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+- (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+ [(set_attr "length" "4")])
+
+ (define_expand "call_value"
+- [(set (match_operand 0 "" "")
+- (call (match_operand 1 "memory_operand" "")
+- (match_operand 2 "" "")))]
+ ""
+ "
+ {
+- if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG)
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }")
+
+ (define_insn "*call_value_indirect"
+- [(set (match_operand 0 "" "=l")
+- (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+- (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+ [(set_attr "length" "4")])
+ ;; See comment for call_indirect pattern
+
+ (define_insn "*call_value_indirect_interwork"
+- [(set (match_operand 0 "" "=l")
+- (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+- (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+ [(set_attr "length" "4")])
+
+
+ (define_insn "*call_insn"
+- [(call (mem:SI (match_operand:SI 0 "" "i"))
+- (match_operand:SI 1 "" ""))]
+- "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+ [(set_attr "length" "4")])
+
+ (define_insn "*call_value_insn"
+- [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+- (match_operand 2 "" "")))]
+- "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+ [(set_attr "length" "4")])
+
+--- 1028,1095 ----
+ ;; would switch back into ARM mode...
+
+ (define_insn "*call_indirect_interwork"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
++ (match_operand 1 "" ""))
++ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+ [(set_attr "length" "4")])
+
+ (define_expand "call_value"
++ [(parallel
++ [(set (match_operand 0 "" "")
++ (call (match_operand 1 "memory_operand" "")
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ ""
+ "
+ {
++ if (GET_CODE (XEXP (operands[1], 0)) != REG
++ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+ }")
+
+ (define_insn "*call_value_indirect"
++ [(parallel
++ [(set (match_operand 0 "" "=l")
++ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+ [(set_attr "length" "4")])
+ ;; See comment for call_indirect pattern
+
+ (define_insn "*call_value_indirect_interwork"
++ [(parallel
++ [(set (match_operand 0 "" "=l")
++ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+ [(set_attr "length" "4")])
+
+
+ (define_insn "*call_insn"
++ [(parallel
++ [(call (mem:SI (match_operand:SI 0 "" "i"))
++ (match_operand:SI 1 "" ""))
++ (use (match_operand 2 "" ""))])]
++ "GET_CODE (operands[0]) == SYMBOL_REF
++ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+ [(set_attr "length" "4")])
+
+ (define_insn "*call_value_insn"
++ [(parallel
++ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
++ (match_operand 2 "" "")))
++ (use (match_operand 3 "" ""))])]
++ "GET_CODE(operands[1]) == SYMBOL_REF
++ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+ [(set_attr "length" "4")])
+
diff --git a/gcc_arm/config/arm/thumb_000513.h b/gcc_arm/config/arm/thumb_000513.h
new file mode 100755
index 0000000..a5c25b9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_000513.h
@@ -0,0 +1,1187 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction. Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+   the linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", user_label_prefix, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0);
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is the lowest numbered in
+   instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on the preprocessor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing the command line option -mstructure-size-boundary=. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
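+/* The entries above follow REGISTER_NAMES order: r0-r7 on the first two
+   rows, then r8, r9, sl, fp, then ip, sp, lr, pc and the fake argument
+   pointer.  So, for example, r0-r3 are call-clobbered while r4-r7 are
+   preserved across calls.  */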
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
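+/* QImode and HImode loads and stores cannot be sp-relative on the Thumb,
+   so for those modes the base class narrows from BASE_REGS (r0-r7 plus
+   sp) to LO_REGS (r0-r7 only).  */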
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS. It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
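+/* Roughly: 'I' accepts 0..255 (8-bit immediates), 'J' -255..0, 'K' an
+   8-bit value shifted left by up to 24 bits, 'L' -7..7, 'M' word-aligned
+   values below 1024, 'N' shift counts 0..31, and 'O' -508..508.  */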
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
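+/* CUM simply counts the bytes of argument registers used so far, rounded
+   up to a word multiple: the first 16 bytes of named arguments land in
+   r0-r3, everything else goes on the stack.  */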
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned. */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
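+/* The offsets 16 and 20 address the two ".word 0" slots that follow the
+   four 4-byte ARM instructions in TRAMPOLINE_TEMPLATE above.  */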
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
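+/* These ranges mirror the Thumb load/store offset encodings: byte
+   accesses allow offsets 0..31, halfword accesses even offsets below 64,
+   and word or larger accesses word-aligned offsets such that the whole
+   access stays within 128 bytes of the base register.  */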
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the reload pass starts.
+ This is so that eliminating such addresses into stack based ones
+ won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+       reload work, but I do not have enough time to attempt such \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
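+/* The MULT case above models an early-terminating multiply: the loop
+   charges one cycle per two bits of a constant multiplier, while a
+   multiply by a register is given the 16-cycle worst case.  */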
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
diff --git a/gcc_arm/config/arm/thumb_010110a.c b/gcc_arm/config/arm/thumb_010110a.c
new file mode 100755
index 0000000..ef7ebff
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010110a.c
@@ -0,0 +1,2124 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+
+/* Used to parse the -mstructure-size-boundary= command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+  unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
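+/* For example, thumb_shiftable_const accepts 0x00ff0000 (0xff << 16) but
+   rejects 0x00ff00ff: only a single shifted 8-bit block can be built
+   with one mov followed by one lsl.  */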
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register,
+ constants have to come from a pc relative load. The reference of a pc
+   relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up. */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
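+
+/* Illustrative behaviour of fixit (a sketch): for SImode, a source of
+   (const_int 1) satisfies the 'I' constraint and needs no fixing, while
+   a SYMBOL_REF or a constant such as (const_int 0x12345), which fails
+   every constraint tested above, must be loaded from the constant pool.
+   The exact ranges are whatever CONST_OK_FOR_LETTER_P defines for this
+   target.  */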
+
+/* Find a barrier within MAX_COUNT_SI bytes of FROM, or create one.  */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+	count += 2;
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed.  */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+   function match the RTX called 'orig'.  If they do, replace them
+   with the RTX called 'new'.  */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+		  /* If this is an HImode constant load, convert it into
+		     an SImode constant load.  Since the register is always
+		     32 bits this is safe.  We have to do this, since the
+		     pc-relative load instruction only does a 32-bit load.  */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+		  /* If debugging information is going to be emitted
+		     then we must make sure that any references to
+		     symbols which are removed by the above code are
+		     also removed in the descriptions of the
+		     function's variables.  Failure to do this means
+		     that the debugging information emitted could
+		     refer to symbols which are not emitted by
+		     output_constant_pool() because
+		     mark_constant_pool() never sees them as being
+		     used.  */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
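+
+/* Worked example (an illustrative trace, assuming the movmem patterns
+   post-increment the pointer registers, as their use here implies): a
+   15 byte copy expands into one movmem12b, which advances IN and OUT
+   by 12, followed by a halfword load/store at offset 0 and a byte
+   load/store at offset 2 from the updated pointers.  */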
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem of functions whose address is taken.  */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn (insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop (FILE *, int, int); /* Forward declaration.  */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
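+
+/* For example, number_of_first_bit_set (0x18) returns 3.  Note that the
+   loop never terminates for a zero mask, so callers must pass a mask
+   with at least one bit set.  */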
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+  /* If there is nothing to pop then just emit the BX instruction and return.  */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* See if we can deduce the registers used from the function's return value.
+     This is more reliable than examining regs_ever_live[] because that
+     will be set if the register is ever used in the function, not just if
+     the register is used to hold a return value.  */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+  else if (size <= 4)
+    regs_available_for_popping =
+      (1 << ARG_2_REGISTER)
+      | (1 << ARG_3_REGISTER);
+  else if (size <= 8)
+    regs_available_for_popping =
+      (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+	 the return address will not be corrupted by further pops.  */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+	  /* We popped the stack pointer as well; find the register that
+	     contains it.  */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+	     pointer, the popping register is available for reuse, and
+	     we know that we still have the stack pointer left to pop.  */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+	     it is popped into r3 and then BX is used.  */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
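+
+/* Illustrative output (a sketch): pushing the mask
+   (1 << 4) | (1 << 5) | (1 << 14) emits
+
+	push	{r4, r5, lr}
+
+   while a pop whose mask includes bit 15 is diverted through thumb_exit()
+   when interworking or backtracing is in effect.  */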
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+      if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+	abort ();
+      if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+	abort ();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+      /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME.  */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
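+
+/* Sketch of the two cases above (illustrative; the exact scratch register
+   depends on live_regs_mask): a 256 byte frame becomes a single
+
+	sub	sp, #256
+
+   whereas a 600 byte frame becomes something like
+
+	mov	r4, #-600	@ materialized via the constant pool
+	add	sp, sp, r4
+
+   with the scratch saved and restored through r12 only in the unlikely
+   case that no pushed low register is available.  */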
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers.  e.g. the testsuite file:
+     gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+     compiles to produce:
+	push	{r4, r5, r6, r7, lr}
+	mov	r7, r9
+	mov	r6, r8
+	push	{r6, r7}
+     as part of the prologue.  We have to undo that pushing here.  */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+    /* See if we can deduce the registers used from the function's return
+       value.  This is more reliable than examining regs_ever_live[] because
+       that will be set if the register is ever used in the function, not
+       just if the register is used to hold a return value.  */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available.  */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+      /* We have either just popped the return address into the
+	 PC, or it was kept in LR for the entire function, or
+	 it is still on the stack because we do not want to
+	 return by doing a pop {pc}.  */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+	  fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+	  fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+	  fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
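+
+/* The compare-and-swap tests above form a small sorting network, since
+   ldmia/stmia need their register lists in ascending order.  For example
+   (illustrative), with operands r5, r3, r4 the emitted instructions are
+
+	ldmia	r1!, {r3, r4, r5}
+	stmia	r0!, {r3, r4, r5}  */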
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
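+
+/* For example, a GE comparison yields "ge" when INVERT is zero and "lt"
+   when INVERT is one: inverting a condition toggles the low bit of its
+   index into the conds[] table above.  */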
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	 the least significant part of the value.  The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+	 On a target where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	 same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
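+
+/* Illustrative consequences of the rules above: a struct holding a single
+   int is returned in a register, while
+
+	struct { char a; char b; }
+
+   is returned in memory even though it fits in one word, because it has a
+   second non-bit-field element.  */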
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_010110a.md b/gcc_arm/config/arm/thumb_010110a.md
new file mode 100755
index 0000000..29a75bb
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010110a.md
@@ -0,0 +1,1166 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
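+
+;; Illustrative example (a sketch, not from the original sources): the
+;; constant 0x7F80 is 0xFF << 7, so the split above can produce
+;;	mov	reg, #255
+;;	lsl	reg, reg, #7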
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
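+
+;; Illustrative example: a constant such as -100 is split into
+;;	mov	reg, #100
+;;	neg	reg, reg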
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
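+;; Thumb has no register-to-register extend instructions, so for non-memory
+;; operands the expanders below synthesize the extension with a shift-left/
+;; shift-right pair; only the load forms (ldrh, ldrb, ldrsh, ldrsb) extend
+;; directly.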
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
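+;; A field of width W at bit position P is extracted by computing
+;; (X << (32 - W - P)) >> (32 - W), using a logical right shift; when the
+;; left shift count is zero a single lshrsi3 suffices.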
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
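+;; Only word-aligned copies (operand 3 == 4) of a constant length of at
+;; most 48 bytes are expanded inline; anything else FAILs so that a library
+;; call is used instead.  thumb_expand_movstrqi emits 12-byte and 8-byte
+;; ldmia/stmia groups and then mops up the remainder a word, halfword and
+;; byte at a time.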
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' contains only the stack pointer register.  Trying to
+;; reload it will always fail catastrophically, so never allow those
+;; alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; Reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
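+; The peephole rewrites "mov rX, #N" followed by "add rX, rX, sp" into the
+; single instruction "add rX, sp, #N", which is only encodable when N is a
+; word-aligned value below 1024.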
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
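+;; Thumb AND only takes register operands, so the expander below rewrites
+;; AND with a constant: if the complement fits in 8 bits we use BIC; if the
+;; constant is a mask of low-order ones we extract via extzv (a shift pair);
+;; if it is a mask clearing only low-order bits we use an lsr/lsl pair;
+;; otherwise the constant is forced into a register.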
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
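+;; Thumb cmp takes an 8-bit unsigned immediate; for constants in the range
+;; -255..-1 the expander negates the operand and emits cmn instead, and any
+;; other constant is forced into a register.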
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
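+;; A Thumb unconditional branch only reaches -2048..+2044 bytes; beyond
+;; that we fall back on bl as a "far jump", which has a much larger range
+;; but clobbers lr, so the prologue must save lr whenever such a jump might
+;; be emitted (see far_jump_used_p).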
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
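+;; A conditional branch reaches only about +-250 bytes, so for longer
+;; distances we invert the condition and hop over an unconditional b
+;; (length 4) or, past the +-2k range of b, over a bl (length 6).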
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set the
+;; bottom bit of lr, so a function return (using bx) would switch
+;; back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all functions return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_010309a.c b/gcc_arm/config/arm/thumb_010309a.c
new file mode 100755
index 0000000..778cda9
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_010309a.c
@@ -0,0 +1,2132 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
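+/* Return nonzero if VAL can be synthesized by shifting an 8-bit constant,
+   i.e. all of its set bits fit within some 8-bit window (a "mov" of the
+   byte followed by an "lsl").  */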
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+   constants have to come from a pc-relative load.  The constant referenced
+   by a pc-relative load must lie less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+   Scan to find an instruction which needs a pcrel move.  Look forward to
+   find the last barrier which is within MAX_COUNT bytes of the requirement.
+   If there isn't one, make one.  Process all the instructions between
+   the found instruction and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its offset within the pool.  */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the source operand needs to be fixed up.  */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
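+/* (Kept a little below the 1020-byte pc-relative limit, presumably to
+   leave slack for alignment and for the branch around the table.)  */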
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed.  */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+		     then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+		     refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem about functions whose address is taken.  */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn (insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop (FILE *, int, int); /* Forward declaration.  */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+  /* If there is nothing to pop then just emit the BX instruction and
+     return.  */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* If possible, deduce the registers used from the function's return
+     value.  This is more reliable than examining regs_ever_live[] because
+     that will be set if the register is ever used in the function, not
+     just if the register is used to hold a return value.  */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping =
+ (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
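+  /* Note that M & -M isolates the lowest set bit of M, so each iteration
+     below retires the lowest remaining register from each mask and
+     decrements the count of pops still needed.  */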
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+	 the return address will not be corrupted by further pops.  */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+	  /* We popped the stack pointer as well; find the register that
+	     contains it.  */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+      /* Special case.  Do not generate a POP PC statement here; do it in
+	 thumb_exit ().  */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+	     it is popped into r3 and then BX is used.  */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
+
+/* Returns non-zero if the current function contains a far jump.  */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME.  */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+	  /* Desperation time -- this will probably never happen.  */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
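+
+/* A sketch of the large-frame case above: an adjustment of, say,
+   2048 bytes cannot be encoded as a Thumb add/sub immediate, so the
+   negated amount is moved into a spare low register (ultimately
+   materialised by thumb_reorg as a pc-relative load) and added to sp:
+
+	ldr	r4, .LCn	@ .LCn: .word -2048
+	add	sp, sp, r4
+
+   The register and label names here are illustrative only.  */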
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+      /* We can deduce the registers used from the function's return value.
+	 This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type of size > 12, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+      /* We have either just popped the return address into the
+	 PC, or it was kept in LR for the entire function, or
+ it is still on the stack because we do not want to
+ return by doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
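+
+/* For illustration: to undo a prologue sequence such as
+
+	mov	r7, r9
+	mov	r6, r8
+	push	{r6, r7}
+
+   the loop above pops the two saved words into free low registers
+   and moves them back up, e.g.
+
+	pop	{r2, r3}
+	mov	r8, r2
+	mov	r9, r3
+
+   The particular low registers used depend on which ones are free
+   at the point of the epilogue.  */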
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
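+
+/* Worked example of the REG case above: if operands[0] is r0 and the
+   address is also held in r0, the high word is loaded first so that
+   the address is not clobbered:
+
+	ldr	r1, [r0, #4]
+	ldr	r0, [r0]
+
+   When the destination and address registers differ, the low word is
+   loaded first instead.  */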
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
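+
+/* The register swapping above matters because ldmia/stmia transfer
+   registers in ascending numerical order, regardless of the order in
+   which they appear in the register list.  E.g. for n == 2 with
+   scratch registers r3 and r2 the operands are swapped so that the
+   emitted insns are
+
+	ldmia	r1!, {r2, r3}
+	stmia	r0!, {r2, r3}
+
+   (register numbers illustrative).  */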
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
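+
+/* Note that the table above pairs each condition with its logical
+   inverse (eq/ne, cs/cc, ..., gt/le), so "conds[val ^ invert]" with
+   invert == 1 maps, e.g., GE (index 10, "ge") to index 11, "lt".  */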
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	 the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+	 On a target where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	 same as the 'Q' operand, since the most significant part of the
+	 value is held in the lower-numbered register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
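+
+/* Usage sketch (an illustrative template, not taken from this file):
+   in an insn pattern operating on a DImode register pair one might
+   write
+
+	"add\t%Q0, %Q0, %Q2\;adc\t%R0, %R0, %R2"
+
+   so that the least and most significant words are selected
+   correctly whichever value WORDS_BIG_ENDIAN has.  */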
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
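+
+/* Some illustrative cases for the rules above (assuming a 32-bit int):
+
+     struct { int x; };            returned in a register: one word,
+                                   and no second non-bit-field field.
+     struct { char a; char b; };   returned in memory: although only
+                                   two bytes, it has a second
+                                   non-bit-field field.
+     union { int i; short s; };    returned in a register: every
+                                   member can itself be returned in
+                                   a register.  */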
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_020422.c b/gcc_arm/config/arm/thumb_020422.c
new file mode 100755
index 0000000..cefc7d4
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.c
@@ -0,0 +1,2291 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+static int current_function_has_far_jump = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
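+
+/* E.g. 0x00ff0000 (0xff << 16) and 0x00003fc0 (0xff << 6) are
+   accepted, while 0x00000101 is rejected because its set bits do not
+   fit within any single 8-bit window.  */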
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+   constants have to come from a pc relative load. The reference of a pc
+   relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+ scan, find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Non-zero if the src operand needs to be fixed up. */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
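+
+/* For example, a small constant such as 1 satisfies the 'I'
+   constraint and needs no fixing, whereas a SET whose source is
+   0x12345678, or a load from a constant-pool symbol, cannot be
+   encoded in a single Thumb instruction and is later rewritten as a
+   pc-relative load by thumb_reorg.  */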
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Non-zero if the insn is a move instruction which needs to be fixed. */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+ /* If debugging information is going to be emitted
+		     then we must make sure that any references to
+ symbols which are removed by the above code are
+ also removed in the descriptions of the
+ function's variables. Failure to do this means
+ that the debugging information emitted could
+		     refer to symbols which are not emitted by
+ output_constant_pool() because
+ mark_constant_pool() never sees them as being
+ used. */
+
+
+ /* These are the tests used in
+ output_constant_pool() to decide if the constant
+ pool will be marked. Only necessary if debugging
+ info is being emitted. Only necessary for
+ references to memory whose address is given by a
+ symbol. */
+
+ if (optimize > 0
+ && flag_expensive_optimizations
+ && write_symbols != NO_DEBUG
+ && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF)
+ replace_symbols_in_block
+ (DECL_INITIAL (current_function_decl), src, newsrc);
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
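+
+/* A before/after sketch of the rewrite performed here.  A broken move
+   such as
+
+	(set (reg:SI 0) (const_int 305419896))	; 0x12345678
+
+   becomes a pc-relative load from the pool dumped at the barrier:
+
+	ldr	r0, .LPn
+	...
+   .LPn:
+	.word	0x12345678
+
+   The label name .LPn is illustrative; the real label is
+   pool_vector_label.  */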
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
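+
+/* Example decomposition: a 23-byte copy is emitted as one 12-byte
+   block move followed by one 8-byte block move (both of which
+   post-increment the pointers), then a halfword copy at offset 0 and
+   a byte copy at offset 2 from the updated pointers.  */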
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return non-zero if FUNC is a naked function. */
+
+static int
+arm_naked_function_p (func)
+ tree func;
+{
+ tree a;
+
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+ a = lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (func));
+ return a != NULL_TREE;
+}
+#endif
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return non-zero if FUNC must be entered in ARM mode. */
+int
+is_called_in_ARM_mode (func)
+ tree func;
+{
+ if (TREE_CODE (func) != FUNCTION_DECL)
+ abort ();
+
+  /* Ignore the problem about functions whose address is taken. */
+ if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
+ return TRUE;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ return lookup_attribute ("interfacearm", DECL_MACHINE_ATTRIBUTES (func)) != NULL_TREE;
+#else
+ return FALSE;
+#endif
+/* END CYGNUS LOCAL */
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn(insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
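+
+/* E.g. number_of_first_bit_set (0x0c) == 2.  Note that the loop does
+   not terminate for a zero mask; callers must guarantee mask != 0.  */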
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function. If
+ 'reg_containing_return_addr' is -1, then the return address is
+ actually on the stack, at the stack pointer. */
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+  /* If there is nothing to pop then just emit the BX instruction and return. */
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure and the function was not entered in ARM mode then
+ just pop the return address straight into the PC. */
+ else if ( ! TARGET_THUMB_INTERWORK
+ && ! TARGET_BACKTRACE
+ && ! is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* We can deduce the registers used from the function's return value.
+     This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ mode = GET_MODE (current_function_return_rtx);
+ else
+#endif
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+ if (mode == VOIDmode)
+ regs_available_for_popping =
+ (1 << ARG_1_REGISTER)
+ | (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping =
+ (1 << ARG_2_REGISTER)
+ | (1 << ARG_3_REGISTER);
+ }
+  else if (size <= 4)
+    regs_available_for_popping =
+      (1 << ARG_2_REGISTER)
+      | (1 << ARG_3_REGISTER);
+  else if (size <= 8)
+    regs_available_for_popping =
+      (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+ if (regs_available_for_popping == 0
+ && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [IP_REGISTER], reg_names [ARG_4_REGISTER]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* Process the registers we popped. */
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr =
+ number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+         the return address will not be corrupted by further pops. */
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names [frame_pointer]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+	  /* We popped the stack pointer as well; find the register that
+	     contains it. */
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [stack_pointer]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer have
+ any registers into which we can pop them, then we must move the return
+ address into the link register and make available the register that
+ contained it. */
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER],
+ reg_names [reg_containing_return_addr]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that at most we will want to pop FP and SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP.
+ Move whichever one it is into the correct register. */
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [move_to], reg_names [popped_into]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have only
+ had one register available to us and we are now popping the SP. */
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names [popped_into]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ if (reg_containing_return_addr != LINK_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [LINK_REGISTER], reg_names [ARG_4_REGISTER]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names [ARG_4_REGISTER], reg_names [IP_REGISTER]);
+ }
+
+ /* Return to caller. */
+ asm_fprintf (f, "\tbx\t%s\n", reg_names [reg_containing_return_addr]);
+}
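+
+/* For illustration: an interworking return with nothing left to pop
+   reduces to
+
+	bx	lr
+
+   whereas a return address still on the stack is first popped into a
+   free argument register, e.g.
+
+	pop	{r3}
+	bx	r3
+
+   (which register is actually free depends on the size of the
+   return value).  */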
+
+/* Emit code to push or pop registers to or from the stack. */
+static void
+thumb_pushpop (f, mask, push)
+ FILE * f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+ /* Special case. Do not generate a POP PC statement here, do it in
+ thumb_exit() */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+ it is popped into r3 and then BX is used. */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
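+
+/* Sample output: a push call with mask 0x40f0 (r4-r7 plus bit 14,
+   the link register) produces
+
+	push	{r4, r5, r6, r7, lr}
+
+   while a pop with the PC bit set under interworking is diverted to
+   thumb_exit as described above.  */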
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ if (current_function_has_far_jump)
+ return 1;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ {
+ current_function_has_far_jump = 1;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* If a function is naked, don't use the "return" insn. */
+ if (arm_naked_function_p (current_function_decl))
+ return "";
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if ( TARGET_THUMB_INTERWORK
+ || TARGET_BACKTRACE
+ || is_called_in_ARM_mode (current_function_decl))
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+ if (is_called_in_ARM_mode (current_function_decl))
+ {
+ char * name;
+ if (GET_CODE (DECL_RTL (current_function_decl)) != MEM)
+ abort();
+ if (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0)) != SYMBOL_REF)
+ abort();
+ name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+
+ /* Generate code sequence to switch us into Thumb mode. */
+ /* The .code 32 directive has already been emitted by
+	 ASM_DECLARE_FUNCTION_NAME */
+ asm_fprintf (f, "\torr\tr12, pc, #1\n");
+ asm_fprintf (f, "\tbx\tr12\n");
+
+ /* Generate a label, so that the debugger will notice the
+ change in instruction sets. This label is also used by
+ the assembler to bypass the ARM code when this function
+ is called from a Thumb encoded function elsewhere in the
+ same file. Hence the definition of STUB_NAME here must
+ agree with the definition in gas/config/tc-arm.c */
+
+#define STUB_NAME ".real_start_of"
+
+ asm_fprintf (f, "\t.code\t16\n");
+ asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
+ asm_fprintf (f, "\t.thumb_func\n");
+ asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
+ }
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have prologues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ /* CYGNUS LOCAL nickc/thumb-pe */
+#ifdef THUMB_PE
+ /* Naked functions don't have epilogues. */
+ if (arm_naked_function_p (current_function_decl))
+ return;
+#endif
+ /* END CYGNUS LOCAL nickc/thumb-pe */
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+ current_function_has_far_jump = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed ++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+ /* Try to deduce the registers used from the function's return value.
+ This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+ /* Unless we are returning a type of size > 12 bytes, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (TARGET_BACKTRACE && ((live_regs_mask & 0xFF) == 0) && regs_ever_live[ ARG_4_REGISTER ] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ if (had_to_push_lr
+ && ! is_called_in_ARM_mode (current_function_decl))
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ /* We have either just popped the return address into the
+ PC, or it was kept in LR for the entire function, or it is
+ still on the stack because we do not want to return by
+ doing a pop {pc}. */
+
+ if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0)
+ thumb_exit (asm_out_file,
+ (had_to_push_lr
+ && is_called_in_ARM_mode (current_function_decl)) ?
+ -1 : LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names [STACK_POINTER],
+ reg_names [STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
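+
+/* For example (illustrative): a DImode load into r0/r1 from [r0] must
+ fetch the high word first, since the low-word load clobbers the base:
+ ldr r1, [r0, #4]
+ ldr r0, [r0]
+ The REG and PLUS cases below order the two loads accordingly. */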
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+ fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+ fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case '_':
+ fputs (user_label_prefix, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+ the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+ same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
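+ /* Concrete illustration: for a DImode value held in r0/r1 on a
+ little-endian (WORDS_BIG_ENDIAN == 0) target, %Q prints r0, %R
+ prints r1 and %H also prints r1; when WORDS_BIG_ENDIAN is set,
+ %Q and %R swap while %H still prints r1. */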
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ case 'c':
+ /* We use 'c' operands with symbols for .vtinherit */
+ if (GET_CODE (x) == SYMBOL_REF)
+ output_addr_const(f, x);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+ function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
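+
+/* Illustratively: if a file calls foo() before defining it, an IMPORT
+ entry for foo is queued via thumb_aof_add_import, deleted again by
+ thumb_aof_delete_import when the definition is seen, and only the
+ names still external at the end are emitted by thumb_aof_dump_imports. */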
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
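+ /* Illustrative consequence: struct { short s; } is returned in a
+ register, but struct { short a; short b; } (also only 4 bytes) is
+ returned in memory, because b is addressable at a non-zero offset. */
+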
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+
+ if (flag_pic)
+ {
+ warning ("Position independent code not supported. Ignored");
+ flag_pic = 0;
+ }
+}
+
+/* CYGNUS LOCAL nickc/thumb-pe */
+
+#ifdef THUMB_PE
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ naked: don't output any prologue or epilogue code, the user is assumed
+ to do the right thing.
+
+ interfacearm: Always assume that this function will be entered in ARM
+ mode, not Thumb mode, and that the caller wishes to be returned to in
+ ARM mode. */
+int
+arm_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("naked", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ if (is_attribute_p ("interfacearm", attr))
+ return TREE_CODE (decl) == FUNCTION_DECL;
+
+ return 0;
+}
+#endif /* THUMB_PE */
+/* END CYGNUS LOCAL nickc/thumb-pe */
+
+/* Return nonzero if ATTR is a valid attribute for TYPE.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR.
+
+ Supported attributes:
+
+ short_call: assume the offset from the caller to the callee is small.
+
+ long_call: don't assume the offset is small. */
+
+int
+arm_valid_machine_type_attribute (type, attributes, attr, args)
+ tree type;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("long_call", attr))
+ return 1;
+
+ if (is_attribute_p ("short_call", attr))
+ return 1;
+
+ return 0;
+}
+
+/* Encode long_call or short_call attribute by prefixing
+ symbol name in DECL with a special character FLAG. */
+
+void
+arm_encode_call_attribute (decl, flag)
+ tree decl;
+ int flag;
+{
+ const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
+ int len = strlen (str);
+ char * newstr;
+
+ /* Do not allow weak functions to be treated as short call. */
+ if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
+ return;
+
+ if (ENCODED_SHORT_CALL_ATTR_P (str)
+ || ENCODED_LONG_CALL_ATTR_P (str))
+ return;
+
+ newstr = malloc (len + 2);
+ newstr[0] = flag;
+ strcpy (newstr + 1, str);
+
+ XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
+}
+
+/* Return the length of a function name prefix
+ that starts with the character 'c'. */
+
+static int
+arm_get_strip_length (char c)
+{
+ switch (c)
+ {
+ ARM_NAME_ENCODING_LENGTHS
+ default: return 0;
+ }
+}
+
+/* Return a pointer to a function's name with any
+ and all prefix encodings stripped from it. */
+
+char *
+arm_strip_name_encoding (char * name)
+{
+ int skip;
+
+ while ((skip = arm_get_strip_length (* name)))
+ name += skip;
+
+ return name;
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be
+ defined within the current compilation unit. If this caanot be
+ determined, then 0 is returned. */
+
+static int
+current_file_function_operand (sym_ref)
+ rtx sym_ref;
+{
+ /* This is a bit of a fib. A function will have a short call flag
+ applied to its name if it has the short call attribute, or it has
+ already been defined within the current compilation unit. */
+ if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
+ return 1;
+
+ /* The current function is always defined within the current compilation
+ unit. If it is a weak definition, however, then this may not be the real
+ definition of the function, and so we have to say no. */
+ if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
+ && !DECL_WEAK (current_function_decl))
+ return 1;
+
+ /* We cannot make the determination - default to returning 0. */
+ return 0;
+}
+
+/* Return non-zero if a 32 bit "long_call" should be generated for
+ this call. We generate a long_call if the function:
+
+ a. has an __attribute__ ((long_call))
+ or b. the -mlong-calls command line switch has been specified
+
+ However we do not generate a long call if the function:
+
+ c. has an __attribute__ ((short_call))
+ or d. has an __attribute__ ((section))
+ or e. is defined within the current compilation unit.
+
+ This function will be called by C fragments contained in the machine
+ description file. CALL_REF and CALL_COOKIE correspond to the matched
+ rtl operands. CALL_SYMBOL is used to distinguish between
+ two different callers of the function. It is set to 1 in the
+ "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
+ and "call_value" patterns. This is because of the difference in the
+ SYM_REFs passed by these patterns. */
+
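+/* Typical source-level usage (illustrative):
+ extern void far_away (void) __attribute__ ((long_call));
+ which forces calls to far_away to be generated as indirect calls even
+ without the -mlong-calls switch. */
+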
+int
+arm_is_longcall_p (sym_ref, call_cookie, call_symbol)
+ rtx sym_ref;
+ int call_cookie;
+ int call_symbol;
+{
+ if (!call_symbol)
+ {
+ if (GET_CODE (sym_ref) != MEM)
+ return 0;
+
+ sym_ref = XEXP (sym_ref, 0);
+ }
+
+ if (GET_CODE (sym_ref) != SYMBOL_REF)
+ return 0;
+
+ if (call_cookie & CALL_SHORT)
+ return 0;
+
+ if (TARGET_LONG_CALLS && flag_function_sections)
+ return 1;
+
+ if (current_file_function_operand (sym_ref))
+ return 0;
+
+ return (call_cookie & CALL_LONG)
+ || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
+ || TARGET_LONG_CALLS;
+}
+
+/* s_register_operand is the same as register_operand, but it doesn't accept
+ (SUBREG (MEM)...).
+
+ This function exists because at the time it was put in it led to better
+ code. SUBREG(MEM) always needs a reload in the places where
+ s_register_operand is used, and this seemed to lead to excessive
+ reloading. */
+
+int
+s_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (GET_MODE (op) != mode && mode != VOIDmode)
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ /* We don't consider registers whose class is NO_REGS
+ to be a register operand. */
+ /* XXX might have to check for lo regs only for thumb ??? */
+ return (GET_CODE (op) == REG
+ && (REGNO (op) >= FIRST_PSEUDO_REGISTER
+ || REGNO_REG_CLASS (REGNO (op)) != NO_REGS));
+}
diff --git a/gcc_arm/config/arm/thumb_020422.h b/gcc_arm/config/arm/thumb_020422.h
new file mode 100755
index 0000000..554ed1d
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.h
@@ -0,0 +1,1295 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction. Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+ might be called in ARM mode, from non-Thumb-aware code. */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+ linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+ } while (0)
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+ instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+ on processor predefines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
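+/* In terms of the REG_CLASS_CONTENTS masks below, NONARG_LO_REGS is
+ r4-r7 (0x000f0) and LO_REGS is r0-r7 (0x000ff). */
+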
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+ to be LO_REGS instead of BASE_REGS. It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
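+
+/* Summary of the constraint letters encoded above: I = 0..255,
+ J = -255..0, K = shiftable constant, L = -7..7,
+ M = word-aligned 0..1020, N = 0..31, O = -508..508. */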
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM).nregs < 16 && (CUM).nregs + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : (HARD_REGNO_NREGS (0, (MODE)) \
+ * 4)) > 16) \
+ ? 4 - (CUM).nregs / 4 : 0)
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+ /* This is the number of registers of arguments scanned so far. */
+ int nregs;
+ /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT. */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
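+/* Worked illustration (per the macros above and below): for
+ f (int a, double d, int b), a is passed in r0 (nregs advances
+ 0 -> 4), d in r1/r2 (4 -> 12) and b in r3 (12 -> 16); further
+ arguments go on the stack once nregs reaches 16. */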
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+ /* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+ Why - because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function, the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
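+
+/* Note: offsets 16 and 20 above are the two .word slots that follow
+ the four 4-byte ARM instructions in TRAMPOLINE_TEMPLATE. */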
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
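+
+/* That is: byte accesses allow offsets 0..31, halfword accesses even
+ offsets 0..62, and word (or larger) accesses word-aligned offsets
+ where the whole access stays within 128 bytes. */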
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+ addresses based on the frame pointer or arg pointer until the reload pass starts.
+ This is so that eliminating such addresses into stack based ones
+ won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+ reload work, but I do not have enough time to attempt such \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+ cope with the variances between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+ flag in the function name, otherwise if the function is static or
+ or known to be defined in this file then encode a short call flag.
+ This macro is used inside the ENCODE_SECTION macro. */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
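+
+/* So a function carrying __attribute__ ((long_call)) acquires an
+ internal assembler name such as "#foo"; arm_strip_name_encoding
+ (via ARM_NAME_ENCODING_LENGTHS below) strips the prefix again. */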
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
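+/* A minimal sketch of the switch this fragment is meant to sit inside
+   (the wrapper function here is hypothetical, not part of this port):
+
+     static int name_encoding_length (const char * name)
+     {
+       switch (* name)
+         {
+           ARM_NAME_ENCODING_LENGTHS
+         }
+       return 0;
+     }
+*/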
+
+/* This has to be handled by a function because more than one part of the
+   ARM backend uses function name prefixes to encode attributes.  */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
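+/* For example (illustrative only): a multiply by 100 shifts the
+   constant 0x64 -> 0x19 -> 0x6 -> 0x1 -> 0 in the MULT case above, so
+   CYCLES is 4 and the cost returned is COSTS_N_INSNS (2) + 4, roughly
+   modelling the core's early-terminating multiplier.  */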
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking.  */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
diff --git a/gcc_arm/config/arm/thumb_020422.md b/gcc_arm/config/arm/thumb_020422.md
new file mode 100755
index 0000000..04de07c
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020422.md
@@ -0,0 +1,1194 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' is a single-register class containing only the
+;; stack pointer.  Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
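+;; E.g. (illustrative): for r0 = r1 * r2 the first alternative emits
+;; "mov r0, r1" followed by "mul r0, r0, r2"; only when the output
+;; already matches operand 1 (the "0" alternative) does a bare
+;; "mul r0, r0, r2" suffice.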
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(parallel
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set the
+;; bottom bit of lr, so a function return (using bx) would switch back
+;; into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(parallel
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(parallel
+ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_020428.h b/gcc_arm/config/arm/thumb_020428.h
new file mode 100755
index 0000000..8bba8d0
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020428.h
@@ -0,0 +1,1297 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+ Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other
+   unsupported instructions.  */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#endif
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END 0x0001
+#define THUMB_FLAG_BACKTRACE 0x0002
+#define THUMB_FLAG_LEAF_BACKTRACE 0x0004
+#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */
+#define THUMB_FLAG_CALLEE_SUPER_INTERWORKING 0x40000
+#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000
+
+/* Nonzero if all call instructions should be indirect. */
+#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */
+
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+/* Set if externally visible functions should assume that they
+   might be called in ARM mode, from non-Thumb-aware code.  */
+#define TARGET_CALLEE_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLEE_SUPER_INTERWORKING)
+
+/* Set if calls via function pointers should assume that their
+ destination is non-Thumb aware. */
+#define TARGET_CALLER_INTERWORKING \
+ (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING)
+
+#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS)
+
+/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"callee-super-interworking", THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"no-callee-super-interworking", -THUMB_FLAG_CALLEE_SUPER_INTERWORKING}, \
+ {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \
+ {"long-calls", ARM_FLAG_LONG_CALLS, \
+ "Generate all call instructions as indirect calls"}, \
+ {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \
+ SUBTARGET_SWITCHES \
+ {"", TARGET_DEFAULT} \
+}
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ if ((LOG) > 0) \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., one not visible
+   to the linker).  */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+   } while (0)
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is the lowest numbered in
+   instructions that operate on numbered bit-fields.  */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if the most significant byte of a word is the lowest
+   numbered.  */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on processor pre-defines when compiling libgcc2.c.  */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
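+/* E.g. (illustrative): with this definition a QImode or HImode value is
+   promoted to an unsigned SImode register, which matches the
+   zero-extending ldrb/ldrh loads that the Thumb provides.  */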
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing
+ arguments to functions. These are the registers that are available for
+ spilling during reload. The code in reload1.c:init_reload() will detect this
+ class and place it into 'reload_address_base_reg_class'. */
+
+enum reg_class
+{
+ NO_REGS,
+ NONARG_LO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "NONARG_LO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000f0, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
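+/* Decoding the masks above (bit N = register N): NONARG_LO_REGS is
+   r4-r7, LO_REGS is r0-r7, STACK_REG is just sp (r13), BASE_REGS is
+   r0-r7 plus sp, HI_REGS is r8-r15, and ALL_REGS adds the fake
+   argument pointer (register 16).  */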
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \
+ : NONARG_LO_REGS) \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define MODE_BASE_REG_CLASS(MODE) \
+ ((MODE) != QImode && (MODE) != HImode \
+ ? BASE_REGS : LO_REGS)
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS.  It is not clear what effect this
+ change would have. */
+/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS
+ must always return a strict subset of the input class. Just blindly
+ returning LO_REGS is safe only if the input class is a superset of LO_REGS,
+ but there is no check for this. Added another exception for NONARG_LO_REGS
+ because it is not a superset of LO_REGS. */
+/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the
+ comments about BASE_REGS are now obsolete. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment
+ above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
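+
+/* Worked examples for the constraint letters above: 255 satisfies 'I'
+   (0..255), -200 satisfies 'J' (-255..0), 0xFF00 (an 8-bit value
+   shifted left) should satisfy 'K', 5 satisfies 'L' (-7..7), 1020
+   satisfies 'M' (word-aligned, below 1024), 31 satisfies 'N', and
+   -508 satisfies 'O' (-508..508).  */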
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM	 7	/* The TPCS defines this as 11, but does not really require it, so we use r7.  */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
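+
+/* Worked example: a non-leaf function with an 8 byte frame that saves
+   only r4 and r5 gets an AP->FP offset of 8 + 4 * (2 + 1) = 20 (the
+   extra slot being the saved link register).  Eliminating AP->SP adds
+   current_function_outgoing_args_size on top, and TARGET_BACKTRACE
+   would add a further 16 or 20 bytes.  */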
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode \
+ ? GEN_INT ((CUM).call_cookie) \
+ : (NAMED) \
+ ? ((CUM).nregs >= 16 ? 0 : gen_rtx (REG, MODE, (CUM).nregs / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM).nregs < 16 && (CUM).nregs + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : (HARD_REGNO_NREGS (0, (MODE)) \
+ * 4)) > 16) \
+ ? 4 - (CUM).nregs / 4 : 0)
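+
+/* Worked example: with three SImode arguments already consumed, so that
+   (CUM).nregs == 12, a DImode argument needs 8 more bytes and
+   12 + 8 > 16, giving 4 - 12 / 4 = 1 partial register: the low word
+   travels in r3 and the high word goes on the stack.  */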
+
+/* A C type for declaring a variable that is used as the first argument of
+ `FUNCTION_ARG' and other related values. For some target machines, the
+ type `int' suffices and can hold the number of bytes of argument so far.
+
+ On the ARM, this is the number of bytes of arguments scanned so far. */
+typedef struct
+{
+  /* This is the number of bytes of arguments scanned so far.  */
+ int nregs;
+  /* One of CALL_NORMAL, CALL_LONG or CALL_SHORT.  */
+ int call_cookie;
+} CUMULATIVE_ARGS;
+
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+ On the ARM, the offset starts at 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM).nregs = (((FNTYPE) && aggregate_value_p (TREE_TYPE ((FNTYPE)))) \
+ ? 4 : 0), \
+ (CUM).call_cookie = \
+ (((FNTYPE) && lookup_attribute ("short_call", TYPE_ATTRIBUTES (FNTYPE))) \
+ ? CALL_SHORT \
+ : (((FNTYPE) && lookup_attribute ("long_call", \
+ TYPE_ATTRIBUTES (FNTYPE)))\
+ || TARGET_LONG_CALLS) \
+ ? CALL_LONG \
+ : CALL_NORMAL))
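+
+/* For example, a function returning a structure in memory starts with
+   (CUM).nregs == 4: r0 is reserved for the return-value pointer, so
+   the first real argument is passed in r1.  */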
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM).nregs += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3) \
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >=0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned */
+/* A C expression which can inhibit the returning of certain function values
+ in registers, based on the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM).nregs < 16) \
+ (PRETEND_SIZE) = 16 - (CUM).nregs; \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+   Why?  Because it is easier.  This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
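+
+/* The offsets above match TRAMPOLINE_SIZE: bytes 0-15 hold the four ARM
+   instructions emitted by TRAMPOLINE_TEMPLATE, the static chain value
+   is written at offset 16 and the target function's address at offset
+   20, for 24 bytes in all.  */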
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
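+
+/* In concrete terms: QImode accepts offsets 0..31, HImode accepts even
+   offsets 0..62, and word (or larger) modes accept word-aligned offsets
+   for which the whole access stays below 128, i.e. 0..124 for SImode
+   and 0..120 for DImode.  */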
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this.  It probably has better
+   solutions to some of these problems.  */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+   addresses based on the frame pointer or arg pointer until the
+   reload pass starts.  This is so that eliminating such addresses
+   into stack based ones won't produce impossible code.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? REG+REG addresses have been completely disabled before \
+ reload completes, because we do not have enough available \
+ reload registers. We only have 3 guaranteed reload registers \
+ (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+ to support REG+REG addresses. We have left them enabled after \
+ reload completes, in the hope that reload_cse_regs and related \
+ routines will be able to create them after the fact. It is \
+ probably possible to support REG+REG addresses with additional \
+	 reload work, but I do not have enough time to attempt such \
+ a change at this time. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ /* ??? See comment above. */ \
+ && reload_completed \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && XEXP (X, 0) != frame_pointer_rtx \
+ && XEXP (X, 1) != frame_pointer_rtx \
+ && XEXP (X, 0) != virtual_stack_vars_rtx \
+ && XEXP (X, 1) != virtual_stack_vars_rtx \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
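+
+/* To summarize, the address forms accepted above are: a plain base
+   register (sp only for SImode or wider), constant pool references,
+   PC-relative label references once reload has completed,
+   post-increment of an index register (SImode or wider), reg+reg of
+   two low registers (only after reload), reg+#imm within
+   LEGITIMATE_OFFSET, and sp+#imm with a word-aligned immediate below
+   1024 (SImode or wider).  */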
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+ SP+large_offset address, then reload won't know how to fix it. It sees
+ only that SP isn't valid for HImode, and so reloads the SP into an index
+ register, but the resulting address is still invalid because the offset
+ is too big. We fix it here instead by reloading the entire address. */
+/* We could probably achieve better results by defining PROMOTE_MODE to help
+   cope with the differences between the Thumb's signed and unsigned byte and
+ halfword load instructions. */
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+{ \
+ if (GET_CODE (X) == PLUS \
+ && GET_MODE_SIZE (MODE) < 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && XEXP (X, 0) == stack_pointer_rtx \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ { \
+ rtx orig_X = X; \
+ X = copy_rtx (X); \
+ push_reload (orig_X, NULL_RTX, &X, NULL_PTR, \
+ BASE_REG_CLASS, \
+ Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+/* Flags for the call/call_value rtl operations set up by function_arg. */
+#define CALL_NORMAL 0x00000000 /* No special processing. */
+#define CALL_LONG 0x00000001 /* Always call indirect. */
+#define CALL_SHORT 0x00000002 /* Never call indirect. */
+
+#define ENCODE_SECTION_INFO(decl) \
+{ \
+ ARM_ENCODE_CALL_TYPE (decl) \
+}
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+int arm_valid_machine_type_attribute (/* union tree_node *, union tree_node *,
+ union tree_node *,
+ union tree_node * */);
+#define VALID_MACHINE_TYPE_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+arm_valid_machine_type_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* If we are referencing a function that is weak then encode a long call
+   flag in the function name, otherwise if the function is static or
+   known to be defined in this file then encode a short call flag.
+   This macro is used inside the ENCODE_SECTION_INFO macro.  */
+#define ARM_ENCODE_CALL_TYPE(decl) \
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd') \
+ { \
+ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl)) \
+ arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR); \
+ else if (! TREE_PUBLIC (decl)) \
+ arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR); \
+ }
+
+/* Special characters prefixed to function names
+ in order to encode attribute like information.
+ Note, '@' and '*' have already been taken. */
+#define SHORT_CALL_FLAG_CHAR '^'
+#define LONG_CALL_FLAG_CHAR '#'
+
+#define ENCODED_SHORT_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == SHORT_CALL_FLAG_CHAR)
+
+#define ENCODED_LONG_CALL_ATTR_P(SYMBOL_NAME) \
+ (*(SYMBOL_NAME) == LONG_CALL_FLAG_CHAR)
+
+#ifndef SUBTARGET_NAME_ENCODING_LENGTHS
+#define SUBTARGET_NAME_ENCODING_LENGTHS
+#endif
+
+/* This is a C fragment for the inside of a switch statement.
+ Each case label should return the number of characters to
+ be stripped from the start of a function's name, if that
+ name starts with the indicated character. */
+#define ARM_NAME_ENCODING_LENGTHS \
+ case SHORT_CALL_FLAG_CHAR: return 1; \
+ case LONG_CALL_FLAG_CHAR: return 1; \
+ case '*': return 1; \
+ SUBTARGET_NAME_ENCODING_LENGTHS
+
+/* This has to be handled by a function because more than one part of the
+   ARM backend uses function name prefixes to encode attributes.  */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
+ (VAR) = arm_strip_name_encoding (SYMBOL_NAME)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
+ asm_fprintf (FILE, "%U%s", arm_strip_name_encoding (NAME))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
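+
+/* For example, an SImode transfer to or from a low register costs
+   2 * 4 * 1 = 8, while a QImode transfer involving a high register
+   costs 8 * 2 = 16, which steers reload towards word-sized accesses
+   through LO_REGS.  */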
+
+/* Branches are treated as free below -O2; this allows better space
+   optimization when compiling with -O.  */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking.  */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ if (! is_called_in_ARM_mode (decl)) \
+ fprintf (file, "\t.thumb_func\n") ; \
+ else \
+ fprintf (file, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"s_register_operand", {SUBREG, REG}}, \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char * thumb_unexpanded_epilogue ();
+extern char * output_move_mem_multiple ();
+extern char * thumb_load_double_from_address ();
+extern char * output_return ();
+extern int far_jump_used_p();
+extern int is_called_in_ARM_mode ();
+
+char *arm_strip_name_encoding (/* const char * */);
+int arm_is_longcall_p (/* rtx, int, int */);
+int s_register_operand (/* register rtx op, enum machine_mode mode */);
diff --git a/gcc_arm/config/arm/thumb_020428.md b/gcc_arm/config/arm/thumb_020428.md
new file mode 100755
index 0000000..dedf42e
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_020428.md
@@ -0,0 +1,1194 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' contains only the stack register.  Trying to
+;; reload it will always fail catastrophically, so never allow those
+;; alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "s_register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "s_register_operand" "0")
+ (match_operand:DI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "s_register_operand" "l")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0")
+ (match_operand:SI 2 "s_register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "")
+ (and:SI (match_operand:SI 1 "s_register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l"))
+ (match_operand:SI 2 "s_register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "s_register_operand" "%0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (not:SI (match_operand:SI 1 "s_register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "s_register_operand" "0")
+ (match_operand:SI 2 "s_register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l")
+ (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(parallel
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[0], 0)) != REG
+ && arm_is_longcall_p (operands[0], INTVAL (operands[2]), 0))
+ XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0));
+}")
+
+(define_insn "*call_indirect"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr so that a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(parallel
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ ""
+ "
+{
+ if (GET_CODE (XEXP (operands[1], 0)) != REG
+ && arm_is_longcall_p (operands[1], INTVAL (operands[3]), 0))
+ XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0));
+}")
+
+(define_insn "*call_value_indirect"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(parallel
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(parallel
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))
+ (use (match_operand 2 "" ""))])]
+ "GET_CODE (operands[0]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(parallel
+ [(set (match_operand 0 "s_register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))])]
+ "GET_CODE(operands[1]) == SYMBOL_REF
+ && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/thumb_981111.md b/gcc_arm/config/arm/thumb_981111.md
new file mode 100755
index 0000000..93d0c05
--- /dev/null
+++ b/gcc_arm/config/arm/thumb_981111.md
@@ -0,0 +1,1166 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but it should always be replaced by
+;;; thumb_reorg with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+
+ if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG)
+ output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
+ else if (GET_CODE (ops[1]) == REG)
+ {
+ if (REGNO (ops[1]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ else
+ {
+ if (REGNO (ops[2]) == REGNO (operands[0]))
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ else
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ }
+ else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0)))
+ {
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ }
+ return \"\";
+}"
+[(set_attr "length" "2,6")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; register group 'k' is a single register group containing only the stack
+;; register. Trying to reload it will always fail catastrophically,
+;; so never allow those alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; reloading and elimination of the frame pointer can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "register_operand" "l")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "register_operand" "%l,*h,0")
+ (match_operand:SI 2 "register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "l"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (not:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
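+;; The three lengths above implement branch relaxation: the 2-byte
+;; conditional branch reaches roughly +/-256 bytes, the 4-byte form
+;; jumps an inverted conditional branch over an unconditional `b'
+;; (roughly +/-2K), and the 6-byte form branches over a `bl' far jump.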
+
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+ return \"\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%0"
+[(set_attr "length" "4")])
+;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version
+;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set
+;; the bottom bit of lr so that a function return (using bx)
+;; would switch back into ARM mode...
+
+(define_insn "*call_indirect_interwork"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%0"
+[(set_attr "length" "4")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_CALLER_INTERWORKING"
+ "bl\\t%__call_via_%1"
+[(set_attr "length" "4")])
+;; See comment for call_indirect pattern
+
+(define_insn "*call_value_indirect_interwork"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_CALLER_INTERWORKING"
+ "bl\\t%__interwork_call_via_%1"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; Untyped call not required, since all funcs return in r0
+
+;; Miscellaneous patterns
+
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc_arm/config/arm/tpe.h b/gcc_arm/config/arm/tpe.h
new file mode 100755
index 0000000..8f4f35f
--- /dev/null
+++ b/gcc_arm/config/arm/tpe.h
@@ -0,0 +1,427 @@
+/* CYGNUS LOCAL (entire file) nickc/thumb-pe */
+/* Definitions of target machine for GNU compiler,
+ for Thumb with PE object format.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Derived from arm/coff.h and arm/pe.h originally by Doug Evans (evans@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/thumb.h"
+
+#define THUMB_PE 1
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/pe)", stderr)
+
+/* Support the __declspec keyword by turning declspecs into attributes.
+ We currently only support: naked, dllimport, and dllexport.
+ Note that the current way we do this may result in a collision with
+ predefined attributes later on. This can be solved by using one attribute,
+ say __declspec__, and passing args to it. The problem with that approach
+ is that args are not accumulated: each new appearance would clobber any
+ existing args. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-Dthumb -D__thumb -D__pe__ -Acpu(arm) -Amachine(arm) \
+-D__declspec(x)=__attribute__((x)) \
+"
+
+/* Experimental addition for pr 7885.
+ Ignore dllimport for functions. */
+#define ARM_FLAG_NOP_FUN_IMPORT 0x20000
+#define TARGET_NOP_FUN_DLLIMPORT (target_flags & ARM_FLAG_NOP_FUN_IMPORT)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{ "nop-fun-dllimport", ARM_FLAG_NOP_FUN_IMPORT }, \
+{ "no-nop-fun-dllimport", -ARM_FLAG_NOP_FUN_IMPORT },
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT ARM_FLAG_NOP_FUN_IMPORT
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n%s", LOCAL_LABEL_PREFIX, ASM_APP_OFF )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/coff\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+/* This is to better conform to the ARM PCS.
+ Richard Earnshaw hasn't put this into FSF sources yet so it's here. */
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ ((TYPE_MODE ((TYPE)) == BLKmode && ! TYPE_NO_FORCE_BLK (TYPE)) \
+ || (AGGREGATE_TYPE_P ((TYPE)) && arm_pe_return_in_memory ((TYPE))))
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+extern int arm_pe_valid_machine_decl_attribute ();
+extern int arm_valid_machine_decl_attribute ();
+#undef VALID_MACHINE_DECL_ATTRIBUTE
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+ arm_pe_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+extern union tree_node * arm_pe_merge_machine_decl_attributes ();
+#define MERGE_MACHINE_DECL_ATTRIBUTES(OLD, NEW) \
+ arm_pe_merge_machine_decl_attributes ((OLD), (NEW))
+
+/* In addition to the stuff done in arm.h, we must mark dll symbols specially.
+ Definitions of dllexport'd objects install some info in the .drectve
+ section. References to dllimport'd objects are fetched indirectly via
+ __imp_. If both are declared, dllexport overrides.
+ This is also needed to implement one-only vtables: they go into their own
+ section and we need to set DECL_SECTION_NAME so we do that here.
+ Note that we can be called twice on the same decl. */
+extern void arm_pe_encode_section_info ();
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) \
+ arm_pe_encode_section_info (DECL)
+
+#define REDO_SECTION_INFO_P(DECL) 1
+
+ /* Utility used only in this file. */
+#define ARM_STRIP_NAME_ENCODING(SYM_NAME) \
+((SYM_NAME) + ((SYM_NAME)[0] == '@' ? 3 : 0))
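+/* Names encoded by ENCODE_SECTION_INFO therefore begin with `@'
+   followed by two more characters (presumably `@i.' for dllimport and
+   `@e.' for dllexport), hence the three-character skip above.  */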
+
+/* Strip any text from SYM_NAME added by ENCODE_SECTION_INFO and store
+ the result in VAR. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYM_NAME) \
+(VAR) = ARM_STRIP_NAME_ENCODING (SYM_NAME)
+
+/* Define this macro if in some cases global symbols from one translation
+ unit may not be bound to undefined symbols in another translation unit
+ without user intervention. For instance, under Microsoft Windows
+ symbols must be explicitly imported from shared libraries (DLLs). */
+#define MULTIPLE_SYMBOL_SPACES
+
+#define UNIQUE_SECTION_P(DECL) DECL_ONE_ONLY (DECL)
+extern void arm_pe_unique_section ();
+#define UNIQUE_SECTION(DECL,RELOC) arm_pe_unique_section (DECL, RELOC)
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+ /* Functions may have been compiled at various levels of \
+ optimization so we can't use `same_size' here. Instead, \
+ have the linker pick one. */ \
+ if ((DECL) && DECL_ONE_ONLY (DECL)) \
+ fprintf (STREAM, "\t.linkonce %s\n", \
+ TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "discard" : "same_size"); \
+} while (0)
+
+/* This outputs a lot of .req's to define aliases for various registers.
+ Let's try to avoid this. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for ARM/pe\n", \
+ ASM_COMMENT_START, version_string); \
+ output_file_directive ((STREAM), main_input_filename); \
+} while (0)
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, ARM_STRIP_NAME_ENCODING (NAME))
+
+/* Output a function definition label. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ function_section (DECL); \
+ } \
+ if (! is_called_in_ARM_mode (DECL)) \
+ fprintf (STREAM, "\t.thumb_func\n") ; \
+ else \
+ fprintf (STREAM, "\t.code\t32\n") ; \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Output a common block. */
+#undef ASM_OUTPUT_COMMON
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ drectve_section (); \
+ fprintf ((STREAM), "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ if (! arm_dllimport_name_p (NAME)) \
+ { \
+ fprintf ((STREAM), "\t.comm\t"); \
+ assemble_name ((STREAM), (NAME)); \
+ fprintf ((STREAM), ", %d\t%s %d\n", \
+ (ROUNDED), ASM_COMMENT_START, (SIZE)); \
+ } \
+} while (0)
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+do { \
+ if (arm_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ ARM_STRIP_NAME_ENCODING (NAME)); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef SUBTARGET_EXTRA_SECTIONS
+#define SUBTARGET_EXTRA_SECTIONS in_drectve,
+
+/* A list of extra section function definitions. */
+
+#undef SUBTARGET_EXTRA_SECTION_FUNCTIONS
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ DRECTVE_SECTION_FUNCTION \
+ SWITCH_TO_SECTION_FUNCTION
+
+#define DRECTVE_SECTION_FUNCTION \
+void \
+drectve_section () \
+{ \
+ if (in_section != in_drectve) \
+ { \
+ fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP); \
+ in_section = in_drectve; \
+ } \
+}
+
+/* Switch to SECTION (an `enum in_section').
+
+ ??? This facility should be provided by GCC proper.
+ The problem is that we want to temporarily switch sections in
+ ASM_DECLARE_OBJECT_NAME and then switch back to the original section
+ afterwards. */
+#define SWITCH_TO_SECTION_FUNCTION \
+void \
+switch_to_section (section, decl) \
+ enum in_section section; \
+ tree decl; \
+{ \
+ switch (section) \
+ { \
+ case in_text: text_section (); break; \
+ case in_data: data_section (); break; \
+ case in_named: named_section (decl, NULL, 0); break; \
+ case in_rdata: rdata_section (); break; \
+ case in_ctors: ctors_section (); break; \
+ case in_dtors: dtors_section (); break; \
+ case in_drectve: drectve_section (); break; \
+ default: abort (); break; \
+ } \
+}
+
+
+
+extern int thumb_pe_valid_machine_decl_attribute ();
diff --git a/gcc_arm/config/arm/unknown-elf-oabi.h b/gcc_arm/config/arm/unknown-elf-oabi.h
new file mode 100755
index 0000000..22aacf6
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf-oabi.h
@@ -0,0 +1,36 @@
+/* Definitions for non-Linux based ARM systems using ELF old abi
+ Copyright (C) 1998, 1999 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux old abi)", stderr);
+#endif
+
+#define CPP_PREDEFINES "-Darm_oabi -Darm -Darm_elf -Acpu(arm) -Amachine(arm) -D__ELF__"
+
+#ifndef ASM_SPEC
+#define ASM_SPEC "-moabi %{mbig-endian:-EB} %{mcpu=*:-m%*} %{march=*:-m%*} \
+ %{mapcs-*:-mapcs-%*} %{mthumb-interwork:-mthumb-interwork}"
+#endif
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/unknown-elf.h"
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/unknown-elf.h b/gcc_arm/config/arm/unknown-elf.h
new file mode 100755
index 0000000..53f9522
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf.h
@@ -0,0 +1,166 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux)", stderr);
+#endif
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* Default to using APCS-32 and software floating point. */
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* Now we define the strings used to build the spec file. */
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX "."
+
+#define TEXT_SECTION " .text"
+
+#define INVOKE__main
+
+/* Debugging */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION rdata_section
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#define SUBTARGET_EXTRA_SECTIONS in_rdata,
+
+/* A list of extra section function definitions. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS RDATA_SECTION_FUNCTION
+
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Don't know how to order these.  UNALIGNED_WORD_ASM_OP is also used
+   in dwarf2out.c.  */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, ADDR)
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
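UNIQUE_SECTION above composes a per-decl section name from a kind-dependent prefix: ordinary decls get .text./.rodata./.data., while one-only decls get the .gnu.linkonce.{t,r,d}. prefixes that let the linker discard duplicate copies. A standalone model of the resulting names (assumed inputs, not GCC source):

    #include <stdio.h>

    static void show (int one_only, int is_function, int readonly,
                      const char *name)
    {
      const char *prefix;

      if (!one_only)
        prefix = is_function ? ".text."
               : readonly    ? ".rodata."
               :               ".data.";
      else
        prefix = is_function ? ".gnu.linkonce.t."
               : readonly    ? ".gnu.linkonce.r."
               :               ".gnu.linkonce.d.";
      printf ("%s%s\n", prefix, name);
    }

    int main (void)
    {
      show (0, 1, 0, "foo");  /* .text.foo */
      show (1, 1, 0, "foo");  /* .gnu.linkonce.t.foo */
      show (1, 0, 1, "tbl");  /* .gnu.linkonce.r.tbl */
      return 0;
    }
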
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm7tdmi
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/unknown-elf_020422.h b/gcc_arm/config/arm/unknown-elf_020422.h
new file mode 100755
index 0000000..3f6090c
--- /dev/null
+++ b/gcc_arm/config/arm/unknown-elf_020422.h
@@ -0,0 +1,163 @@
+/* Definitions for non-Linux based ARM systems using ELF
+ Copyright (C) 1998, 2001 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (ARM/ELF non-Linux)", stderr);
+#endif
+
+/* If you don't define HAVE_ATEXIT, and the object file format/OS/whatever
+ does not support constructors/destructors, then gcc implements destructors
+ by defining its own exit function, which calls the destructors. This gcc
+ exit function overrides the C library's exit function, and this can cause
+ all kinds of havoc if the C library has a non-trivial exit function. You
+ really don't want to use the exit function in libgcc2.c. */
+#define HAVE_ATEXIT
+
+/* Default to using APCS-32 and software floating point. */
+#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
+
+/* Now we define the strings used to build the spec file. */
+#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s"
+
+#define ENDFILE_SPEC "crtend%O%s"
+
+#define USER_LABEL_PREFIX ""
+#define LOCAL_LABEL_PREFIX "."
+
+#define TEXT_SECTION " .text"
+
+#define INVOKE__main
+
+/* Debugging */
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Support for Constructors and Destructors. */
+#define READONLY_DATA_SECTION rdata_section
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+#define SUBTARGET_EXTRA_SECTIONS in_rdata,
+
+/* A list of extra section function definitions. */
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS RDATA_SECTION_FUNCTION
+
+#define RDATA_SECTION_ASM_OP "\t.section .rodata"
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTOR_LIST_BEGIN \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define CTOR_LIST_END \
+asm (CTORS_SECTION_ASM_OP); \
+func_ptr __CTOR_END__[1] = { (func_ptr) 0 };
+
+#define DTOR_LIST_BEGIN \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_LIST__[1] = { (func_ptr) (-1) }
+
+#define DTOR_LIST_END \
+asm (DTORS_SECTION_ASM_OP); \
+func_ptr __DTOR_END__[1] = { (func_ptr) 0 };
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \
+ else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \
+ fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \
+} while (0)
+
+/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in
+   dwarf2out.c. */
+#define UNALIGNED_WORD_ASM_OP ".4byte"
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
+
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char * name, * string, * prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ { \
+ prefix = "."; \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".text."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".rodata."; \
+ else \
+ prefix = ".data."; \
+ } \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
+
+#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm7tdmi
+
+/* Now get the routine arm-elf definitions. */
+#include "arm/elf.h"
diff --git a/gcc_arm/config/arm/x-riscix b/gcc_arm/config/arm/x-riscix
new file mode 100755
index 0000000..4584f95
--- /dev/null
+++ b/gcc_arm/config/arm/x-riscix
@@ -0,0 +1,8 @@
+# Define new names for the getopt library, so that we don't have to statically
+# link [un]protoize. We have dirent.h not sys/dir.h, so define POSIX.
+X_CFLAGS= -DPOSIX -Dopterr=gcc_opterr -Doptind=gcc_optind \
+ -Dgetopt=gcc_getopt -Doptarg=gcc_optarg
+# Compile in BSD mode.
+OLDCC=/usr/ucb/cc
+CC=$(OLDCC)
+FIXPROTO_DEFINES= -D_POSIX_SOURCE -D_XOPEN_C -D_BSD_C -D_XOPEN_SOURCE
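The -D renames above work purely at the preprocessor level: every reference to getopt and its globals is rewritten before compilation, so the bundled getopt can be linked without colliding with the host libc's symbols. A small self-contained illustration (stub body; the real implementation is the bundled getopt source):

    #include <stdio.h>

    #define getopt gcc_getopt  /* what -Dgetopt=gcc_getopt does */

    /* Stub standing in for the bundled getopt implementation.  */
    int gcc_getopt (int argc, char **argv, const char *opts)
    {
      (void) argc; (void) argv; (void) opts;
      return -1;
    }

    int main (int argc, char **argv)
    {
      /* After preprocessing this call reads gcc_getopt (...), so the
         host libc's getopt is never referenced.  */
      printf ("%d\n", getopt (argc, argv, "v"));
      return 0;
    }
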
diff --git a/gcc_arm/config/arm/xm-arm.h b/gcc_arm/config/arm/xm-arm.h
new file mode 100755
index 0000000..a6143fa
--- /dev/null
+++ b/gcc_arm/config/arm/xm-arm.h
@@ -0,0 +1,68 @@
+/* Configuration for GNU C-compiler for Acorn RISC Machine.
+ Copyright (C) 1991, 1993 Free Software Foundation, Inc.
+ Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
+ and Martin Simmons (@harleqn.co.uk).
+ More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+
+/* A code distinguishing the floating point format of the host
+ machine. There are three defined values: IEEE_FLOAT_FORMAT,
+ VAX_FLOAT_FORMAT, and UNKNOWN_FLOAT_FORMAT. */
+
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+#define HOST_FLOAT_WORDS_BIG_ENDIAN 1
+
+/* If not compiled with GNU C, use C alloca. */
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
+
+/* Define this to be 1 if you know the host compiler supports prototypes, even
+ if it doesn't define __STDC__, or define it to be 0 if you do not want any
+ prototypes when compiling GNU CC. */
+#define USE_PROTOTYPES 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If we have defined POSIX, but are compiling in the BSD environment, then
+ we need to define getcwd in terms of getwd. */
+#if defined (POSIX) && defined (_BSD_C)
+#define HAVE_GETWD 1
+#endif
+
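A hedged sketch of what "define getcwd in terms of getwd" amounts to (assumed shape and name; the actual wrapper lives in GCC's host support code, not in this header): BSD getwd fills a caller-supplied MAXPATHLEN buffer and takes no length argument, so getcwd reduces to it when the buffer is known to be large enough.

    #include <sys/param.h>  /* MAXPATHLEN */

    extern char *getwd (char *);

    /* Hypothetical wrapper name, shown only to make the idea concrete.  */
    char *
    sketch_getcwd (char *buf, unsigned long size)
    {
      if (size < MAXPATHLEN)
        return 0;  /* getwd assumes at least MAXPATHLEN bytes */
      return getwd (buf);
    }
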
+/* EOF xm-arm.h */
+
+
diff --git a/gcc_arm/config/arm/xm-linux.h b/gcc_arm/config/arm/xm-linux.h
new file mode 100755
index 0000000..ca120a9
--- /dev/null
+++ b/gcc_arm/config/arm/xm-linux.h
@@ -0,0 +1,24 @@
+/* Configuration for GCC for ARM running Linux-based GNU systems.
+ Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by H.J. Lu (hjl@nynexst.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <arm/xm-arm.h>
+#include <xm-linux.h>
+
diff --git a/gcc_arm/config/arm/xm-netbsd.h b/gcc_arm/config/arm/xm-netbsd.h
new file mode 100755
index 0000000..ea9a64e
--- /dev/null
+++ b/gcc_arm/config/arm/xm-netbsd.h
@@ -0,0 +1,7 @@
+/* Configuration for GCC for ARM running NetBSD as host. */
+
+#include <arm/xm-arm.h>
+
+#ifndef SYS_SIGLIST_DECLARED
+#define SYS_SIGLIST_DECLARED
+#endif
diff --git a/gcc_arm/config/arm/xm-thumb.h b/gcc_arm/config/arm/xm-thumb.h
new file mode 100755
index 0000000..3356ae2
--- /dev/null
+++ b/gcc_arm/config/arm/xm-thumb.h
@@ -0,0 +1 @@
+#include <tm.h>