From 9dc75fe3b4be91d6066c8e870eacec954117cc08 Mon Sep 17 00:00:00 2001 From: YamaArashi Date: Wed, 27 Apr 2016 00:00:40 -0700 Subject: reorganize files --- gcc/Makefile | 15 +- gcc/config.h | 2 +- gcc/config/arm/README-interworking | 742 ------------------ gcc/config/arm/lib1thumb.asm | 736 ----------------- gcc/config/arm/t-thumb-elf | 32 - gcc/config/arm/telf.h | 368 --------- gcc/config/arm/thumb.c | 1527 ------------------------------------ gcc/config/arm/thumb.h | 1137 --------------------------- gcc/config/arm/thumb.md | 1157 --------------------------- gcc/config/fp-bit.c | 1507 ----------------------------------- gcc/fp-test.c | 231 ------ gcc/libgcc1-test.c | 117 --- gcc/libgcc1.c | 596 -------------- gcc/libgcc2.c | 946 ---------------------- gcc/telf.h | 368 +++++++++ gcc/testsuite/fp-test.c | 231 ++++++ gcc/thumb.c | 1527 ++++++++++++++++++++++++++++++++++++ gcc/thumb.h | 1137 +++++++++++++++++++++++++++ gcc/thumb.md | 1157 +++++++++++++++++++++++++++ libgcc/fp-bit.c | 1507 +++++++++++++++++++++++++++++++++++ libgcc/lib1thumb.asm | 736 +++++++++++++++++ libgcc/libgcc1-test.c | 117 +++ libgcc/libgcc1.c | 596 ++++++++++++++ libgcc/libgcc2.c | 946 ++++++++++++++++++++++ 24 files changed, 8327 insertions(+), 9108 deletions(-) delete mode 100755 gcc/config/arm/README-interworking delete mode 100755 gcc/config/arm/lib1thumb.asm delete mode 100755 gcc/config/arm/t-thumb-elf delete mode 100755 gcc/config/arm/telf.h delete mode 100755 gcc/config/arm/thumb.c delete mode 100755 gcc/config/arm/thumb.h delete mode 100755 gcc/config/arm/thumb.md delete mode 100755 gcc/config/fp-bit.c delete mode 100755 gcc/fp-test.c delete mode 100755 gcc/libgcc1-test.c delete mode 100755 gcc/libgcc1.c delete mode 100755 gcc/libgcc2.c create mode 100755 gcc/telf.h create mode 100755 gcc/testsuite/fp-test.c create mode 100755 gcc/thumb.c create mode 100755 gcc/thumb.h create mode 100755 gcc/thumb.md create mode 100755 libgcc/fp-bit.c create mode 100755 libgcc/lib1thumb.asm create mode 100755 libgcc/libgcc1-test.c create mode 100755 libgcc/libgcc1.c create mode 100755 libgcc/libgcc2.c diff --git a/gcc/Makefile b/gcc/Makefile index d03008f..e677c70 100644 --- a/gcc/Makefile +++ b/gcc/Makefile @@ -26,9 +26,7 @@ CFLAGS = -g -std=gnu11 CC = gcc -out_file=$(srcdir)/config/arm/thumb.c -out_object_file=thumb.o -md_file=$(srcdir)/config/arm/thumb.md +md_file=$(srcdir)/thumb.md # End of variables for you to override. @@ -40,12 +38,8 @@ HOST_RTLANAL = rtlanal.o HOST_PRINT = print-rtl.o # Specify the directories to be searched for header files. -# Both . and srcdir are used, in that order, -# so that tm.h and config.h will be found in the compilation -# subdirectory rather than in the source directory. -INCLUDES = -I. -I$(srcdir) -I$(srcdir)/config -I$(srcdir)/../include +INCLUDES = -I. -I$(srcdir) -# Always use -I$(srcdir)/config when compiling. 
.c.o: $(CC) -c $(CFLAGS) $(INCLUDES) $< @@ -64,7 +58,7 @@ OBJS = toplev.o version.o tree.o print-tree.o stor-layout.o fold-const.o \ insn-peep.o final.o recog.o \ insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \ lcm.o \ - insn-attrtab.o $(out_object_file) getpwd.o convert.o \ + insn-attrtab.o thumb.o getpwd.o convert.o \ dyn-string.o splay-tree.o graph.o sbitmap.o resource.o \ c-parse.o c-lex.o c-decl.o c-typeck.o c-convert.o c-aux-info.o c-common.o \ c-iterate.o obstack.o @@ -274,10 +268,9 @@ recog.o : recog.c $(CONFIG_H) system.h $(RTL_H) \ insn-flags.h insn-codes.h real.h toplev.h dyn-string.o: dyn-string.c dyn-string.h $(CONFIG_H) system.h -$(out_object_file): $(out_file) $(CONFIG_H) $(TREE_H) \ +thumb.o: thumb.c $(CONFIG_H) $(TREE_H) \ $(RTL_H) $(REGS_H) hard-reg-set.h real.h insn-config.h conditions.h \ insn-flags.h output.h insn-attr.h insn-codes.h system.h toplev.h - $(CC) -c $(CFLAGS) $(INCLUDES) $(out_file) # Generate header and source files from the machine description, # and compile them. diff --git a/gcc/config.h b/gcc/config.h index f9f6819..0232972 100644 --- a/gcc/config.h +++ b/gcc/config.h @@ -9,4 +9,4 @@ #define HOST_BITS_PER_SHORT 16 #define HOST_BITS_PER_INT 32 -#include "arm/telf.h" +#include "telf.h" diff --git a/gcc/config/arm/README-interworking b/gcc/config/arm/README-interworking deleted file mode 100755 index 46b76c9..0000000 --- a/gcc/config/arm/README-interworking +++ /dev/null @@ -1,742 +0,0 @@ - Arm / Thumb Interworking - ======================== - -The Cygnus GNU Pro Toolkit for the ARM7T processor supports function -calls between code compiled for the ARM instruction set and code -compiled for the Thumb instruction set and vice versa. This document -describes how that interworking support operates and explains the -command line switches that should be used in order to produce working -programs. - -Note: The Cygnus GNU Pro Toolkit does not support switching between -compiling for the ARM instruction set and the Thumb instruction set -on anything other than a per file basis. There are in fact two -completely separate compilers, one that produces ARM assembler -instructions and one that produces Thumb assembler instructions. The -two compilers share the same assembler, linker and so on. - - -1. Explicit interworking support for C and C++ files -==================================================== - -By default if a file is compiled without any special command line -switches then the code produced will not support interworking. -Provided that a program is made up entirely from object files and -libraries produced in this way and which contain either exclusively -ARM instructions or exclusively Thumb instructions then this will not -matter and a working executable will be created. If an attempt is -made to link together mixed ARM and Thumb object files and libraries, -then warning messages will be produced by the linker and a non-working -executable will be created. - -In order to produce code which does support interworking it should be -compiled with the - - -mthumb-interwork - -command line option. Provided that a program is made up entirely from -object files and libraries built with this command line switch a -working executable will be produced, even if both ARM and Thumb -instructions are used by the various components of the program. (No -warning messages will be produced by the linker either). - -Note that specifying -mthumb-interwork does result in slightly larger, -slower code being produced. 
This is why interworking support must be -specifically enabled by a switch. - - -2. Explicit interworking support for assembler files -==================================================== - -If assembler files are to be included into an interworking program -then the following rules must be obeyed: - - * Any externally visible functions must return by using the BX - instruction. - - * Normal function calls can just use the BL instruction. The - linker will automatically insert code to switch between ARM - and Thumb modes as necessary. - - * Calls via function pointers should use the BX instruction if - the call is made in ARM mode: - - .code 32 - mov lr, pc - bx rX - - This code sequence will not work in Thumb mode however, since - the mov instruction will not set the bottom bit of the lr - register. Instead a branch-and-link to the _call_via_rX - functions should be used instead: - - .code 16 - bl _call_via_rX - - where rX is replaced by the name of the register containing - the function address. - - * All externally visible functions which should be entered in - Thumb mode must have the .thumb_func pseudo op specified just - before their entry point. eg: - - .code 16 - .global function - .thumb_func - function: - ...start of function.... - - * All assembler files must be assembled with the switch - -mthumb-interwork specified on the command line. (If the file - is assembled by calling gcc it will automatically pass on the - -mthumb-interwork switch to the assembler, provided that it - was specified on the gcc command line in the first place.) - - -3. Support for old, non-interworking aware code. -================================================ - -If it is necessary to link together code produced by an older, -non-interworking aware compiler, or code produced by the new compiler -but without the -mthumb-interwork command line switch specified, then -there are two command line switches that can be used to support this. - -The switch - - -mcaller-super-interworking - -will allow calls via function pointers in Thumb mode to work, -regardless of whether the function pointer points to old, -non-interworking aware code or not. Specifying this switch does -produce slightly slower code however. - -Note: There is no switch to allow calls via function pointers in ARM -mode to be handled specially. Calls via function pointers from -interworking aware ARM code to non-interworking aware ARM code work -without any special considerations by the compiler. Calls via -function pointers from interworking aware ARM code to non-interworking -aware Thumb code however will not work. (Actually under some -circumstances they may work, but there are no guarantees). This is -because only the new compiler is able to produce Thumb code, and this -compiler already has a command line switch to produce interworking -aware code. - - -The switch - - -mcallee-super-interworking - -will allow non-interworking aware ARM or Thumb code to call Thumb -functions, either directly or via function pointers. Specifying this -switch does produce slightly larger, slower code however. - -Note: There is no switch to allow non-interworking aware ARM or Thumb -code to call ARM functions. There is no need for any special handling -of calls from non-interworking aware ARM code to interworking aware -ARM functions, they just work normally. Calls from non-interworking -aware Thumb functions to ARM code however, will not work. 
There is no -option to support this, since it is always possible to recompile the -Thumb code to be interworking aware. - -As an alternative to the command line switch --mcallee-super-interworking, which affects all externally visible -functions in a file, it is possible to specify an attribute or -declspec for individual functions, indicating that that particular -function should support being called by non-interworking aware code. -The function should be defined like this: - - int __attribute__((interfacearm)) function - { - ... body of function ... - } - -or - - int __declspec(interfacearm) function - { - ... body of function ... - } - - - -4. Interworking support in dlltool -================================== - -It is possible to create DLLs containing mixed ARM and Thumb code. It -is also possible to call Thumb code in a DLL from an ARM program and -vice versa. It is even possible to call ARM DLLs that have been compiled -without interworking support (say by an older version of the compiler), -from Thumb programs and still have things work properly. - - A version of the `dlltool' program which supports the `--interwork' -command line switch is needed, as well as the following special -considerations when building programs and DLLs: - -*Use `-mthumb-interwork'* - When compiling files for a DLL or a program the `-mthumb-interwork' - command line switch should be specified if calling between ARM and - Thumb code can happen. If a program is being compiled and the - mode of the DLLs that it uses is not known, then it should be - assumed that interworking might occur and the switch used. - -*Use `-m thumb'* - If the exported functions from a DLL are all Thumb encoded then the - `-m thumb' command line switch should be given to dlltool when - building the stubs. This will make dlltool create Thumb encoded - stubs, rather than its default of ARM encoded stubs. - - If the DLL consists of both exported Thumb functions and exported - ARM functions then the `-m thumb' switch should not be used. - Instead the Thumb functions in the DLL should be compiled with the - `-mcallee-super-interworking' switch, or with the `interfacearm' - attribute specified on their prototypes. In this way they will be - given ARM encoded prologues, which will work with the ARM encoded - stubs produced by dlltool. - -*Use `-mcaller-super-interworking'* - If it is possible for Thumb functions in a DLL to call - non-interworking aware code via a function pointer, then the Thumb - code must be compiled with the `-mcaller-super-interworking' - command line switch. This will force the function pointer calls - to use the _interwork_call_via_rX stub functions which will - correctly restore Thumb mode upon return from the called function. - -*Link with `libgcc.a'* - When the dll is built it may have to be linked with the GCC - library (`libgcc.a') in order to extract the _call_via_rX functions - or the _interwork_call_via_rX functions. This represents a partial - redundancy since the same functions *may* be present in the - application itself, but since they only take up 372 bytes this - should not be too much of a consideration. - -*Use `--support-old-code'* - When linking a program with an old DLL which does not support - interworking, the `--support-old-code' command line switch to the - linker should be used. This causes the linker to generate special - interworking stubs which can cope with old, non-interworking aware - ARM code, at the cost of generating bulkier code. 
The linker will - still generate a warning message along the lines of: - "Warning: input file XXX does not support interworking, whereas YYY does." - but this can now be ignored because the --support-old-code switch - has been used. - - - -5. How interworking support works -================================= - -Switching between the ARM and Thumb instruction sets is accomplished -via the BX instruction which takes as an argument a register name. -Control is transfered to the address held in this register (with the -bottom bit masked out), and if the bottom bit is set, then Thumb -instruction processing is enabled, otherwise ARM instruction -processing is enabled. - -When the -mthumb-interwork command line switch is specified, gcc -arranges for all functions to return to their caller by using the BX -instruction. Thus provided that the return address has the bottom bit -correctly initialised to indicate the instruction set of the caller, -correct operation will ensue. - -When a function is called explicitly (rather than via a function -pointer), the compiler generates a BL instruction to do this. The -Thumb version of the BL instruction has the special property of -setting the bottom bit of the LR register after it has stored the -return address into it, so that a future BX instruction will correctly -return the instruction after the BL instruction, in Thumb mode. - -The BL instruction does not change modes itself however, so if an ARM -function is calling a Thumb function, or vice versa, it is necessary -to generate some extra instructions to handle this. This is done in -the linker when it is storing the address of the referenced function -into the BL instruction. If the BL instruction is an ARM style BL -instruction, but the referenced function is a Thumb function, then the -linker automatically generates a calling stub that converts from ARM -mode to Thumb mode, puts the address of this stub into the BL -instruction, and puts the address of the referenced function into the -stub. Similarly if the BL instruction is a Thumb BL instruction, and -the referenced function is an ARM function, the linker generates a -stub which converts from Thumb to ARM mode, puts the address of this -stub into the BL instruction, and the address of the referenced -function into the stub. - -This is why it is necessary to mark Thumb functions with the -.thumb_func pseudo op when creating assembler files. This pseudo op -allows the assembler to distinguish between ARM functions and Thumb -functions. (The Thumb version of GCC automatically generates these -pseudo ops for any Thumb functions that it generates). - -Calls via function pointers work differently. Whenever the address of -a function is taken, the linker examines the type of the function -being referenced. If the function is a Thumb function, then it sets -the bottom bit of the address. Technically this makes the address -incorrect, since it is now one byte into the start of the function, -but this is never a problem because: - - a. with interworking enabled all calls via function pointer - are done using the BX instruction and this ignores the - bottom bit when computing where to go to. - - b. the linker will always set the bottom bit when the address - of the function is taken, so it is never possible to take - the address of the function in two different places and - then compare them and find that they are not equal. - -As already mentioned any call via a function pointer will use the BX -instruction (provided that interworking is enabled). 
The only problem -with this is computing the return address for the return from the -called function. For ARM code this can easily be done by the code -sequence: - - mov lr, pc - bx rX - -(where rX is the name of the register containing the function -pointer). This code does not work for the Thumb instruction set, -since the MOV instruction will not set the bottom bit of the LR -register, so that when the called function returns, it will return in -ARM mode not Thumb mode. Instead the compiler generates this -sequence: - - bl _call_via_rX - -(again where rX is the name if the register containing the function -pointer). The special call_via_rX functions look like this: - - .thumb_func -_call_via_r0: - bx r0 - nop - -The BL instruction ensures that the correct return address is stored -in the LR register and then the BX instruction jumps to the address -stored in the function pointer, switch modes if necessary. - - -6. How caller-super-interworking support works -============================================== - -When the -mcaller-super-interworking command line switch is specified -it changes the code produced by the Thumb compiler so that all calls -via function pointers (including virtual function calls) now go via a -different stub function. The code to call via a function pointer now -looks like this: - - bl _interwork_call_via_r0 - -Note: The compiler does not insist that r0 be used to hold the -function address. Any register will do, and there are a suite of stub -functions, one for each possible register. The stub functions look -like this: - - .code 16 - .thumb_func -_interwork_call_via_r0 - bx pc - nop - - .code 32 - tst r0, #1 - stmeqdb r13!, {lr} - adreq lr, _arm_return - bx r0 - -The stub first switches to ARM mode, since it is a lot easier to -perform the necessary operations using ARM instructions. It then -tests the bottom bit of the register containing the address of the -function to be called. If this bottom bit is set then the function -being called uses Thumb instructions and the BX instruction to come -will switch back into Thumb mode before calling this function. (Note -that it does not matter how this called function chooses to return to -its caller, since the both the caller and callee are Thumb functions, -and mode switching is necessary). If the function being called is an -ARM mode function however, the stub pushes the return address (with -its bottom bit set) onto the stack, replaces the return address with -the address of the a piece of code called '_arm_return' and then -performs a BX instruction to call the function. - -The '_arm_return' code looks like this: - - .code 32 -_arm_return: - ldmia r13!, {r12} - bx r12 - .code 16 - - -It simply retrieves the return address from the stack, and then -performs a BX operation to return to the caller and switch back into -Thumb mode. - - -7. How callee-super-interworking support works -============================================== - -When -mcallee-super-interworking is specified on the command line the -Thumb compiler behaves as if every externally visible function that it -compiles has had the (interfacearm) attribute specified for it. What -this attribute does is to put a special, ARM mode header onto the -function which forces a switch into Thumb mode: - - without __attribute__((interfacearm)): - - .code 16 - .thumb_func - function: - ... start of function ... - - with __attribute__((interfacearm)): - - .code 32 - function: - orr r12, pc, #1 - bx r12 - - .code 16 - .thumb_func - .real_start_of_function: - - ... 
start of function ... - -Note that since the function now expects to be entered in ARM mode, it -no longer has the .thumb_func pseudo op specified for its name. -Instead the pseudo op is attached to a new label .real_start_of_ -(where is the name of the function) which indicates the start -of the Thumb code. This does have the interesting side effect in that -if this function is now called from a Thumb mode piece of code -outsside of the current file, the linker will generate a calling stub -to switch from Thumb mode into ARM mode, and then this is immediately -overridden by the function's header which switches back into Thumb -mode. - -In addition the (interfacearm) attribute also forces the function to -return by using the BX instruction, even if has not been compiled with -the -mthumb-interwork command line flag, so that the correct mode will -be restored upon exit from the function. - - -8. Some examples -================ - - Given these two test files: - - int arm (void) { return 1 + thumb (); } - - int thumb (void) { return 2 + arm (); } - - The following pieces of assembler are produced by the ARM and Thumb -version of GCC depending upon the command line options used: - - `-O2': - .code 32 .code 16 - .global _arm .global _thumb - .thumb_func - _arm: _thumb: - mov ip, sp - stmfd sp!, {fp, ip, lr, pc} push {lr} - sub fp, ip, #4 - bl _thumb bl _arm - add r0, r0, #1 add r0, r0, #2 - ldmea fp, {fp, sp, pc} pop {pc} - - Note how the functions return without using the BX instruction. If -these files were assembled and linked together they would fail to work -because they do not change mode when returning to their caller. - - `-O2 -mthumb-interwork': - - .code 32 .code 16 - .global _arm .global _thumb - .thumb_func - _arm: _thumb: - mov ip, sp - stmfd sp!, {fp, ip, lr, pc} push {lr} - sub fp, ip, #4 - bl _thumb bl _arm - add r0, r0, #1 add r0, r0, #2 - ldmea fp, {fp, sp, lr} pop {r1} - bx lr bx r1 - - Now the functions use BX to return their caller. They have grown by -4 and 2 bytes respectively, but they can now successfully be linked -together and be expect to work. The linker will replace the -destinations of the two BL instructions with the addresses of calling -stubs which convert to the correct mode before jumping to the called -function. - - `-O2 -mcallee-super-interworking': - - .code 32 .code 32 - .global _arm .global _thumb - _arm: _thumb: - orr r12, pc, #1 - bx r12 - mov ip, sp .code 16 - stmfd sp!, {fp, ip, lr, pc} push {lr} - sub fp, ip, #4 - bl _thumb bl _arm - add r0, r0, #1 add r0, r0, #2 - ldmea fp, {fp, sp, lr} pop {r1} - bx lr bx r1 - - The thumb function now has an ARM encoded prologue, and it no longer -has the `.thumb-func' pseudo op attached to it. The linker will not -generate a calling stub for the call from arm() to thumb(), but it will -still have to generate a stub for the call from thumb() to arm(). Also -note how specifying `--mcallee-super-interworking' automatically -implies `-mthumb-interworking'. - - -9. Some Function Pointer Examples -================================= - - Given this test file: - - int func (void) { return 1; } - - int call (int (* ptr)(void)) { return ptr (); } - - The following varying pieces of assembler are produced by the Thumb -version of GCC depending upon the command line options used: - - `-O2': - .code 16 - .globl _func - .thumb_func - _func: - mov r0, #1 - bx lr - - .globl _call - .thumb_func - _call: - push {lr} - bl __call_via_r0 - pop {pc} - - Note how the two functions have different exit sequences. 
In -particular call() uses pop {pc} to return, which would not work if the -caller was in ARM mode. func() however, uses the BX instruction, even -though `-mthumb-interwork' has not been specified, as this is the most -efficient way to exit a function when the return address is held in the -link register. - - `-O2 -mthumb-interwork': - - .code 16 - .globl _func - .thumb_func - _func: - mov r0, #1 - bx lr - - .globl _call - .thumb_func - _call: - push {lr} - bl __call_via_r0 - pop {r1} - bx r1 - - This time both functions return by using the BX instruction. This -means that call() is now two bytes longer and several cycles slower -than the previous version. - - `-O2 -mcaller-super-interworking': - .code 16 - .globl _func - .thumb_func - _func: - mov r0, #1 - bx lr - - .globl _call - .thumb_func - _call: - push {lr} - bl __interwork_call_via_r0 - pop {pc} - - Very similar to the first (non-interworking) version, except that a -different stub is used to call via the function pointer. This new stub -will work even if the called function is not interworking aware, and -tries to return to call() in ARM mode. Note that the assembly code for -call() is still not interworking aware itself, and so should not be -called from ARM code. - - `-O2 -mcallee-super-interworking': - - .code 32 - .globl _func - _func: - orr r12, pc, #1 - bx r12 - - .code 16 - .globl .real_start_of_func - .thumb_func - .real_start_of_func: - mov r0, #1 - bx lr - - .code 32 - .globl _call - _call: - orr r12, pc, #1 - bx r12 - - .code 16 - .globl .real_start_of_call - .thumb_func - .real_start_of_call: - push {lr} - bl __call_via_r0 - pop {r1} - bx r1 - - Now both functions have an ARM coded prologue, and both functions -return by using the BX instruction. These functions are interworking -aware therefore and can safely be called from ARM code. The code for -the call() function is now 10 bytes longer than the original, non -interworking aware version, an increase of over 200%. - - If a prototype for call() is added to the source code, and this -prototype includes the `interfacearm' attribute: - - int __attribute__((interfacearm)) call (int (* ptr)(void)); - - then this code is produced (with only -O2 specified on the command -line): - - .code 16 - .globl _func - .thumb_func - _func: - mov r0, #1 - bx lr - - .globl _call - .code 32 - _call: - orr r12, pc, #1 - bx r12 - - .code 16 - .globl .real_start_of_call - .thumb_func - .real_start_of_call: - push {lr} - bl __call_via_r0 - pop {r1} - bx r1 - - So now both call() and func() can be safely called via -non-interworking aware ARM code. If, when such a file is assembled, -the assembler detects the fact that call() is being called by another -function in the same file, it will automatically adjust the target of -the BL instruction to point to .real_start_of_call. In this way there -is no need for the linker to generate a Thumb-to-ARM calling stub so -that call can be entered in ARM mode. - - -10. 
How to use dlltool to build ARM/Thumb DLLs -============================================== - Given a program (`prog.c') like this: - - extern int func_in_dll (void); - - int main (void) { return func_in_dll(); } - - And a DLL source file (`dll.c') like this: - - int func_in_dll (void) { return 1; } - - Here is how to build the DLL and the program for a purely ARM based -environment: - -*Step One - Build a `.def' file describing the DLL: - - ; example.def - ; This file describes the contents of the DLL - LIBRARY example - HEAPSIZE 0x40000, 0x2000 - EXPORTS - func_in_dll 1 - -*Step Two - Compile the DLL source code: - - arm-pe-gcc -O2 -c dll.c - -*Step Three - Use `dlltool' to create an exports file and a library file: - - dlltool --def example.def --output-exp example.o --output-lib example.a - -*Step Four - Link together the complete DLL: - - arm-pe-ld dll.o example.o -o example.dll - -*Step Five - Compile the program's source code: - - arm-pe-gcc -O2 -c prog.c - -*Step Six - Link together the program and the DLL's library file: - - arm-pe-gcc prog.o example.a -o prog - - If instead this was a Thumb DLL being called from an ARM program, the -steps would look like this. (To save space only those steps that are -different from the previous version are shown): - -*Step Two - Compile the DLL source code (using the Thumb compiler): - - thumb-pe-gcc -O2 -c dll.c -mthumb-interwork - -*Step Three - Build the exports and library files (and support interworking): - - dlltool -d example.def -z example.o -l example.a --interwork -m thumb - -*Step Five - Compile the program's source code (and support interworking): - - arm-pe-gcc -O2 -c prog.c -mthumb-interwork - - If instead, the DLL was an old, ARM DLL which does not support -interworking, and which cannot be rebuilt, then these steps would be -used. - -*Step One - Skip. If you do not have access to the sources of a DLL, there is - no point in building a `.def' file for it. - -*Step Two - Skip. With no DLL sources there is nothing to compile. - -*Step Three - Skip. Without a `.def' file you cannot use dlltool to build an - exports file or a library file. - -*Step Four - Skip. Without a set of DLL object files you cannot build the DLL. - Besides it has already been built for you by somebody else. - -*Step Five - Compile the program's source code, this is the same as before: - - arm-pe-gcc -O2 -c prog.c - -*Step Six - Link together the program and the DLL's library file, passing the - `--support-old-code' option to the linker: - - arm-pe-gcc prog.o example.a -Wl,--support-old-code -o prog - - Ignore the warning message about the input file not supporting - interworking as the --support-old-code switch has taken care if this. diff --git a/gcc/config/arm/lib1thumb.asm b/gcc/config/arm/lib1thumb.asm deleted file mode 100755 index e0ff746..0000000 --- a/gcc/config/arm/lib1thumb.asm +++ /dev/null @@ -1,736 +0,0 @@ -@ libgcc1 routines for ARM cpu. -@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) - -/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc. - -This file is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 2, or (at your option) any -later version. 
- -In addition to the permissions in the GNU General Public License, the -Free Software Foundation gives you unlimited permission to link the -compiled version of this file with other programs, and to distribute -those programs without any restriction coming from the use of this -file. (The General Public License restrictions do apply in other -respects; for example, they cover modification of the file, and -distribution when not linked into another program.) - -This file is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* As a special exception, if you link this library with other files, - some of which are compiled with GCC, to produce an executable, - this library does not by itself cause the resulting executable - to be covered by the GNU General Public License. - This exception does not however invalidate any other reasons why - the executable file might be covered by the GNU General Public License. */ - - .code 16 - -#ifdef __elf__ -#define __PLT__ (PLT) -#define TYPE(x) .type SYM(x),function -#define SIZE(x) .size SYM(x), . - SYM(x) -#else -#define __PLT__ -#define TYPE(x) -#define SIZE(x) -#endif - -#define RET mov pc, lr - -#define SYM(x) x - -work .req r4 @ XXXX is this safe ? - -#ifdef L_udivsi3 - -dividend .req r0 -divisor .req r1 -result .req r2 -curbit .req r3 -ip .req r12 -sp .req r13 -lr .req r14 -pc .req r15 - - .text - .globl SYM (__udivsi3) - TYPE (__udivsi3) - .align 0 - .thumb_func -SYM (__udivsi3): - cmp divisor, #0 - beq Ldiv0 - mov curbit, #1 - mov result, #0 - - push { work } - cmp dividend, divisor - bcc Lgot_result - - @ Load the constant 0x10000000 into our work register - mov work, #1 - lsl work, #28 -Loop1: - @ Unless the divisor is very big, shift it up in multiples of - @ four bits, since this is the amount of unwinding in the main - @ division loop. Continue shifting until the divisor is - @ larger than the dividend. - cmp divisor, work - bcs Lbignum - cmp divisor, dividend - bcs Lbignum - lsl divisor, #4 - lsl curbit, #4 - b Loop1 - -Lbignum: - @ Set work to 0x80000000 - lsl work, #3 -Loop2: - @ For very big divisors, we must shift it a bit at a time, or - @ we will be in danger of overflowing. - cmp divisor, work - bcs Loop3 - cmp divisor, dividend - bcs Loop3 - lsl divisor, #1 - lsl curbit, #1 - b Loop2 - -Loop3: - @ Test for possible subtractions, and note which bits - @ are done in the result. On the final pass, this may subtract - @ too much from the dividend, but the result will be ok, since the - @ "bit" will have been shifted out at the bottom. - cmp dividend, divisor - bcc Over1 - sub dividend, dividend, divisor - orr result, result, curbit -Over1: - lsr work, divisor, #1 - cmp dividend, work - bcc Over2 - sub dividend, dividend, work - lsr work, curbit, #1 - orr result, work -Over2: - lsr work, divisor, #2 - cmp dividend, work - bcc Over3 - sub dividend, dividend, work - lsr work, curbit, #2 - orr result, work -Over3: - lsr work, divisor, #3 - cmp dividend, work - bcc Over4 - sub dividend, dividend, work - lsr work, curbit, #3 - orr result, work -Over4: - cmp dividend, #0 @ Early termination? 
- beq Lgot_result - lsr curbit, #4 @ No, any more bits to do? - beq Lgot_result - lsr divisor, #4 - b Loop3 -Lgot_result: - mov r0, result - pop { work } - RET - -Ldiv0: - push { lr } - bl SYM (__div0) __PLT__ - mov r0, #0 @ about as wrong as it could be - pop { pc } - - SIZE (__udivsi3) - -#endif /* L_udivsi3 */ - -#ifdef L_umodsi3 - -dividend .req r0 -divisor .req r1 -overdone .req r2 -curbit .req r3 -ip .req r12 -sp .req r13 -lr .req r14 -pc .req r15 - - .text - .globl SYM (__umodsi3) - TYPE (__umodsi3) - .align 0 - .thumb_func -SYM (__umodsi3): - cmp divisor, #0 - beq Ldiv0 - mov curbit, #1 - cmp dividend, divisor - bcs Over1 - RET - -Over1: - @ Load the constant 0x10000000 into our work register - push { work } - mov work, #1 - lsl work, #28 -Loop1: - @ Unless the divisor is very big, shift it up in multiples of - @ four bits, since this is the amount of unwinding in the main - @ division loop. Continue shifting until the divisor is - @ larger than the dividend. - cmp divisor, work - bcs Lbignum - cmp divisor, dividend - bcs Lbignum - lsl divisor, #4 - lsl curbit, #4 - b Loop1 - -Lbignum: - @ Set work to 0x80000000 - lsl work, #3 -Loop2: - @ For very big divisors, we must shift it a bit at a time, or - @ we will be in danger of overflowing. - cmp divisor, work - bcs Loop3 - cmp divisor, dividend - bcs Loop3 - lsl divisor, #1 - lsl curbit, #1 - b Loop2 - -Loop3: - @ Test for possible subtractions. On the final pass, this may - @ subtract too much from the dividend, so keep track of which - @ subtractions are done, we can fix them up afterwards... - mov overdone, #0 - cmp dividend, divisor - bcc Over2 - sub dividend, dividend, divisor -Over2: - lsr work, divisor, #1 - cmp dividend, work - bcc Over3 - sub dividend, dividend, work - mov ip, curbit - mov work, #1 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over3: - lsr work, divisor, #2 - cmp dividend, work - bcc Over4 - sub dividend, dividend, work - mov ip, curbit - mov work, #2 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over4: - lsr work, divisor, #3 - cmp dividend, work - bcc Over5 - sub dividend, dividend, work - mov ip, curbit - mov work, #3 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over5: - mov ip, curbit - cmp dividend, #0 @ Early termination? - beq Over6 - lsr curbit, #4 @ No, any more bits to do? - beq Over6 - lsr divisor, #4 - b Loop3 - -Over6: - @ Any subtractions that we should not have done will be recorded in - @ the top three bits of "overdone". Exactly which were not needed - @ are governed by the position of the bit, stored in ip. - @ If we terminated early, because dividend became zero, - @ then none of the below will match, since the bit in ip will not be - @ in the bottom nibble. 
- - mov work, #0xe - lsl work, #28 - and overdone, work - bne Over7 - pop { work } - RET @ No fixups needed -Over7: - mov curbit, ip - mov work, #3 - ror curbit, work - tst overdone, curbit - beq Over8 - lsr work, divisor, #3 - add dividend, dividend, work -Over8: - mov curbit, ip - mov work, #2 - ror curbit, work - tst overdone, curbit - beq Over9 - lsr work, divisor, #2 - add dividend, dividend, work -Over9: - mov curbit, ip - mov work, #1 - ror curbit, work - tst overdone, curbit - beq Over10 - lsr work, divisor, #1 - add dividend, dividend, work -Over10: - pop { work } - RET - -Ldiv0: - push { lr } - bl SYM (__div0) __PLT__ - mov r0, #0 @ about as wrong as it could be - pop { pc } - - SIZE (__umodsi3) - -#endif /* L_umodsi3 */ - -#ifdef L_divsi3 - -dividend .req r0 -divisor .req r1 -result .req r2 -curbit .req r3 -ip .req r12 -sp .req r13 -lr .req r14 -pc .req r15 - - .text - .globl SYM (__divsi3) - TYPE (__divsi3) - .align 0 - .thumb_func -SYM (__divsi3): - cmp divisor, #0 - beq Ldiv0 - - push { work } - mov work, dividend - eor work, divisor @ Save the sign of the result. - mov ip, work - mov curbit, #1 - mov result, #0 - cmp divisor, #0 - bpl Over1 - neg divisor, divisor @ Loops below use unsigned. -Over1: - cmp dividend, #0 - bpl Over2 - neg dividend, dividend -Over2: - cmp dividend, divisor - bcc Lgot_result - - mov work, #1 - lsl work, #28 -Loop1: - @ Unless the divisor is very big, shift it up in multiples of - @ four bits, since this is the amount of unwinding in the main - @ division loop. Continue shifting until the divisor is - @ larger than the dividend. - cmp divisor, work - Bcs Lbignum - cmp divisor, dividend - Bcs Lbignum - lsl divisor, #4 - lsl curbit, #4 - b Loop1 - -Lbignum: - @ For very big divisors, we must shift it a bit at a time, or - @ we will be in danger of overflowing. - lsl work, #3 -Loop2: - cmp divisor, work - Bcs Loop3 - cmp divisor, dividend - Bcs Loop3 - lsl divisor, #1 - lsl curbit, #1 - b Loop2 - -Loop3: - @ Test for possible subtractions, and note which bits - @ are done in the result. On the final pass, this may subtract - @ too much from the dividend, but the result will be ok, since the - @ "bit" will have been shifted out at the bottom. - cmp dividend, divisor - Bcc Over3 - sub dividend, dividend, divisor - orr result, result, curbit -Over3: - lsr work, divisor, #1 - cmp dividend, work - Bcc Over4 - sub dividend, dividend, work - lsr work, curbit, #1 - orr result, work -Over4: - lsr work, divisor, #2 - cmp dividend, work - Bcc Over5 - sub dividend, dividend, work - lsr work, curbit, #2 - orr result, result, work -Over5: - lsr work, divisor, #3 - cmp dividend, work - Bcc Over6 - sub dividend, dividend, work - lsr work, curbit, #3 - orr result, result, work -Over6: - cmp dividend, #0 @ Early termination? - Beq Lgot_result - lsr curbit, #4 @ No, any more bits to do? - Beq Lgot_result - lsr divisor, #4 - b Loop3 - -Lgot_result: - mov r0, result - mov work, ip - cmp work, #0 - Bpl Over7 - neg r0, r0 -Over7: - pop { work } - RET - -Ldiv0: - push { lr } - bl SYM (__div0) __PLT__ - mov r0, #0 @ about as wrong as it could be - pop { pc } - - SIZE (__divsi3) - -#endif /* L_divsi3 */ - -#ifdef L_modsi3 - -dividend .req r0 -divisor .req r1 -overdone .req r2 -curbit .req r3 -ip .req r12 -sp .req r13 -lr .req r14 -pc .req r15 - - .text - .globl SYM (__modsi3) - TYPE (__modsi3) - .align 0 - .thumb_func -SYM (__modsi3): - mov curbit, #1 - cmp divisor, #0 - beq Ldiv0 - Bpl Over1 - neg divisor, divisor @ Loops below use unsigned. 
-Over1: - push { work } - @ Need to save the sign of the dividend, unfortunately, we need - @ ip later on. Must do this after saving the original value of - @ the work register, because we will pop this value off first. - push { dividend } - cmp dividend, #0 - Bpl Over2 - neg dividend, dividend -Over2: - cmp dividend, divisor - bcc Lgot_result - mov work, #1 - lsl work, #28 -Loop1: - @ Unless the divisor is very big, shift it up in multiples of - @ four bits, since this is the amount of unwinding in the main - @ division loop. Continue shifting until the divisor is - @ larger than the dividend. - cmp divisor, work - bcs Lbignum - cmp divisor, dividend - bcs Lbignum - lsl divisor, #4 - lsl curbit, #4 - b Loop1 - -Lbignum: - @ Set work to 0x80000000 - lsl work, #3 -Loop2: - @ For very big divisors, we must shift it a bit at a time, or - @ we will be in danger of overflowing. - cmp divisor, work - bcs Loop3 - cmp divisor, dividend - bcs Loop3 - lsl divisor, #1 - lsl curbit, #1 - b Loop2 - -Loop3: - @ Test for possible subtractions. On the final pass, this may - @ subtract too much from the dividend, so keep track of which - @ subtractions are done, we can fix them up afterwards... - mov overdone, #0 - cmp dividend, divisor - bcc Over3 - sub dividend, dividend, divisor -Over3: - lsr work, divisor, #1 - cmp dividend, work - bcc Over4 - sub dividend, dividend, work - mov ip, curbit - mov work, #1 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over4: - lsr work, divisor, #2 - cmp dividend, work - bcc Over5 - sub dividend, dividend, work - mov ip, curbit - mov work, #2 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over5: - lsr work, divisor, #3 - cmp dividend, work - bcc Over6 - sub dividend, dividend, work - mov ip, curbit - mov work, #3 - ror curbit, work - orr overdone, curbit - mov curbit, ip -Over6: - mov ip, curbit - cmp dividend, #0 @ Early termination? - beq Over7 - lsr curbit, #4 @ No, any more bits to do? - beq Over7 - lsr divisor, #4 - b Loop3 - -Over7: - @ Any subtractions that we should not have done will be recorded in - @ the top three bits of "overdone". Exactly which were not needed - @ are governed by the position of the bit, stored in ip. - @ If we terminated early, because dividend became zero, - @ then none of the below will match, since the bit in ip will not be - @ in the bottom nibble. - mov work, #0xe - lsl work, #28 - and overdone, work - beq Lgot_result - - mov curbit, ip - mov work, #3 - ror curbit, work - tst overdone, curbit - beq Over8 - lsr work, divisor, #3 - add dividend, dividend, work -Over8: - mov curbit, ip - mov work, #2 - ror curbit, work - tst overdone, curbit - beq Over9 - lsr work, divisor, #2 - add dividend, dividend, work -Over9: - mov curbit, ip - mov work, #1 - ror curbit, work - tst overdone, curbit - beq Lgot_result - lsr work, divisor, #1 - add dividend, dividend, work -Lgot_result: - pop { work } - cmp work, #0 - bpl Over10 - neg dividend, dividend -Over10: - pop { work } - RET - -Ldiv0: - push { lr } - bl SYM (__div0) __PLT__ - mov r0, #0 @ about as wrong as it could be - pop { pc } - - SIZE (__modsi3) - -#endif /* L_modsi3 */ - -#ifdef L_dvmd_tls - - .globl SYM (__div0) - TYPE (__div0) - .align 0 - .thumb_func -SYM (__div0): - RET - - SIZE (__div0) - -#endif /* L_divmodsi_tools */ - - -#ifdef L_call_via_rX - -/* These labels & instructions are used by the Arm/Thumb interworking code. - The address of function to be called is loaded into a register and then - one of these labels is called via a BL instruction. 
This puts the - return address into the link register with the bottom bit set, and the - code here switches to the correct mode before executing the function. */ - - .text - .align 0 - -.macro call_via register - .globl SYM (_call_via_\register) - TYPE (_call_via_\register) - .thumb_func -SYM (_call_via_\register): - bx \register - nop - - SIZE (_call_via_\register) -.endm - - call_via r0 - call_via r1 - call_via r2 - call_via r3 - call_via r4 - call_via r5 - call_via r6 - call_via r7 - call_via r8 - call_via r9 - call_via sl - call_via fp - call_via ip - call_via sp - call_via lr - -#endif /* L_call_via_rX */ - -#ifdef L_interwork_call_via_rX - -/* These labels & instructions are used by the Arm/Thumb interworking code, - when the target address is in an unknown instruction set. The address - of function to be called is loaded into a register and then one of these - labels is called via a BL instruction. This puts the return address - into the link register with the bottom bit set, and the code here - switches to the correct mode before executing the function. Unfortunately - the target code cannot be relied upon to return via a BX instruction, so - instead we have to store the resturn address on the stack and allow the - called function to return here instead. Upon return we recover the real - return address and use a BX to get back to Thumb mode. */ - - .text - .align 0 - - .code 32 - .globl _arm_return -_arm_return: - ldmia r13!, {r12} - bx r12 - -.macro interwork register - .code 16 - - .globl SYM (_interwork_call_via_\register) - TYPE (_interwork_call_via_\register) - .thumb_func -SYM (_interwork_call_via_\register): - bx pc - nop - - .code 32 - .globl .Lchange_\register -.Lchange_\register: - tst \register, #1 - stmeqdb r13!, {lr} - adreq lr, _arm_return - bx \register - - SIZE (_interwork_call_via_\register) -.endm - - interwork r0 - interwork r1 - interwork r2 - interwork r3 - interwork r4 - interwork r5 - interwork r6 - interwork r7 - interwork r8 - interwork r9 - interwork sl - interwork fp - interwork ip - interwork sp - - /* The lr case has to be handled a little differently...*/ - .code 16 - .globl SYM (_interwork_call_via_lr) - TYPE (_interwork_call_via_lr) - .thumb_func -SYM (_interwork_call_via_lr): - bx pc - nop - - .code 32 - .globl .Lchange_lr -.Lchange_lr: - tst lr, #1 - stmeqdb r13!, {lr} - mov ip, lr - adreq lr, _arm_return - bx ip - - SIZE (_interwork_call_via_lr) - -#endif /* L_interwork_call_via_rX */ diff --git a/gcc/config/arm/t-thumb-elf b/gcc/config/arm/t-thumb-elf deleted file mode 100755 index 3e940f8..0000000 --- a/gcc/config/arm/t-thumb-elf +++ /dev/null @@ -1,32 +0,0 @@ -# CYGNUS LOCAL (entire file) clm/arm-elf -CROSS_LIBGCC1 = libgcc1-asm.a -LIB1ASMSRC = arm/lib1thumb.asm -LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX _interwork_call_via_rX -# adddi3/subdi3 added to machine description - -# These are really part of libgcc1, but this will cause them to be -# built correctly, so... 
- -LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c - -fp-bit.c: $(srcdir)/config/fp-bit.c - echo '#define FLOAT' > fp-bit.c - echo '#ifndef __ARMEB__' >> fp-bit.c - echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c - echo '#endif' >> fp-bit.c - cat $(srcdir)/config/fp-bit.c >> fp-bit.c - -dp-bit.c: $(srcdir)/config/fp-bit.c - echo '#ifndef __ARMEB__' > dp-bit.c - echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c - echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c - echo '#endif' >> dp-bit.c - cat $(srcdir)/config/fp-bit.c >> dp-bit.c - -# Avoid building a duplicate set of libraries for the default endian-ness. -MULTILIB_OPTIONS = mno-thumb-interwork/mthumb-interwork -MULTILIB_DIRNAMES = normal interwork -MULTILIB_MATCHES = - -LIBGCC = stmp-multilib -INSTALL_LIBGCC = install-multilib diff --git a/gcc/config/arm/telf.h b/gcc/config/arm/telf.h deleted file mode 100755 index 9bd6bcf..0000000 --- a/gcc/config/arm/telf.h +++ /dev/null @@ -1,368 +0,0 @@ -/* CYGNUS LOCAL (entire file) clm/arm-elf */ -/* Definitions of target machine for GNU compiler, - for Thumb with ELF obj format. - Copyright (C) 1995, 1996 Free Software Foundation, Inc. - -This file is part of GNU CC. - -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. - -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -#define OBJECT_FORMAT_ELF - -#include "arm/thumb.h" -#include "tree.h" - -/* Run-time Target Specification. */ -#undef TARGET_VERSION -#define TARGET_VERSION fputs (" (Thumb/elf)", stderr) - -#define MULTILIB_DEFAULTS { "mlittle-endian" } - -/* Setting this to 32 produces more efficient code, but the value set in previous - versions of this toolchain was 8, which produces more compact structures. The - command line option -mstructure_size_boundary= can be used to change this - value. */ -#undef STRUCTURE_SIZE_BOUNDARY -#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary - -extern int arm_structure_size_boundary; - -/* Debug */ -#define DWARF2_DEBUGGING_INFO - - -/* Note - it is important that these definitions match those in semi.h for the ARM port. */ -#undef LOCAL_LABEL_PREFIX -#define LOCAL_LABEL_PREFIX "." - - -/* A C statement to output assembler commands which will identify the - object file as having been compiled with GNU CC (or another GNU - compiler). */ -#define ASM_IDENTIFY_GCC(STREAM) \ - fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX ) - -#undef ASM_FILE_START -#define ASM_FILE_START(STREAM) \ -do { \ - extern char *version_string; \ - fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \ - ASM_COMMENT_START, version_string); \ - fprintf ((STREAM), ASM_APP_OFF); \ -} while (0) - -/* A C statement to output something to the assembler file to switch to section - NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or - NULL_TREE. Some target formats do not support arbitrary sections. Do not - define this macro in such cases. 
*/ -#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \ -do { \ - if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \ - fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \ - else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \ - fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \ - else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \ - fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \ - else \ - fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \ -} while (0) - -#undef INIT_SECTION_ASM_OP - -/* Define this macro if jump tables (for `tablejump' insns) should be - output in the text section, along with the assembler instructions. - Otherwise, the readonly data section is used. */ -#define JUMP_TABLES_IN_TEXT_SECTION 1 - -#undef READONLY_DATA_SECTION -#define READONLY_DATA_SECTION rdata_section -#undef RDATA_SECTION_ASM_OP -#define RDATA_SECTION_ASM_OP "\t.section .rodata" - -/* If defined, a C expression whose value is a string containing the - assembler operation to identify the following data as - uninitialized global data. If not defined, and neither - `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined, - uninitialized global data will be output in the data section if - `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be - used. */ -#ifndef BSS_SECTION_ASM_OP -#define BSS_SECTION_ASM_OP ".section\t.bss" -#endif - -/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a - separate, explicit argument. If you define this macro, it is used - in place of `ASM_OUTPUT_BSS', and gives you more flexibility in - handling the required alignment of the variable. The alignment is - specified as the number of bits. - - Try to use function `asm_output_aligned_bss' defined in file - `varasm.c' when defining this macro. */ -#ifndef ASM_OUTPUT_ALIGNED_BSS -#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ - asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN) -#endif - -/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in - dwarf2.out. */ -#define UNALIGNED_WORD_ASM_OP ".4byte" - -#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \ - if (((ADDR)[0] == '.') && ((ADDR)[1] == 'L')) \ - fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR)); \ - else \ - fprintf ((FILE), "\t%s\t%s", \ - UNALIGNED_WORD_ASM_OP, (ADDR)) - -#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \ -do { \ - fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \ - output_addr_const ((FILE), (RTX)); \ - fputc ('\n', (FILE)); \ -} while (0) - -/* This is how to equate one symbol to another symbol. The syntax used is - `SYM1=SYM2'. Note that this is different from the way equates are done - with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */ - -#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ - do { fprintf ((FILE), "\t"); \ - assemble_name (FILE, LABEL1); \ - fprintf (FILE, " = "); \ - assemble_name (FILE, LABEL2); \ - fprintf (FILE, "\n"); \ - } while (0) - -/* For aliases of functions we use .thumb_set instead. 
*/ -#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \ - do \ - { \ - char * LABEL1 = XSTR (XEXP (DECL_RTL (decl), 0), 0); \ - char * LABEL2 = IDENTIFIER_POINTER (DECL2); \ - \ - if (TREE_CODE (DECL1) == FUNCTION_DECL) \ - { \ - fprintf (FILE, "\t.thumb_set "); \ - assemble_name (FILE, LABEL1); \ - fprintf (FILE, ","); \ - assemble_name (FILE, LABEL2); \ - fprintf (FILE, "\n"); \ - } \ - else \ - ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \ - } \ - while (0) - -/* A list of other sections which the compiler might be "in" at any - given time. */ -#undef EXTRA_SECTIONS -#define EXTRA_SECTIONS in_rdata - -/* A list of extra section function definitions. */ - -#undef EXTRA_SECTION_FUNCTIONS -#define EXTRA_SECTION_FUNCTIONS \ - RDATA_SECTION_FUNCTION - -#define RDATA_SECTION_FUNCTION \ -void \ -rdata_section () \ -{ \ - if (in_section != in_rdata) \ - { \ - fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \ - in_section = in_rdata; \ - } \ -} - -#define INT_ASM_OP ".word" - -#define INVOKE__main - -#undef STARTFILE_SPEC -#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s" - -#undef ENDFILE_SPEC -#define ENDFILE_SPEC "crtend%O%s" - -/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS - is a valid machine specific attribute for DECL. - The attributes in ATTRIBUTES have previously been assigned to DECL. */ -extern int arm_valid_machine_decl_attribute(tree decl, tree attributes, tree attr, tree args); -#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \ - arm_valid_machine_decl_attribute(DECL, ATTRIBUTES, IDENTIFIER, ARGS) - -/* The ARM development system defines __main. */ -#define NAME__MAIN "__gccmain" -#define SYMBOL__MAIN __gccmain - -#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL)) -#define UNIQUE_SECTION(DECL,RELOC) \ -do { \ - int len; \ - char * name, * string, * prefix; \ - \ - name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \ - \ - if (! DECL_ONE_ONLY (DECL)) \ - { \ - prefix = "."; \ - if (TREE_CODE (DECL) == FUNCTION_DECL) \ - prefix = ".text."; \ - else if (DECL_READONLY_SECTION (DECL, RELOC)) \ - prefix = ".rodata."; \ - else \ - prefix = ".data."; \ - } \ - else if (TREE_CODE (DECL) == FUNCTION_DECL) \ - prefix = ".gnu.linkonce.t."; \ - else if (DECL_READONLY_SECTION (DECL, RELOC)) \ - prefix = ".gnu.linkonce.r."; \ - else \ - prefix = ".gnu.linkonce.d."; \ - \ - len = strlen (name) + strlen (prefix); \ - string = alloca (len + 1); \ - sprintf (string, "%s%s", prefix, name); \ - \ - DECL_SECTION_NAME (DECL) = build_string (len, string); \ -} while (0) - -/* This is how we tell the assembler that a symbol is weak. */ -#ifndef ASM_WEAKEN_LABEL -#define ASM_WEAKEN_LABEL(FILE, NAME) \ - do \ - { \ - fputs ("\t.weak\t", FILE); \ - assemble_name (FILE, NAME); \ - fputc ('\n', FILE); \ - } \ - while (0) -#endif - -#ifndef TYPE_ASM_OP - -/* These macros generate the special .type and .size directives which - are used to set the corresponding fields of the linker symbol table - entries in an ELF object file under SVR4. These macros also output - the starting labels for the relevant functions/objects. */ -#define TYPE_ASM_OP ".type" -#define SIZE_ASM_OP ".size" - -/* The following macro defines the format used to output the second - operand of the .type assembler directive. Different svr4 assemblers - expect various different forms for this operand. The one given here - is just a default. You may need to override it in your machine- - specific tm.h file (depending upon the particulars of your assembler). 
*/ -#define TYPE_OPERAND_FMT "%s" - -/* Write the extra assembler code needed to declare a function's result. - Most svr4 assemblers don't require any special declaration of the - result value, but there are exceptions. */ -#ifndef ASM_DECLARE_RESULT -#define ASM_DECLARE_RESULT(FILE, RESULT) -#endif - -/* Write the extra assembler code needed to declare a function properly. - Some svr4 assemblers need to also have something extra said about the - function's return value. We allow for that here. */ -#undef ASM_DECLARE_FUNCTION_NAME -#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ - do \ - { \ - fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \ - assemble_name (FILE, NAME); \ - putc (',', FILE); \ - fprintf (FILE, TYPE_OPERAND_FMT, "function"); \ - putc ('\n', FILE); \ - ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ - fprintf (FILE, "\t.thumb_func\n") ; \ - ASM_OUTPUT_LABEL(FILE, NAME); \ - } \ - while (0) - -/* Write the extra assembler code needed to declare an object properly. */ -#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \ - do \ - { \ - fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \ - assemble_name (FILE, NAME); \ - putc (',', FILE); \ - fprintf (FILE, TYPE_OPERAND_FMT, "object"); \ - putc ('\n', FILE); \ - size_directive_output = 0; \ - if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \ - { \ - size_directive_output = 1; \ - fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ - assemble_name (FILE, NAME); \ - putc (',', FILE); \ - fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \ - int_size_in_bytes (TREE_TYPE (DECL))); \ - fputc ('\n', FILE); \ - } \ - ASM_OUTPUT_LABEL(FILE, NAME); \ - } \ - while (0) - -/* Output the size directive for a decl in rest_of_decl_compilation - in the case where we did not do so before the initializer. - Once we find the error_mark_node, we know that the value of - size_directive_output was set - by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */ -#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \ - do \ - { \ - char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ - if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \ - && ! AT_END && TOP_LEVEL \ - && DECL_INITIAL (DECL) == error_mark_node \ - && !size_directive_output) \ - { \ - size_directive_output = 1; \ - fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ - assemble_name (FILE, name); \ - putc (',', FILE); \ - fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \ - int_size_in_bytes (TREE_TYPE (DECL))); \ - fputc ('\n', FILE); \ - } \ - } \ - while (0) - -/* This is how to declare the size of a function. */ -#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \ - do \ - { \ - if (!flag_inhibit_size_directive) \ - { \ - char label[256]; \ - static int labelno; \ - labelno ++; \ - ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \ - ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \ - fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ - assemble_name (FILE, (FNAME)); \ - fprintf (FILE, ","); \ - assemble_name (FILE, label); \ - fprintf (FILE, "-"); \ - assemble_name (FILE, (FNAME)); \ - putc ('\n', FILE); \ - } \ - } \ - while (0) - -#endif /* TYPE_ASM_OP */ diff --git a/gcc/config/arm/thumb.c b/gcc/config/arm/thumb.c deleted file mode 100755 index 0310a51..0000000 --- a/gcc/config/arm/thumb.c +++ /dev/null @@ -1,1527 +0,0 @@ -/* Output routines for GCC for ARM/Thumb - Copyright (C) 1996 Cygnus Software Technologies Ltd - The basis of this contribution was generated by - Richard Earnshaw, Advanced RISC Machines Ltd - - This file is part of GNU CC. 
- - GNU CC is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - GNU CC is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with GNU CC; see the file COPYING. If not, write to - the Free Software Foundation, 59 Temple Place - Suite 330, - Boston, MA 02111-1307, USA. */ - -#include -#include -#include "config.h" -#include "rtl.h" -#include "hard-reg-set.h" -#include "regs.h" -#include "output.h" -#include "insn-flags.h" -#include "insn-attr.h" -#include "insn-config.h" -#include "flags.h" -#include "tree.h" -#include "expr.h" -#include "toplev.h" -#include "recog.h" - -int current_function_anonymous_args = 0; -static int current_function_has_far_jump = 0; - -/* Used to parse -mstructure_size_boundary command line option. */ -char *structure_size_string = NULL; -int arm_structure_size_boundary = 32; /* Used to be 8 */ - -/* Predicates */ - -/* Return nonzero if op is suitable for the RHS of a cmp instruction. */ -int -thumb_cmp_operand(rtx op, enum machine_mode mode) -{ - return ((GET_CODE(op) == CONST_INT - && (HOST_WIDE_UINT) (INTVAL(op)) < 256) - || register_operand(op, mode)); -} - -int -thumb_shiftable_const(HOST_WIDE_INT val) -{ - HOST_WIDE_UINT x = val; - HOST_WIDE_UINT mask = 0xff; - int i; - - for (i = 0; i < 25; i++) - if ((val & (mask << i)) == val) - return 1; - - return 0; -} - -/* Routines for handling the constant pool */ -/* This is unashamedly hacked from the version in sh.c, since the problem is - extremely similar. */ - -/* Thumb instructions cannot load a large constant into a register, - constants have to come from a pc relative load. The reference of a pc - relative load instruction must be less than 1k infront of the instruction. - This means that we often have to dump a constant inside a function, and - generate code to branch around it. - - It is important to minimize this, since the branches will slow things - down and make things bigger. - - Worst case code looks like: - - ldr rn, L1 - b L2 - align - L1: .long value - L2: - .. - - ldr rn, L3 - b L4 - align - L3: .long value - L4: - .. - - We fix this by performing a scan before scheduling, which notices which - instructions need to have their operands fetched from the constant table - and builds the table. - - - The algorithm is: - - scan, find an instruction which needs a pcrel move. Look forward, find the - last barrier which is within MAX_COUNT bytes of the requirement. - If there isn't one, make one. Process all the instructions between - the find and the barrier. - - In the above example, we can tell that L3 is within 1k of L1, so - the first move can be shrunk from the 2 insn+constant sequence into - just 1 insn, and the constant moved to L3 to make: - - ldr rn, L1 - .. - ldr rn, L3 - b L4 - align - L1: .long value - L3: .long value - L4: - - Then the second move becomes the target for the shortening process. 
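To make the 'K' constraint test above concrete before the pool machinery is introduced: thumb_shiftable_const accepts exactly the values that fit in an 8-bit field shifted left by 0 to 24 bits. A minimal standalone version with two sample values (the wrapper below is illustrative and not part of the port):

#include <stdio.h>

static int shiftable_const(unsigned long val)
{
    unsigned long mask = 0xff;
    int i;

    /* Same loop as thumb_shiftable_const: try every position of an 8-bit window.  */
    for (i = 0; i < 25; i++)
        if ((val & (mask << i)) == val)
            return 1;
    return 0;
}

int main(void)
{
    printf("%d\n", shiftable_const(0x1FE00UL));  /* 1: 0xFF shifted left by 9   */
    printf("%d\n", shiftable_const(0x101UL));    /* 0: needs a 9-bit wide field */
    return 0;
}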
- - */ - -typedef struct -{ - rtx value; /* Value in table */ - HOST_WIDE_INT next_offset; - enum machine_mode mode; /* Mode of value */ -} pool_node; - -/* The maximum number of constants that can fit into one pool, since - the pc relative range is 0...1020 bytes and constants are at least 4 - bytes long */ - -#define MAX_POOL_SIZE (1020/4) -static pool_node pool_vector[MAX_POOL_SIZE]; -static int pool_size; -static rtx pool_vector_label; - -/* Add a constant to the pool and return its label. */ - -static HOST_WIDE_INT -add_constant(rtx x, enum machine_mode mode) -{ - int i; - rtx lab; - HOST_WIDE_INT offset; - - if (mode == SImode && GET_CODE(x) == MEM && CONSTANT_P(XEXP(x, 0)) - && CONSTANT_POOL_ADDRESS_P(XEXP(x, 0))) - x = get_pool_constant(XEXP(x, 0)); - - /* First see if we've already got it */ - - for (i = 0; i < pool_size; i++) - { - if (x->code == pool_vector[i].value->code - && mode == pool_vector[i].mode) - { - if (x->code == CODE_LABEL) - { - if (XINT(x, 3) != XINT(pool_vector[i].value, 3)) - continue; - } - if (rtx_equal_p(x, pool_vector[i].value)) - return pool_vector[i].next_offset - GET_MODE_SIZE(mode); - } - } - - /* Need a new one */ - - pool_vector[pool_size].next_offset = GET_MODE_SIZE(mode); - offset = 0; - if (pool_size == 0) - pool_vector_label = gen_label_rtx(); - else - pool_vector[pool_size].next_offset - += (offset = pool_vector[pool_size - 1].next_offset); - - pool_vector[pool_size].value = x; - pool_vector[pool_size].mode = mode; - pool_size++; - return offset; -} - -/* Output the literal table */ - -static void -dump_table(rtx scan) -{ - int i; - - scan = emit_label_after(gen_label_rtx(), scan); - scan = emit_insn_after(gen_align_4(), scan); - scan = emit_label_after(pool_vector_label, scan); - - for (i = 0; i < pool_size; i++) - { - pool_node *p = pool_vector + i; - - switch (GET_MODE_SIZE(p->mode)) - { - case 4: - scan = emit_insn_after(gen_consttable_4(p->value), scan); - break; - - case 8: - scan = emit_insn_after(gen_consttable_8(p->value), scan); - break; - - default: - abort(); - break; - } - } - - scan = emit_insn_after(gen_consttable_end(), scan); - scan = emit_barrier_after(scan); - pool_size = 0; -} - -/* Non zero if the src operand needs to be fixed up */ -static -int -fixit(rtx src, enum machine_mode mode) -{ - return ((CONSTANT_P(src) - && (GET_CODE(src) != CONST_INT - || !(CONST_OK_FOR_LETTER_P(INTVAL(src), 'I') - || CONST_OK_FOR_LETTER_P(INTVAL(src), 'J') - || (mode != DImode - && CONST_OK_FOR_LETTER_P(INTVAL(src), 'K'))))) - || (mode == SImode && GET_CODE(src) == MEM - && GET_CODE(XEXP(src, 0)) == SYMBOL_REF - && CONSTANT_POOL_ADDRESS_P(XEXP(src, 0)))); -} - -/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. 
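The offset bookkeeping in add_constant above is easy to lose among the rtx handling: each pool entry records the running pool size after it has been added (next_offset), and the caller gets back the offset at which the new entry starts. A stripped-down sketch, with the rtx values elided and only the sizes kept (add_entry and next_offset[] are illustrative names):

#include <stdio.h>

#define MAX_POOL 8

static long next_offset[MAX_POOL];  /* running pool size after each entry; no overflow checks */
static int pool_size;

static long add_entry(long size_in_bytes)
{
    long offset = 0;

    next_offset[pool_size] = size_in_bytes;
    if (pool_size > 0)
        next_offset[pool_size] += (offset = next_offset[pool_size - 1]);
    pool_size++;
    return offset;                  /* where this entry starts in the pool */
}

int main(void)
{
    printf("%ld\n", add_entry(4));  /* 0: first SImode constant      */
    printf("%ld\n", add_entry(8));  /* 4: a DImode constant after it */
    printf("%ld\n", add_entry(4));  /* 12                            */
    return 0;
}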
*/ - -#define MAX_COUNT_SI 1000 - -static rtx -find_barrier(rtx from) -{ - int count = 0; - rtx found_barrier = 0; - rtx label; - - while (from && count < MAX_COUNT_SI) - { - if (GET_CODE(from) == BARRIER) - return from; - - /* Count the length of this insn */ - if (GET_CODE(from) == INSN - && GET_CODE(PATTERN(from)) == SET - && CONSTANT_P(SET_SRC(PATTERN(from))) - && CONSTANT_POOL_ADDRESS_P(SET_SRC(PATTERN(from)))) - { - rtx src = SET_SRC(PATTERN(from)); - count += 2; - } - else - count += get_attr_length(from); - - from = NEXT_INSN(from); - } - - /* We didn't find a barrier in time to - dump our stuff, so we'll make one */ - label = gen_label_rtx(); - - if (from) - from = PREV_INSN(from); - else - from = get_last_insn(); - - /* Walk back to be just before any jump */ - while (GET_CODE(from) == JUMP_INSN - || GET_CODE(from) == NOTE - || GET_CODE(from) == CODE_LABEL) - from = PREV_INSN(from); - - from = emit_jump_insn_after(gen_jump(label), from); - JUMP_LABEL(from) = label; - found_barrier = emit_barrier_after(from); - emit_label_after(label, found_barrier); - return found_barrier; -} - -/* Non zero if the insn is a move instruction which needs to be fixed. */ - -static int -broken_move(rtx insn) -{ - if (!INSN_DELETED_P(insn) - && GET_CODE(insn) == INSN - && GET_CODE(PATTERN(insn)) == SET) - { - rtx pat = PATTERN(insn); - rtx src = SET_SRC(pat); - rtx dst = SET_DEST(pat); - enum machine_mode mode = GET_MODE(dst); - if (dst == pc_rtx) - return 0; - return fixit(src, mode); - } - return 0; -} - -/* Recursively search through all of the blocks in a function - checking to see if any of the variables created in that - function match the RTX called 'orig'. If they do then - replace them with the RTX called 'replacement'. */ - -static void -replace_symbols_in_block(tree block, rtx orig, rtx replacement) -{ - for (; block; block = BLOCK_CHAIN(block)) - { - tree sym; - - if (!TREE_USED(block)) - continue; - - for (sym = BLOCK_VARS(block); sym; sym = TREE_CHAIN(sym)) - { - if ( (DECL_NAME(sym) == 0 && TREE_CODE(sym) != TYPE_DECL) - || DECL_IGNORED_P(sym) - || TREE_CODE(sym) != VAR_DECL - || DECL_EXTERNAL(sym) - || !rtx_equal_p(DECL_RTL(sym), orig) - ) - continue; - - DECL_RTL(sym) = replacement; - } - - replace_symbols_in_block(BLOCK_SUBBLOCKS(block), orig, replacement); - } -} - -void -thumb_reorg(rtx first) -{ - rtx insn; - for (insn = first; insn; insn = NEXT_INSN(insn)) - { - if (broken_move(insn)) - { - /* This is a broken move instruction, scan ahead looking for - a barrier to stick the constant table behind */ - rtx scan; - rtx barrier = find_barrier(insn); - - /* Now find all the moves between the points and modify them */ - for (scan = insn; scan != barrier; scan = NEXT_INSN(scan)) - { - if (broken_move(scan)) - { - /* This is a broken move instruction, add it to the pool */ - rtx pat = PATTERN(scan); - rtx src = SET_SRC(pat); - rtx dst = SET_DEST(pat); - enum machine_mode mode = GET_MODE(dst); - HOST_WIDE_INT offset; - rtx newinsn; - rtx newsrc; - - /* If this is an HImode constant load, convert it into - an SImode constant load. Since the register is always - 32 bits this is safe. We have to do this, since the - load pc-relative instruction only does a 32-bit load. 
*/ - if (mode == HImode) - { - mode = SImode; - if (GET_CODE(dst) != REG) - abort(); - PUT_MODE(dst, SImode); - } - - offset = add_constant(src, mode); - newsrc = gen_rtx(MEM, mode, - plus_constant(gen_rtx(LABEL_REF, - VOIDmode, - pool_vector_label), - offset)); - - /* Build a jump insn wrapper around the move instead - of an ordinary insn, because we want to have room for - the target label rtx in fld[7], which an ordinary - insn doesn't have. */ - newinsn = emit_jump_insn_after(gen_rtx(SET, VOIDmode, - dst, newsrc), scan); - JUMP_LABEL(newinsn) = pool_vector_label; - - /* But it's still an ordinary insn */ - PUT_CODE(newinsn, INSN); - - /* If debugging information is going to be emitted - then we must make sure that any refences to - symbols which are removed by the above code are - also removed in the descriptions of the - function's variables. Failure to do this means - that the debugging information emitted could - refer to symbols which are not emited by - output_constant_pool() because - mark_constant_pool() never sees them as being - used. */ - - - /* These are the tests used in - output_constant_pool() to decide if the constant - pool will be marked. Only necessary if debugging - info is being emitted. Only necessary for - references to memory whose address is given by a - symbol. */ - - if (optimize > 0 - && flag_expensive_optimizations - && write_symbols != NO_DEBUG - && GET_CODE(src) == MEM - && GET_CODE(XEXP(src, 0)) == SYMBOL_REF) - replace_symbols_in_block - (DECL_INITIAL(current_function_decl), src, newsrc); - - /* Kill old insn */ - delete_insn(scan); - scan = newinsn; - } - } - dump_table(barrier); - } - } -} - -/* Routines for generating rtl */ - -void -thumb_expand_movstrqi(rtx *operands) -{ - rtx out = copy_to_mode_reg(SImode, XEXP(operands[0], 0)); - rtx in = copy_to_mode_reg(SImode, XEXP(operands[1], 0)); - HOST_WIDE_INT len = INTVAL(operands[2]); - HOST_WIDE_INT offset = 0; - - while (len >= 12) - { - emit_insn(gen_movmem12b(out, in)); - len -= 12; - } - if (len >= 8) - { - emit_insn(gen_movmem8b(out, in)); - len -= 8; - } - if (len >= 4) - { - rtx reg = gen_reg_rtx(SImode); - emit_insn(gen_movsi(reg, gen_rtx(MEM, SImode, in))); - emit_insn(gen_movsi(gen_rtx(MEM, SImode, out), reg)); - len -= 4; - offset += 4; - } - if (len >= 2) - { - rtx reg = gen_reg_rtx(HImode); - emit_insn(gen_movhi(reg, gen_rtx(MEM, HImode, - plus_constant(in, offset)))); - emit_insn(gen_movhi(gen_rtx(MEM, HImode, plus_constant(out, offset)), - reg)); - len -= 2; - offset += 2; - } - if (len) - { - rtx reg = gen_reg_rtx(QImode); - emit_insn(gen_movqi(reg, gen_rtx(MEM, QImode, - plus_constant(in, offset)))); - emit_insn(gen_movqi(gen_rtx(MEM, QImode, plus_constant(out, offset)), - reg)); - } -} - -/* Routines for reloading */ - -void -thumb_reload_out_si(rtx operands) -{ - abort(); -} - -static int -arm_naked_function_p(tree func) -{ - tree a; - - if (TREE_CODE(func) != FUNCTION_DECL) - abort(); - - a = lookup_attribute("naked", DECL_MACHINE_ATTRIBUTES(func)); - return a != NULL_TREE; -} - -/* Routines for emitting code */ - -void -final_prescan_insn(rtx insn) -{ - extern int *insn_addresses; - - if (flag_print_asm_name) - fprintf(asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START, - insn_addresses[INSN_UID(insn)]); -} - - -static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. 
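The shape of the expansion done by thumb_expand_movstrqi above: as many 12-byte ldmia/stmia groups as fit, at most one 8-byte group, and then single word, halfword and byte moves for the tail. A sketch that just prints the plan for a given length (show_copy_plan is an illustrative helper, and the names printed are shorthand for the patterns emitted above):

#include <stdio.h>

static void show_copy_plan(long len)
{
    while (len >= 12) { printf("movmem12b\n"); len -= 12; }
    if (len >= 8)     { printf("movmem8b\n");  len -= 8;  }
    if (len >= 4)     { printf("movsi\n");     len -= 4;  }
    if (len >= 2)     { printf("movhi\n");     len -= 2;  }
    if (len)            printf("movqi\n");
}

int main(void)
{
    show_copy_plan(27);   /* 12 + 12 + 2 + 1 bytes */
    return 0;
}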
*/ - -static inline int -number_of_first_bit_set(int mask) -{ - int bit; - - for (bit = 0; - (mask & (1 << bit)) == 0; - ++bit) - continue; - - return bit; -} - -#define ARG_1_REGISTER 0 -#define ARG_2_REGISTER 1 -#define ARG_3_REGISTER 2 -#define ARG_4_REGISTER 3 -#define WORK_REGISTER 7 -#define FRAME_POINTER 11 -#define IP_REGISTER 12 -#define STACK_POINTER STACK_POINTER_REGNUM -#define LINK_REGISTER 14 -#define PROGRAM_COUNTER 15 - -/* Generate code to return from a thumb function. - If 'reg_containing_return_addr' is -1, then the return address is - actually on the stack, at the stack pointer. */ -static void -thumb_exit(FILE *f, int reg_containing_return_addr) -{ - int reg_available_for_popping; - int mode; - int size; - int restore_a4 = FALSE; - - if (reg_containing_return_addr != -1) - { - /* If the return address is in a register, - then just emit the BX instruction and return. */ - asm_fprintf(f, "\tbx\t%s\n", reg_names[reg_containing_return_addr]); - return; - } - - if (!TARGET_THUMB_INTERWORK) - { - /* If we are not supporting interworking, - then just pop the return address straight into the PC. */ - asm_fprintf(f, "\tpop\t{pc}\n" ); - return; - } - - /* If we can deduce the registers used from the function's return value. - This is more reliable that examining regs_ever_live[] because that - will be set if the register is ever used in the function, not just if - the register is used to hold a return value. */ - - if (current_function_return_rtx != 0) - mode = GET_MODE(current_function_return_rtx); - else - mode = DECL_MODE(DECL_RESULT(current_function_decl)); - - size = GET_MODE_SIZE(mode); - - if (size == 0) - { - /* In a void function we can use any argument register. - In a function that returns a structure on the stack - we can use the second and third argument registers. */ - if (mode == VOIDmode) - reg_available_for_popping = ARG_1_REGISTER; - else - reg_available_for_popping = ARG_2_REGISTER; - } - else if (size <= 4) - { - reg_available_for_popping = ARG_2_REGISTER; - } - else if (size <= 8) - { - reg_available_for_popping = ARG_3_REGISTER; - } - else - { - reg_available_for_popping = ARG_4_REGISTER; - - if (size > 12) - { - /* Register a4 is being used to hold part of the return value, - but we have dire need of a free, low register. */ - restore_a4 = TRUE; - - asm_fprintf(f, "\tmov\t%s, %s\n", - reg_names[IP_REGISTER], reg_names[ARG_4_REGISTER]); - } - } - - /* Pop the return address. */ - thumb_pushpop(f, (1 << reg_available_for_popping), FALSE); - - reg_containing_return_addr = reg_available_for_popping; - - /* If necessary restore the a4 register. */ - if (restore_a4) - { - asm_fprintf(f, "\tmov\t%s, %s\n", - reg_names[LINK_REGISTER], reg_names[ARG_4_REGISTER]); - - reg_containing_return_addr = LINK_REGISTER; - - asm_fprintf(f, "\tmov\t%s, %s\n", - reg_names[ARG_4_REGISTER], reg_names[IP_REGISTER]); - } - - /* Return to caller. */ - asm_fprintf(f, "\tbx\t%s\n", reg_names[reg_containing_return_addr]); -} - -/* Emit code to push or pop registers to or from the stack. */ -static void -thumb_pushpop(FILE *f, int mask, int push) -{ - int regno; - int lo_mask = mask & 0xFF; - - if (lo_mask == 0 && !push && (mask & (1 << 15))) - { - /* Special case. Do not generate a POP PC statement here, do it in - thumb_exit() */ - - thumb_exit(f, -1); - return; - } - - asm_fprintf(f, "\t%s\t{", push ? "push" : "pop"); - - /* Look at the low registers first. 
*/ - - for (regno = 0; regno < 8; regno++, lo_mask >>= 1) - { - if (lo_mask & 1) - { - asm_fprintf(f, reg_names[regno]); - - if ((lo_mask & ~1) != 0) - asm_fprintf(f, ", "); - } - } - - if (push && (mask & (1 << 14))) - { - /* Catch pushing the LR. */ - - if (mask & 0xFF) - asm_fprintf(f, ", "); - - asm_fprintf(f, reg_names[14]); - } - else if (!push && (mask & (1 << 15))) - { - /* Catch popping the PC. */ - - if (TARGET_THUMB_INTERWORK) - { - /* The PC is never popped directly, instead - it is popped into r0-r3 and then BX is used. */ - - asm_fprintf(f, "}\n"); - - thumb_exit(f, -1); - - return; - } - else - { - if (mask & 0xFF) - asm_fprintf(f, ", "); - - asm_fprintf(f, reg_names[15]); - } - } - - asm_fprintf(f, "}\n"); -} - -/* Returns non-zero if the current function contains a far jump */ - -int -far_jump_used_p() -{ - rtx insn; - - if (current_function_has_far_jump) - return 1; - - for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) - { - if (GET_CODE(insn) == JUMP_INSN - /* Ignore tablejump patterns. */ - && GET_CODE(PATTERN(insn)) != ADDR_VEC - && GET_CODE(PATTERN(insn)) != ADDR_DIFF_VEC - && get_attr_far_jump(insn) == FAR_JUMP_YES) - { - current_function_has_far_jump = 1; - return 1; - } - } - - return 0; -} - -static int return_used_this_function = 0; - -void -thumb_function_prologue(FILE *f, int frame_size) -{ - int amount = frame_size + current_function_outgoing_args_size; - int live_regs_mask = 0; - int high_regs_pushed = 0; - int store_arg_regs = 0; - int regno; - - if (arm_naked_function_p(current_function_decl)) - return; - - if (current_function_anonymous_args && current_function_pretend_args_size) - store_arg_regs = 1; - - if (current_function_pretend_args_size) - { - if (store_arg_regs) - { - asm_fprintf(f, "\tpush\t{"); - for (regno = 4 - current_function_pretend_args_size / 4; regno < 4; - regno++) - asm_fprintf(f, "%s%s", reg_names[regno], regno == 3 ? 
"" : ", "); - asm_fprintf(f, "}\n"); - } - else - asm_fprintf(f, "\tsub\t%Rsp, %Rsp, #%d\n", - current_function_pretend_args_size); - } - - for (regno = 0; regno < 8; regno++) - if (regs_ever_live[regno] && !call_used_regs[regno]) - live_regs_mask |= 1 << regno; - - if (live_regs_mask || !leaf_function_p() || far_jump_used_p()) - live_regs_mask |= 1 << 14; - - if (live_regs_mask) - thumb_pushpop(f, live_regs_mask, 1); - - for (regno = 8; regno < 13; regno++) - { - if (regs_ever_live[regno] && !call_used_regs[regno]) - high_regs_pushed++; - } - - if (high_regs_pushed) - { - int pushable_regs = 0; - int mask = live_regs_mask & 0xff; - int next_hi_reg; - - for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--) - { - if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]) - break; - } - - pushable_regs = mask; - - if (pushable_regs == 0) - { - /* desperation time -- this probably will never happen */ - if (regs_ever_live[3] || !call_used_regs[3]) - asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]); - mask = 1 << 3; - } - - while (high_regs_pushed > 0) - { - for (regno = 7; regno >= 0; regno--) - { - if (mask & (1 << regno)) - { - asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[regno], - reg_names[next_hi_reg]); - high_regs_pushed--; - if (high_regs_pushed) - for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--) - { - if (regs_ever_live[next_hi_reg] - && !call_used_regs[next_hi_reg]) - break; - } - else - { - mask &= ~((1 << regno) - 1); - break; - } - } - } - thumb_pushpop(f, mask, 1); - } - - if (pushable_regs == 0 && (regs_ever_live[3] || !call_used_regs[3])) - asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]); - } -} - -void -thumb_expand_prologue() -{ - HOST_WIDE_INT amount = (get_frame_size() - + current_function_outgoing_args_size); - int regno; - int live_regs_mask; - - if (arm_naked_function_p(current_function_decl)) - return; - - if (amount) - { - live_regs_mask = 0; - for (regno = 0; regno < 8; regno++) - if (regs_ever_live[regno] && !call_used_regs[regno]) - live_regs_mask |= 1 << regno; - - if (amount < 512) - emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, - GEN_INT(-amount))); - else - { - rtx reg, spare; - - if ((live_regs_mask & 0xff) == 0) /* Very unlikely */ - emit_insn(gen_movsi(spare = gen_rtx(REG, SImode, 12), - reg = gen_rtx(REG, SImode, 4))); - else - { - for (regno = 0; regno < 8; regno++) - if (live_regs_mask & (1 << regno)) - break; - reg = gen_rtx(REG, SImode, regno); - } - - emit_insn(gen_movsi(reg, GEN_INT(-amount))); - emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, reg)); - if ((live_regs_mask & 0xff) == 0) - emit_insn(gen_movsi(reg, spare)); - } - } - - if (frame_pointer_needed) - { - if (current_function_outgoing_args_size) - { - rtx offset = GEN_INT(current_function_outgoing_args_size); - - if (current_function_outgoing_args_size < 1024) - emit_insn(gen_addsi3(frame_pointer_rtx, stack_pointer_rtx, - offset)); - else - { - emit_insn(gen_movsi(frame_pointer_rtx, offset)); - emit_insn(gen_addsi3(frame_pointer_rtx, frame_pointer_rtx, - stack_pointer_rtx)); - } - } - else - emit_insn(gen_movsi(frame_pointer_rtx, stack_pointer_rtx)); - } - - /* if (profile_flag || profile_block_flag) */ - emit_insn(gen_blockage()); -} - -void -thumb_expand_epilogue() -{ - HOST_WIDE_INT amount = (get_frame_size() - + current_function_outgoing_args_size); - int regno; - - if (arm_naked_function_p(current_function_decl)) - return; - - if (amount) - { - if (amount < 512) - emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, - 
GEN_INT(amount))); - else - { - rtx reg = gen_rtx(REG, SImode, 3); /* Always free in the epilogue */ - - emit_insn(gen_movsi(reg, GEN_INT(amount))); - emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, reg)); - } - /* if (profile_flag || profile_block_flag) */ - emit_insn(gen_blockage()); - } -} - -void -thumb_function_epilogue(FILE *f, int frame_size) -{ - /* ??? Probably not safe to set this here, since it assumes that a - function will be emitted as assembly immediately after we generate - RTL for it. This does not happen for inline functions. */ - return_used_this_function = 0; - current_function_has_far_jump = 0; -#if 0 /* TODO : comment not really needed */ - fprintf(f, "%s THUMB Epilogue\n", ASM_COMMENT_START); -#endif -} - -/* The bits which aren't usefully expanded as rtl. */ -char * -thumb_unexpanded_epilogue() -{ - int regno; - int live_regs_mask = 0; - int high_regs_pushed = 0; - int leaf_function = leaf_function_p(); - int had_to_push_lr; - - if (arm_naked_function_p(current_function_decl) - || return_used_this_function) - return ""; - - for (regno = 0; regno < 8; regno++) - if (regs_ever_live[regno] && !call_used_regs[regno]) - live_regs_mask |= 1 << regno; - - for (regno = 8; regno < 13; regno++) - { - if (regs_ever_live[regno] && !call_used_regs[regno]) - high_regs_pushed++; - } - - /* The prolog may have pushed some high registers to use as - work registers. eg the testuite file: - gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c - compiles to produce: - push {r4, r5, r6, r7, lr} - mov r7, r9 - mov r6, r8 - push {r6, r7} - as part of the prolog. We have to undo that pushing here. */ - - if (high_regs_pushed) - { - int mask = live_regs_mask; - int next_hi_reg; - int size; - int mode; - - /* If we can deduce the registers used from the function's return value. - This is more reliable that examining regs_ever_live[] because that - will be set if the register is ever used in the function, not just if - the register is used to hold a return value. */ - - if (current_function_return_rtx != 0) - { - mode = GET_MODE(current_function_return_rtx); - } - else - { - mode = DECL_MODE(DECL_RESULT(current_function_decl)); - } - - size = GET_MODE_SIZE(mode); - - /* Unless we are returning a type of size > 12 register r3 is available. */ - if (size < 13) - mask |= 1 << 3; - - if (mask == 0) - { - /* Oh dear! We have no low registers into which we can pop high registers! */ - - fatal("No low registers available for popping high registers"); - } - - for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++) - if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]) - break; - - while (high_regs_pushed) - { - /* Find low register(s) into which the high register(s) can be popped. */ - for (regno = 0; regno < 8; regno++) - { - if (mask & (1 << regno)) - high_regs_pushed--; - if (high_regs_pushed == 0) - break; - } - - mask &= (2 << regno) - 1; /* A noop if regno == 8 */ - - /* Pop the values into the low register(s). */ - thumb_pushpop(asm_out_file, mask, 0); - - /* Move the value(s) into the high registers. 
*/ - for (regno = 0; regno < 8; regno++) - { - if (mask & (1 << regno)) - { - asm_fprintf(asm_out_file, "\tmov\t%s, %s\n", - reg_names[next_hi_reg], reg_names[regno]); - for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++) - if (regs_ever_live[next_hi_reg] && - !call_used_regs[next_hi_reg]) - break; - } - } - } - } - - had_to_push_lr = (live_regs_mask || !leaf_function || far_jump_used_p()); - - if (current_function_pretend_args_size == 0) - { - if (had_to_push_lr) - live_regs_mask |= 1 << PROGRAM_COUNTER; - - /* No argument registers were pushed, so just pop everything. */ - - if (live_regs_mask) - thumb_pushpop(asm_out_file, live_regs_mask, FALSE); - - /* We have either just popped the return address into the - PC or it is was kept in LR for the entire function or - it is still on the stack because we do not want to - return by doing a pop {pc}. */ - - if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0) - thumb_exit(asm_out_file, LINK_REGISTER); - } - else - { - /* Pop everything but the return address. */ - live_regs_mask &= ~(1 << PROGRAM_COUNTER); - - if (live_regs_mask) - thumb_pushpop(asm_out_file, live_regs_mask, FALSE); - - if (had_to_push_lr) - { - /* Get the return address into a temporary register. */ - thumb_pushpop(asm_out_file, 1 << ARG_4_REGISTER, 0); - } - - /* Remove the argument registers that were pushed onto the stack. */ - asm_fprintf(asm_out_file, "\tadd\t%s, %s, #%d\n", - reg_names[STACK_POINTER], - reg_names[STACK_POINTER], - current_function_pretend_args_size); - - thumb_exit(asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER); - } - - return ""; -} - -/* Handle the case of a double word load into a low register from - a computed memory address. The computed address may involve a - register which is overwritten by the load. */ - -char * -thumb_load_double_from_address(rtx *operands) -{ - rtx addr; - rtx base; - rtx offset; - rtx arg1; - rtx arg2; - - if (GET_CODE(operands[0]) != REG) - fatal("thumb_load_double_from_address: destination is not a register"); - - if (GET_CODE(operands[1]) != MEM) - fatal("thumb_load_double_from_address: source is not a computed memory address"); - - /* Get the memory address. */ - - addr = XEXP(operands[1], 0); - - /* Work out how the memory address is computed. */ - - switch (GET_CODE(addr)) - { - case REG: - operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); - - if (REGNO(operands[0]) == REGNO(addr)) - { - output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); - output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); - } - else - { - output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); - output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); - } - break; - - case CONST: - /* Compute
 <address> + 4 for the high order load.  */
-
-      operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4));
-
-      output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
-      output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
-      break;
-
-    case PLUS:
-      arg1 = XEXP(addr, 0);
-      arg2 = XEXP(addr, 1);
-
-      if (CONSTANT_P(arg1))
-        base = arg2, offset = arg1;
-      else
-        base = arg1, offset = arg2;
-
-      if (GET_CODE(base) != REG)
-        fatal("thumb_load_double_from_address: base is not a register");
-
-      /* Catch the case of <address> = <base> + <offset register>.  */
-
-      if (GET_CODE(offset) == REG)
-        {
-          int reg_offset = REGNO(offset);
-          int reg_base = REGNO(base);
-          int reg_dest = REGNO(operands[0]);
-
-          /* Add the base and offset registers together into the higher destination register. */
-
-          fprintf(asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address",
-                  reg_names[ reg_dest + 1 ],
-                  reg_names[ reg_base ],
-                  reg_names[ reg_offset ],
-                  ASM_COMMENT_START);
-
-          /* Load the lower destination register from the address in the higher destination register. */
-
-          fprintf(asm_out_file, "\tldr\t%s,[%s, #0]\t\t%s created by thumb_load_double_from_address",
-                  reg_names[ reg_dest ],
-                  reg_names[ reg_dest + 1],
-                  ASM_COMMENT_START);
-
-          /* Load the higher destination register from its own address plus 4. */
-
-          fprintf(asm_out_file, "\tldr\t%s,[%s, #4]\t\t%s created by thumb_load_double_from_address",
-                  reg_names[ reg_dest + 1 ],
-                  reg_names[ reg_dest + 1 ],
-                  ASM_COMMENT_START);
-        }
-      else
-        {
-          /* Compute <address>
+ 4 for the high order load. */ - - operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); - - /* If the computed address is held in the low order register - then load the high order register first, otherwise always - load the low order register first. */ - - if (REGNO(operands[0]) == REGNO(base)) - { - output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); - output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); - } - else - { - output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); - output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); - } - } - break; - - case LABEL_REF: - /* With no registers to worry about we can just load the value directly. */ - operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); - - output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); - output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); - break; - - default: - debug_rtx(operands[1]); - fatal("thumb_load_double_from_address: Unhandled address calculation"); - break; - } - - return ""; -} - -char * -output_move_mem_multiple(int n, rtx *operands) -{ - rtx tmp; - - switch (n) - { - case 2: - if (REGNO(operands[2]) > REGNO(operands[3])) - { - tmp = operands[2]; - operands[2] = operands[3]; - operands[3] = tmp; - } - output_asm_insn("ldmia\t%1!, {%2, %3}", operands); - output_asm_insn("stmia\t%0!, {%2, %3}", operands); - break; - - case 3: - if (REGNO(operands[2]) > REGNO(operands[3])) - { - tmp = operands[2]; - operands[2] = operands[3]; - operands[3] = tmp; - } - if (REGNO(operands[3]) > REGNO(operands[4])) - { - tmp = operands[3]; - operands[3] = operands[4]; - operands[4] = tmp; - } - if (REGNO(operands[2]) > REGNO(operands[3])) - { - tmp = operands[2]; - operands[2] = operands[3]; - operands[3] = tmp; - } - output_asm_insn("ldmia\t%1!, {%2, %3, %4}", operands); - output_asm_insn("stmia\t%0!, {%2, %3, %4}", operands); - break; - - default: - abort(); - } - - return ""; -} - -static char *conds[] = -{ - "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", - "hi", "ls", "ge", "lt", "gt", "le" -}; - -static char * -thumb_condition_code(rtx x, int invert) -{ - int val; - - switch (GET_CODE(x)) - { - case EQ: val = 0; break; - case NE: val = 1; break; - case GEU: val = 2; break; - case LTU: val = 3; break; - case GTU: val = 8; break; - case LEU: val = 9; break; - case GE: val = 10; break; - case LT: val = 11; break; - case GT: val = 12; break; - case LE: val = 13; break; - default: - abort(); - } - - return conds[val ^ invert]; -} - -void -thumb_print_operand(FILE *f, rtx x, int code) -{ - if (code) - { - switch (code) - { - case '@': - fputs(ASM_COMMENT_START, f); - return; - - case 'D': - if (x) - fputs(thumb_condition_code(x, 1), f); - return; - - case 'd': - if (x) - fputs(thumb_condition_code(x, 0), f); - return; - - /* An explanation of the 'Q', 'R' and 'H' register operands: - - In a pair of registers containing a DI or DF value the 'Q' - operand returns the register number of the register containing - the least signficant part of the value. The 'R' operand returns - the register number of the register containing the most - significant part of the value. - - The 'H' operand returns the higher of the two register numbers. 
- On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the - same as the 'Q' operand, since the most signficant part of the - value is held in the lower number register. The reverse is true - on systems where WORDS_BIG_ENDIAN is false. - - The purpose of these operands is to distinguish between cases - where the endian-ness of the values is important (for example - when they are added together), and cases where the endian-ness - is irrelevant, but the order of register operations is important. - For example when loading a value from memory into a register - pair, the endian-ness does not matter. Provided that the value - from the lower memory address is put into the lower numbered - register, and the value from the higher address is put into the - higher numbered register, the load will work regardless of whether - the value being loaded is big-wordian or little-wordian. The - order of the two register loads can matter however, if the address - of the memory location is actually held in one of the registers - being overwritten by the load. */ - case 'Q': - if (REGNO(x) > 15) - abort(); - fputs(reg_names[REGNO(x)], f); - return; - - case 'R': - if (REGNO(x) > 15) - abort(); - fputs(reg_names[REGNO(x) + 1], f); - return; - - case 'H': - if (REGNO(x) > 15) - abort(); - fputs(reg_names[REGNO(x) + 1], f); - return; - - case 'c': - /* We use 'c' operands with symbols for .vtinherit */ - if (GET_CODE(x) == SYMBOL_REF) - output_addr_const(f, x); - return; - - default: - abort(); - } - } - if (GET_CODE(x) == REG) - fputs(reg_names[REGNO(x)], f); - else if (GET_CODE(x) == MEM) - output_address(XEXP(x, 0)); - else if (GET_CODE(x) == CONST_INT) - { - fputc('#', f); - output_addr_const(f, x); - } - else - abort(); -} - -/* Decide whether a type should be returned in memory (true) - or in a register (false). This is called by the macro - RETURN_IN_MEMORY. */ - -int -thumb_return_in_memory(tree type) -{ - if (!AGGREGATE_TYPE_P(type)) - { - /* All simple types are returned in registers. */ - - return 0; - } - else if (int_size_in_bytes(type) > 4) - { - /* All structures/unions bigger than one word are returned in memory. */ - - return 1; - } - else if (TREE_CODE(type) == RECORD_TYPE) - { - tree field; - - /* For a struct the APCS says that we must return in a register if - every addressable element has an offset of zero. For practical - purposes this means that the structure can have at most one non- - bit-field element and that this element must be the first one in - the structure. */ - - /* Find the first field, ignoring non FIELD_DECL things which will - have been created by C++. */ - for (field = TYPE_FIELDS(type); - field && TREE_CODE(field) != FIELD_DECL; - field = TREE_CHAIN(field)) - continue; - - if (field == NULL) - return 0; /* An empty structure. Allowed by an extension to ANSI C. */ - - /* Now check the remaining fields, if any. */ - for (field = TREE_CHAIN(field); field; field = TREE_CHAIN(field)) - { - if (TREE_CODE(field) != FIELD_DECL) - continue; - - if (!DECL_BIT_FIELD_TYPE(field)) - return 1; - } - - return 0; - } - else if (TREE_CODE(type) == UNION_TYPE) - { - tree field; - - /* Unions can be returned in registers if every element is - integral, or can be returned in an integer register. */ - - for (field = TYPE_FIELDS(type); - field; - field = TREE_CHAIN(field)) - { - if (TREE_CODE(field) != FIELD_DECL) - continue; - - if (RETURN_IN_MEMORY(TREE_TYPE(field))) - return 1; - } - - return 0; - } - /* XXX Not sure what should be done for other aggregates, so put them in - memory. 
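A few hypothetical type declarations to make the classification performed by thumb_return_in_memory above concrete (32-bit words assumed; the comments give the resulting decision):

struct s1 { int x; };                  /* one word, single field: returned in r0      */
struct s2 { short a; short b; };       /* second field is not a bit-field: memory     */
struct s3 { int a : 16; int b : 16; }; /* only bit-fields follow the first field: r0  */
struct s4 { int a; int b; };           /* larger than one word: memory                */
union  u1 { int i; unsigned u; };      /* every member returnable in a register: r0   */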
*/ - return 1; -} - -void -thumb_override_options() -{ - if (structure_size_string != NULL) - { - int size = strtol(structure_size_string, NULL, 0); - - if (size == 8 || size == 32) - arm_structure_size_boundary = size; - else - warning("Structure size boundary can only be set to 8 or 32"); - } -} - -/* Return nonzero if ATTR is a valid attribute for DECL. - ATTRIBUTES are any existing attributes and ARGS are the arguments - supplied with ATTR. - - Supported attributes: - - naked: don't output any prologue or epilogue code, the user is assumed - to do the right thing. - */ -int -arm_valid_machine_decl_attribute(tree decl, tree attributes, tree attr, tree args) -{ - if (args != NULL_TREE) - return 0; - - if (is_attribute_p("naked", attr)) - return TREE_CODE(decl) == FUNCTION_DECL; - - return 0; -} - -/* s_register_operand is the same as register_operand, but it doesn't accept - (SUBREG (MEM)...). - - This function exists because at the time it was put in it led to better - code. SUBREG(MEM) always needs a reload in the places where - s_register_operand is used, and this seemed to lead to excessive - reloading. */ - -int -s_register_operand(rtx op, enum machine_mode mode) -{ - if (GET_MODE(op) != mode && mode != VOIDmode) - return 0; - - if (GET_CODE(op) == SUBREG) - op = SUBREG_REG(op); - - /* We don't consider registers whose class is NO_REGS - to be a register operand. */ - /* XXX might have to check for lo regs only for thumb ??? */ - return (GET_CODE(op) == REG - && (REGNO(op) >= FIRST_PSEUDO_REGISTER - || REGNO_REG_CLASS(REGNO(op)) != NO_REGS)); -} diff --git a/gcc/config/arm/thumb.h b/gcc/config/arm/thumb.h deleted file mode 100755 index e5918a1..0000000 --- a/gcc/config/arm/thumb.h +++ /dev/null @@ -1,1137 +0,0 @@ -/* Definitions of target machine for GNU compiler, for ARM/Thumb. - Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc. - The basis of this contribution was generated by - Richard Earnshaw, Advanced RISC Machines Ltd - -This file is part of GNU CC. - -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. - -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */ - -/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced - gcc hacker in their entirety. */ - -/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm - files, which will lead to many maintenance problems. These files are - likely missing all bug fixes made to the arm port since they diverged. */ - -/* ??? Many patterns in the md file accept operands that will require a - reload. These should be eliminated if possible by tightening the - predicates and/or constraints. This will give faster/smaller code. */ - -/* ??? There is no pattern for the TST instuction. Check for other unsupported - instructions. 
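Two user-visible knobs are handled just above: the "naked" machine attribute accepted by arm_valid_machine_decl_attribute, and the -mstructure-size-boundary= option parsed by thumb_override_options. A hypothetical illustration of both (the struct tag, function name and asm body are made up; the usual __attribute__ syntax is assumed):

/* With the default -mstructure-size-boundary=32 this struct is padded to
   4 bytes; with -mstructure-size-boundary=8 it occupies 3 bytes.  */
struct three_bytes { char a, b, c; };

/* A naked function gets no prologue or epilogue, so its body must handle
   the return itself.  */
void __attribute__((naked)) start_dispatch(void)
{
    asm volatile ("mov\tr0, #0\n\t"
                  "bx\tlr");
}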
*/ - -#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr); - -#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */ -#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000 - -/* Nonzero if all call instructions should be indirect. */ -#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */ - - -/* Run-time compilation parameters selecting different hardware/software subsets. */ -extern int target_flags; -#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */ -#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB) - -/* Set if calls via function pointers should assume that their - destination is non-Thumb aware. */ -#define TARGET_CALLER_INTERWORKING \ - (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING) - -#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS) - -/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */ -#ifndef SUBTARGET_SWITCHES -#define SUBTARGET_SWITCHES -#endif - -#define TARGET_SWITCHES \ -{ \ - {"thumb-interwork", ARM_FLAG_THUMB}, \ - {"no-thumb-interwork", -ARM_FLAG_THUMB}, \ - {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \ - {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \ - {"long-calls", ARM_FLAG_LONG_CALLS, \ - "Generate all call instructions as indirect calls"}, \ - {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \ - SUBTARGET_SWITCHES \ - {"", TARGET_DEFAULT} \ -} - -#define TARGET_OPTIONS \ -{ \ - { "structure-size-boundary=", & structure_size_string }, \ -} - -#define REGISTER_PREFIX "" - -#define CAN_DEBUG_WITHOUT_FP 1 - -#define ASM_APP_ON "" -#define ASM_APP_OFF "\t.code\t16\n" - -/* Output a gap. In fact we fill it with nulls. */ -#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \ - fprintf ((STREAM), "\t.space\t%u\n", (NBYTES)) - -/* This is how to output an assembler line - that says to advance the location counter - to a multiple of 2**LOG bytes. */ - -#ifdef OLD_ASM - -#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ -{ \ - if ((LOG) > 0) \ - fprintf (STREAM, "\t.align\t%d\n", (LOG)); \ -} - -#else - -#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ -{ \ - if ((LOG) > 0) \ - fprintf (STREAM, "\t.align\t%d, 0\n", (LOG)); \ -} - -#endif - -/* Output a common block */ -#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \ - (fprintf ((STREAM), "\t.comm\t"), \ - assemble_name ((STREAM), (NAME)), \ - fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE))) - -#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \ - sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM)) - -/* This is how to output an internal numbered label where - PREFIX is the class of label and NUM is the number within the class. */ -#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \ - fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM)) - -/* This is how to output a label which precedes a jumptable. Since - instructions are 2 bytes, we need explicit alignment here. */ - -#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \ - do { \ - ASM_OUTPUT_ALIGN (FILE, 2); \ - ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \ - } while (0) - -/* This says how to define a local common symbol (ie, not visible to - linker). */ -#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \ - (fprintf((STREAM),"\n\t.lcomm\t"), \ - assemble_name((STREAM),(NAME)), \ - fprintf((STREAM),",%u\n",(SIZE))) - -/* Output a reference to a label. */ -#define ASM_OUTPUT_LABELREF(STREAM,NAME) \ - fprintf ((STREAM), "%s", (NAME)) - -/* This is how to output an assembler line for a numeric constant byte. 
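How a table like TARGET_SWITCHES above takes effect (the option-driver code itself is not part of this patch): a positive entry ORs its bits into target_flags, a negative entry clears them, and macros such as TARGET_THUMB_INTERWORK simply test the accumulated mask. A self-contained sketch with illustrative names:

#include <stdio.h>

#define ARM_FLAG_THUMB 0x1000   /* same bit as above */

static int flags;

static void apply_switch(int value)
{
    if (value >= 0)
        flags |= value;         /* e.g. -mthumb-interwork    */
    else
        flags &= ~(-value);     /* e.g. -mno-thumb-interwork */
}

int main(void)
{
    apply_switch(ARM_FLAG_THUMB);
    printf("%d\n", (flags & ARM_FLAG_THUMB) != 0);   /* 1 */
    apply_switch(-ARM_FLAG_THUMB);
    printf("%d\n", (flags & ARM_FLAG_THUMB) != 0);   /* 0 */
    return 0;
}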
*/ -#define ASM_OUTPUT_BYTE(STREAM,VALUE) \ - fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE)) - -#define ASM_OUTPUT_INT(STREAM,VALUE) \ -{ \ - fprintf (STREAM, "\t.word\t"); \ - output_addr_const (STREAM, (VALUE)); \ - fprintf (STREAM, "\n"); \ -} - -#define ASM_OUTPUT_SHORT(STREAM,VALUE) \ -{ \ - fprintf (STREAM, "\t.short\t"); \ - output_addr_const (STREAM, (VALUE)); \ - fprintf (STREAM, "\n"); \ -} - -#define ASM_OUTPUT_CHAR(STREAM,VALUE) \ -{ \ - fprintf (STREAM, "\t.byte\t"); \ - output_addr_const (STREAM, (VALUE)); \ - fprintf (STREAM, "\n"); \ -} - -#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \ -do { char dstr[30]; \ - long l[2]; \ - REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \ - REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \ - fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \ - l[1], ASM_COMMENT_START, dstr); \ - } while (0) - -#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \ -do { char dstr[30]; \ - long l; \ - REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \ - REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \ - fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \ - ASM_COMMENT_START, dstr); \ - } while (0); - -/* Define results of standard character escape sequences. */ -#define TARGET_BELL 007 -#define TARGET_BS 010 -#define TARGET_TAB 011 -#define TARGET_NEWLINE 012 -#define TARGET_VT 013 -#define TARGET_FF 014 -#define TARGET_CR 015 - -/* This is how to output a string. */ -#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \ -do { \ - register int i, c, len = (LEN), cur_pos = 17; \ - register unsigned char *string = (unsigned char *)(STRING); \ - fprintf ((STREAM), "\t.ascii\t\""); \ - for (i = 0; i < len; i++) \ - { \ - register int c = string[i]; \ - \ - switch (c) \ - { \ - case '\"': \ - case '\\': \ - putc ('\\', (STREAM)); \ - putc (c, (STREAM)); \ - cur_pos += 2; \ - break; \ - \ - case TARGET_NEWLINE: \ - fputs ("\\n", (STREAM)); \ - if (i+1 < len \ - && (((c = string[i+1]) >= '\040' && c <= '~') \ - || c == TARGET_TAB)) \ - cur_pos = 32767; /* break right here */ \ - else \ - cur_pos += 2; \ - break; \ - \ - case TARGET_TAB: \ - fputs ("\\t", (STREAM)); \ - cur_pos += 2; \ - break; \ - \ - case TARGET_FF: \ - fputs ("\\f", (STREAM)); \ - cur_pos += 2; \ - break; \ - \ - case TARGET_BS: \ - fputs ("\\b", (STREAM)); \ - cur_pos += 2; \ - break; \ - \ - case TARGET_CR: \ - fputs ("\\r", (STREAM)); \ - cur_pos += 2; \ - break; \ - \ - default: \ - if (c >= ' ' && c < 0177) \ - { \ - putc (c, (STREAM)); \ - cur_pos++; \ - } \ - else \ - { \ - fprintf ((STREAM), "\\%03o", c); \ - cur_pos += 4; \ - } \ - } \ - \ - if (cur_pos > 72 && i+1 < len) \ - { \ - cur_pos = 17; \ - fprintf ((STREAM), "\"\n\t.ascii\t\""); \ - } \ - } \ - fprintf ((STREAM), "\"\n"); \ -} while (0) - -/* Output and Generation of Labels */ -#define ASM_OUTPUT_LABEL(STREAM,NAME) \ - (assemble_name ((STREAM), (NAME)), \ - fprintf ((STREAM), ":\n")) - -#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \ - (fprintf ((STREAM), "\t.globl\t"), \ - assemble_name ((STREAM), (NAME)), \ - fputc ('\n', (STREAM))) - -/* Construct a private name. */ -#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \ - ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \ - sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER))) - -/* Switch to the text or data segment. */ -#define TEXT_SECTION_ASM_OP ".text" -#define DATA_SECTION_ASM_OP ".data" -#define BSS_SECTION_ASM_OP ".bss" - -/* The assembler's names for the registers. 
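As a worked example of the escaping loop in ASM_OUTPUT_ASCII above: for the four bytes of the C string "Hi\n" (including its terminating NUL) the macro emits a single directive,

	.ascii	"Hi\n\000"

and it only breaks the string and starts a fresh .ascii directive once the output column passes 72.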
*/ -#ifndef REGISTER_NAMES -#define REGISTER_NAMES \ -{ \ - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ - "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \ -} -#endif - -#ifndef ADDITIONAL_REGISTER_NAMES -#define ADDITIONAL_REGISTER_NAMES \ -{ \ - {"a1", 0}, \ - {"a2", 1}, \ - {"a3", 2}, \ - {"a4", 3}, \ - {"v1", 4}, \ - {"v2", 5}, \ - {"v3", 6}, \ - {"v4", 7}, \ - {"v5", 8}, \ - {"v6", 9}, \ - {"sb", 9}, \ - {"v7", 10}, \ - {"r10", 10}, /* sl */ \ - {"r11", 11}, /* fp */ \ - {"r12", 12}, /* ip */ \ - {"r13", 13}, /* sp */ \ - {"r14", 14}, /* lr */ \ - {"r15", 15} /* pc */ \ -} -#endif - -/* The assembler's parentheses characters. */ -#define ASM_OPEN_PAREN "(" -#define ASM_CLOSE_PAREN ")" - -#ifndef ASM_COMMENT_START -#define ASM_COMMENT_START "@" -#endif - -/* Output an element of a dispatch table. */ -#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \ - fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE)) - -#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \ - fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE)) - -/* Storage Layout */ - -#define FLOAT_WORDS_BIG_ENDIAN 1 - -#define BITS_PER_UNIT 8 -#define BITS_PER_WORD 32 - -#define UNITS_PER_WORD 4 - -#define POINTER_SIZE 32 - -#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \ -{ \ - if (GET_MODE_CLASS (MODE) == MODE_INT \ - && GET_MODE_SIZE (MODE) < 4) \ - { \ - (UNSIGNEDP) = 1; \ - (MODE) = SImode; \ - } \ -} - -#define PARM_BOUNDARY 32 -#define STACK_BOUNDARY 32 - -#define FUNCTION_BOUNDARY 32 -#define BIGGEST_ALIGNMENT 32 - -/* Make strings word-aligned so strcpy from constants will be faster. */ -#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ - (TREE_CODE (EXP) == STRING_CST \ - && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) - -#define EMPTY_FIELD_BOUNDARY 32 - -#define STRUCTURE_SIZE_BOUNDARY 32 - -/* Used when parsing command line option -mstructure_size_boundary. */ -extern char * structure_size_string; - -#define STRICT_ALIGNMENT 1 - -#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT - - -/* Layout of Source Language Data Types */ - -#define TARGET_BELL 007 -#define TARGET_BS 010 -#define TARGET_TAB 011 -#define TARGET_NEWLINE 012 -#define TARGET_VT 013 -#define TARGET_FF 014 -#define TARGET_CR 015 - - -/* Register Usage */ - -/* Note there are 16 hard registers on the Thumb. We invent a 17th register - which is assigned to ARG_POINTER_REGNUM, but this is later removed by - elimination passes in the compiler. */ -#define FIRST_PSEUDO_REGISTER 17 - -/* ??? This is questionable. */ -#define FIXED_REGISTERS \ -{ \ - 0,0,0,0, \ - 0,0,0,0, \ - 0,0,0,1, \ - 0,1,1,1,1 \ -} - -/* ??? This is questionable. */ -#define CALL_USED_REGISTERS \ -{ \ - 1,1,1,1, \ - 0,0,0,0, \ - 0,0,0,1, \ - 1,1,1,1,1 \ -} - -#define HARD_REGNO_NREGS(REGNO,MODE) \ - ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \ - / UNITS_PER_WORD) - -/* ??? Probably should only allow DImode/DFmode in even numbered registers. */ -#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1) - -#define MODES_TIEABLE_P(MODE1,MODE2) 1 - -/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing - arguments to functions. These are the registers that are available for - spilling during reload. The code in reload1.c:init_reload() will detect this - class and place it into 'reload_address_base_reg_class'. 
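Restating the two register-fit rules above as plain functions: a value occupies ceil(size / 4) consecutive registers, and anything wider than one word must start below r7, presumably so the whole group stays inside the low registers. The helper names and sample values are illustrative:

#include <stdio.h>

#define UNITS_PER_WORD 4

static int hard_regno_nregs(int mode_size)
{
    return (mode_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}

static int hard_regno_mode_ok(int regno, int mode_size)
{
    return mode_size > UNITS_PER_WORD ? regno < 7 : 1;
}

int main(void)
{
    printf("%d\n", hard_regno_nregs(4));        /* SImode: 1 register  */
    printf("%d\n", hard_regno_nregs(8));        /* DImode: 2 registers */
    printf("%d\n", hard_regno_mode_ok(6, 8));   /* DImode in r6: ok    */
    printf("%d\n", hard_regno_mode_ok(7, 8));   /* DImode in r7: no    */
    return 0;
}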
*/ - -enum reg_class -{ - NO_REGS, - NONARG_LO_REGS, - LO_REGS, - STACK_REG, - BASE_REGS, - HI_REGS, - ALL_REGS, - LIM_REG_CLASSES -}; - -#define GENERAL_REGS ALL_REGS - -#define N_REG_CLASSES (int) LIM_REG_CLASSES - -#define REG_CLASS_NAMES \ -{ \ - "NO_REGS", \ - "NONARG_LO_REGS", \ - "LO_REGS", \ - "STACK_REG", \ - "BASE_REGS", \ - "HI_REGS", \ - "ALL_REGS" \ -} - -#define REG_CLASS_CONTENTS \ -{ \ - 0x00000, \ - 0x000f0, \ - 0x000ff, \ - 0x02000, \ - 0x020ff, \ - 0x0ff00, \ - 0x1ffff, \ -} - -#define REGNO_REG_CLASS(REGNO) \ - ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \ - : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \ - : NONARG_LO_REGS) \ - : HI_REGS) - -#define BASE_REG_CLASS BASE_REGS - -#define MODE_BASE_REG_CLASS(MODE) \ - ((MODE) != QImode && (MODE) != HImode \ - ? BASE_REGS : LO_REGS) - -#define INDEX_REG_CLASS LO_REGS - -/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows - registers explicitly used in the rtl to be used as spill registers - but prevents the compiler from extending the lifetime of these - registers. */ - -#define SMALL_REGISTER_CLASSES 1 - -#define REG_CLASS_FROM_LETTER(C) \ - ((C) == 'l' ? LO_REGS \ - : (C) == 'h' ? HI_REGS \ - : (C) == 'b' ? BASE_REGS \ - : (C) == 'k' ? STACK_REG \ - : NO_REGS) - -#define REGNO_OK_FOR_BASE_P(REGNO) \ - ((REGNO) < 8 \ - || (REGNO) == STACK_POINTER_REGNUM \ - || (unsigned) reg_renumber[REGNO] < 8 \ - || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM) - -#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ - ((REGNO) < 8 \ - || (unsigned) reg_renumber[REGNO] < 8 \ - || (GET_MODE_SIZE (MODE) >= 4 \ - && ((REGNO) == STACK_POINTER_REGNUM \ - || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM))) - -#define REGNO_OK_FOR_INDEX_P(REGNO) \ - ((REGNO) < 8 \ - || (unsigned) reg_renumber[REGNO] < 8) - -/* ??? This looks suspiciously wrong. */ -/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save - lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class) - and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS - says to allocate a LO_REGS spill instead, then this mismatch gives an - abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS - to be LO_REGS instead of BASE_REGS. It is not clear what affect this - change would have. */ -/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS - must always return a strict subset of the input class. Just blindly - returning LO_REGS is safe only if the input class is a superset of LO_REGS, - but there is no check for this. Added another exception for NONARG_LO_REGS - because it is not a superset of LO_REGS. */ -/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the - comments about BASE_REGS are now obsolete. */ -#define PREFERRED_RELOAD_CLASS(X,CLASS) \ - ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \ - : LO_REGS) -/* - ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \ - && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \ - : (GET_CODE ((X)) == CONST_INT \ - && (HOST_WIDE_UINT) INTVAL ((X)) > 255) ? NO_REGS \ - : LO_REGS) */ - -/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment - above. */ -#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \ - ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \ - ? ((true_regnum (X) == -1 ? LO_REGS \ - : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? 
LO_REGS \ - : NO_REGS)) \ - : NO_REGS) - -#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE)) - -int thumb_shiftable_const (); - -#define CONST_OK_FOR_LETTER_P(VAL,C) \ - ((C) == 'I' ? (HOST_WIDE_UINT) (VAL) < 256 \ - : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \ - : (C) == 'K' ? thumb_shiftable_const (VAL) \ - : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \ - : (C) == 'M' ? ((HOST_WIDE_UINT) (VAL) < 1024 \ - && ((VAL) & 3) == 0) \ - : (C) == 'N' ? ((HOST_WIDE_UINT) (VAL) < 32) \ - : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \ - : 0) - -#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0 - -#define EXTRA_CONSTRAINT(X,C) \ - ((C) == 'Q' ? (GET_CODE (X) == MEM \ - && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0) - -/* Stack Layout and Calling Conventions */ - -#define STACK_GROWS_DOWNWARD 1 - -/* #define FRAME_GROWS_DOWNWARD 1 */ - -/* #define ARGS_GROW_DOWNWARD 1 */ - -#define STARTING_FRAME_OFFSET 0 - -#define FIRST_PARM_OFFSET(FNDECL) 0 - -/* Registers that address the stack frame */ - -#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */ - -#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */ - -#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */ - -#define STATIC_CHAIN_REGNUM 9 - -#define FRAME_POINTER_REQUIRED 0 - -#define ELIMINABLE_REGS \ -{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ - {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \ - {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} - -/* On the Thumb we always want to perform the eliminations as we - actually only have one real register pointing to the stashed - variables: the stack pointer, and we never use the frame pointer. */ -#define CAN_ELIMINATE(FROM,TO) 1 - -/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */ -#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \ -{ \ - (OFFSET) = 0; \ - if ((FROM) == ARG_POINTER_REGNUM) \ - { \ - int count_regs = 0; \ - int regno; \ - (OFFSET) += get_frame_size (); \ - for (regno = 8; regno < 13; regno++) \ - if (regs_ever_live[regno] && ! call_used_regs[regno]) \ - count_regs++; \ - if (count_regs) \ - (OFFSET) += 4 * count_regs; \ - count_regs = 0; \ - for (regno = 0; regno < 8; regno++) \ - if (regs_ever_live[regno] && ! call_used_regs[regno]) \ - count_regs++; \ - if (count_regs || ! leaf_function_p () || far_jump_used_p()) \ - (OFFSET) += 4 * (count_regs + 1); \ - } \ - if ((TO) == STACK_POINTER_REGNUM) \ - (OFFSET) += current_function_outgoing_args_size; \ -} - -/* Passing Arguments on the stack */ - -#define PROMOTE_PROTOTYPES 1 - -#define ACCUMULATE_OUTGOING_ARGS 1 - -#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0 - -#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \ - ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \ - : 0) - -#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \ - (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \ - ? int_size_in_bytes (TYPE) \ - : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \ - ? 4 - (CUM) / 4 : 0) - -#define CUMULATIVE_ARGS int - -#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \ - ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0) - -#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \ - (CUM) += ((((MODE) == BLKmode) \ - ? 
int_size_in_bytes (TYPE) \ - : GET_MODE_SIZE (MODE)) + 3) & ~3 - -#define FUNCTION_ARG_REGNO_P(REGNO) \ - ((REGNO) >=0 && (REGNO) <= 3) - -#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0) - -#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0) - -#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0) - - /* How large values are returned */ -/* A C expression which can inhibit the returning of certain function values - in registers, based on the type of value. */ -#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE) - -/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return - values must be in memory. On the ARM, they need only do so if larger - than a word, or if they contain elements offset from zero in the struct. */ -#define DEFAULT_PCC_STRUCT_RETURN 0 - - -#define STRUCT_VALUE_REGNUM 0 - -#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE)) - -#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE)) - -/* Implementing the Varargs Macros */ - -#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \ -{ \ - extern int current_function_anonymous_args; \ - current_function_anonymous_args = 1; \ - if ((CUM) < 16) \ - (PRETEND_SIZE) = 16 - (CUM); \ -} - -/* Trampolines for nested functions */ - -/* Output assembler code for a block containing the constant parts of - a trampoline, leaving space for the variable parts. - - On the Thumb we always switch into ARM mode to execute the trampoline. - Why - because it is easier. This code will always be branched to via - a BX instruction and since the compiler magically generates the address - of the function the linker has no opportunity to ensure that the - bottom bit is set. Thus the processor will be in ARM mode when it - reaches this code. So we duplicate the ARM trampoline code and add - a switch into Thumb mode as well. - - On the ARM, (if r8 is the static chain regnum, and remembering that - referencing pc adds an offset of 8) the trampoline looks like: - ldr r8, [pc, #0] - ldr pc, [pc] - .word static chain value - .word function's address - ??? FIXME: When the trampoline returns, r8 will be clobbered. */ -#define TRAMPOLINE_TEMPLATE(FILE) \ -{ \ - fprintf ((FILE), "\t.code 32\n"); \ - fprintf ((FILE), ".Ltrampoline_start:\n"); \ - fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \ - reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \ - fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \ - REGISTER_PREFIX, REGISTER_PREFIX); \ - fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \ - REGISTER_PREFIX, REGISTER_PREFIX); \ - fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \ - fprintf ((FILE), "\t.word\t0\n"); \ - fprintf ((FILE), "\t.word\t0\n"); \ - fprintf ((FILE), "\t.code 16\n"); \ -} - -/* Length in units of the trampoline for entering a nested function. */ -#define TRAMPOLINE_SIZE 24 - -/* Alignment required for a trampoline in units. 
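Putting TRAMPOLINE_TEMPLATE and TRAMPOLINE_SIZE above together, the 24-byte trampoline laid out in memory looks like this (ARM state, 4 bytes per instruction; the two data words are the slots patched later by INITIALIZE_TRAMPOLINE below):

/*   offset  0   ldr  r9, [pc, #8]     fetch the static chain word at offset 16
     offset  4   ldr  ip, [pc, #8]     fetch the target address word at offset 20
     offset  8   orr  ip, ip, #1       set the Thumb bit on the target address
     offset 12   bx   ip               enter the nested function
     offset 16   .word 0               static chain value stored here
     offset 20   .word 0               function address stored here            */

This is why INITIALIZE_TRAMPOLINE stores through ADDR + 16 and ADDR + 20, and why TRAMPOLINE_SIZE is 24.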
*/ -#define TRAMPOLINE_ALIGN 4 - -#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \ -{ \ - emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \ - (CHAIN)); \ - emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \ - (FNADDR)); \ -} - - -/* Implicit Calls to Library Routines */ - -#define TARGET_MEM_FUNCTIONS 1 - -#define OVERRIDE_OPTIONS thumb_override_options () - - -/* Addressing Modes */ - -#define HAVE_POST_INCREMENT 1 - -#define CONSTANT_ADDRESS_P(X) \ - (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X)) - -#define MAX_REGS_PER_ADDRESS 2 - -#ifdef REG_OK_STRICT - -#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) -#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) - -#define REG_MODE_OK_FOR_BASE_P(X,MODE) \ - REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE) - -#else /* REG_OK_STRICT */ - -#define REG_OK_FOR_BASE_P(X) \ - (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \ - || (X) == arg_pointer_rtx \ - || REGNO (X) >= FIRST_PSEUDO_REGISTER) - -#define REG_MODE_OK_FOR_BASE_P(X,MODE) \ - (REGNO (X) < 8 \ - || REGNO (X) >= FIRST_PSEUDO_REGISTER \ - || (GET_MODE_SIZE (MODE) >= 4 \ - && (REGNO (X) == STACK_POINTER_REGNUM \ - || (X) == arg_pointer_rtx))) - -#define REG_OK_FOR_INDEX_P(X) \ - (REGNO (X) < 8 \ - || REGNO (X) >= FIRST_PSEUDO_REGISTER) - -#endif /* REG_OK_STRICT */ - -/* In a REG+REG address, both must be INDEX registers. */ -#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X) - -#define LEGITIMATE_OFFSET(MODE,VAL) \ -(GET_MODE_SIZE (MODE) == 1 ? ((HOST_WIDE_UINT) (VAL) < 32) \ - : GET_MODE_SIZE (MODE) == 2 ? ((HOST_WIDE_UINT) (VAL) < 64 \ - && ((VAL) & 1) == 0) \ - : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \ - && ((VAL) & 3) == 0)) - -/* The AP may be eliminated to either the SP or the FP, so we use the - least common denominator, e.g. SImode, and offsets from 0 to 64. */ - -/* ??? Verify whether the above is the right approach. */ - -/* ??? Also, the FP may be eliminated to the SP, so perhaps that - needs special handling also. */ - -/* ??? Look at how the mips16 port solves this problem. It probably uses - better ways to solve some of these problems. */ - -/* Although it is not incorrect, we don't accept QImode and HImode - addresses based on the frame pointer or arg pointer until the reload pass starts. - This is so that eliminating such addresses into stack based ones - won't produce impossible code. */ -#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \ -{ \ - /* ??? Not clear if this is right. Experiment. */ \ - if (GET_MODE_SIZE (MODE) < 4 \ - && ! (reload_in_progress || reload_completed) \ - && (reg_mentioned_p (frame_pointer_rtx, X) \ - || reg_mentioned_p (arg_pointer_rtx, X) \ - || reg_mentioned_p (virtual_incoming_args_rtx, X) \ - || reg_mentioned_p (virtual_outgoing_args_rtx, X) \ - || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \ - || reg_mentioned_p (virtual_stack_vars_rtx, X))) \ - ; \ - /* Accept any base register. SP only in SImode or larger. */ \ - else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \ - goto WIN; \ - /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \ - else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \ - && CONSTANT_POOL_ADDRESS_P (X)) \ - goto WIN; \ - /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. 
*/ \ - else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \ - && (GET_CODE (X) == LABEL_REF \ - || (GET_CODE (X) == CONST \ - && GET_CODE (XEXP (X, 0)) == PLUS \ - && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \ - && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \ - goto WIN; \ - /* Post-inc indexing only supported for SImode and larger. */ \ - else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \ - && GET_CODE (XEXP (X, 0)) == REG \ - && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \ - goto WIN; \ - else if (GET_CODE (X) == PLUS) \ - { \ - /* REG+REG address can be any two index registers. */ \ - /* ??? REG+REG addresses have been completely disabled before \ - reload completes, because we do not have enough available \ - reload registers. We only have 3 guaranteed reload registers \ - (NONARG_LO_REGS - the frame pointer), but we need at least 4 \ - to support REG+REG addresses. We have left them enabled after \ - reload completes, in the hope that reload_cse_regs and related \ - routines will be able to create them after the fact. It is \ - probably possible to support REG+REG addresses with additional \ - reload work, but I do not not have enough time to attempt such \ - a change at this time. */ \ - /* ??? Normally checking the mode here is wrong, since it isn't \ - impossible to use REG+REG with DFmode. However, the movdf \ - pattern requires offsettable addresses, and REG+REG is not \ - offsettable, so it must be rejected somehow. Trying to use \ - 'o' fails, because offsettable_address_p does a QImode check. \ - QImode is not valid for stack addresses, and has a smaller \ - range for non-stack bases, and this causes valid addresses \ - to be rejected. So we just eliminate REG+REG here by checking \ - the mode. */ \ - /* We also disallow FRAME+REG addressing since we know that FRAME \ - will be replaced with STACK, and SP relative addressing only \ - permits SP+OFFSET. */ \ - if (GET_MODE_SIZE (MODE) <= 4 \ - /* ??? See comment above. */ \ - && reload_completed \ - && GET_CODE (XEXP (X, 0)) == REG \ - && GET_CODE (XEXP (X, 1)) == REG \ - && XEXP (X, 0) != frame_pointer_rtx \ - && XEXP (X, 1) != frame_pointer_rtx \ - && XEXP (X, 0) != virtual_stack_vars_rtx \ - && XEXP (X, 1) != virtual_stack_vars_rtx \ - && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \ - && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \ - goto WIN; \ - /* REG+const has 5-7 bit offset for non-SP registers. */ \ - else if (GET_CODE (XEXP (X, 0)) == REG \ - && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \ - || XEXP (X, 0) == arg_pointer_rtx) \ - && GET_CODE (XEXP (X, 1)) == CONST_INT \ - && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \ - goto WIN; \ - /* REG+const has 10 bit offset for SP, but only SImode and \ - larger is supported. */ \ - /* ??? Should probably check for DI/DFmode overflow here \ - just like GO_IF_LEGITIMATE_OFFSET does. */ \ - else if (GET_CODE (XEXP (X, 0)) == REG \ - && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \ - && GET_MODE_SIZE (MODE) >= 4 \ - && GET_CODE (XEXP (X, 1)) == CONST_INT \ - && (HOST_WIDE_UINT) INTVAL (XEXP (X, 1)) < 1024 \ - && (INTVAL (XEXP (X, 1)) & 3) == 0) \ - goto WIN; \ - } \ -} - -/* ??? If an HImode FP+large_offset address is converted to an HImode - SP+large_offset address, then reload won't know how to fix it. It sees - only that SP isn't valid for HImode, and so reloads the SP into an index - register, but the resulting address is still invalid because the offset - is too big. We fix it here instead by reloading the entire address. 
*/ -/* We could probably achieve better results by defining PROMOTE_MODE to help - cope with the variances between the Thumb's signed and unsigned byte and - halfword load instructions. */ -#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \ -{ \ - if (GET_CODE (X) == PLUS \ - && GET_MODE_SIZE (MODE) < 4 \ - && GET_CODE (XEXP (X, 0)) == REG \ - && XEXP (X, 0) == stack_pointer_rtx \ - && GET_CODE (XEXP (X, 1)) == CONST_INT \ - && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \ - { \ - rtx orig_X = X; \ - X = copy_rtx (X); \ - push_reload (orig_X, NULL_RTX, &X, NULL, \ - BASE_REG_CLASS, \ - Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \ - goto WIN; \ - } \ -} - -#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) - -#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) - -#define LEGITIMATE_CONSTANT_P(X) \ - (GET_CODE (X) == CONST_INT \ - || GET_CODE (X) == CONST_DOUBLE \ - || CONSTANT_ADDRESS_P (X)) - - -/* Condition Code Status */ - -#define NOTICE_UPDATE_CC(EXP,INSN) \ -{ \ - if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \ - CC_STATUS_INIT; \ -} - - -/* Describing Relative Costs of Operations */ - -#define SLOW_BYTE_ACCESS 0 - -#define SLOW_UNALIGNED_ACCESS 1 - -#define NO_FUNCTION_CSE 1 - -#define NO_RECURSIVE_FUNCTION_CSE 1 - -#define REGISTER_MOVE_COST(FROM,TO) \ - (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2) - -#define MEMORY_MOVE_COST(M,CLASS,IN) \ - ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2)) - -/* This will allow better space optimization when compiling with -O */ -#define BRANCH_COST (optimize > 1 ? 1 : 0) - -#define RTX_COSTS(X,CODE,OUTER) \ - case MULT: \ - if (GET_CODE (XEXP (X, 1)) == CONST_INT) \ - { \ - int cycles = 0; \ - HOST_WIDE_UINT i = INTVAL (XEXP (X, 1)); \ - while (i) \ - { \ - i >>= 2; \ - cycles++; \ - } \ - return COSTS_N_INSNS (2) + cycles; \ - } \ - return COSTS_N_INSNS (1) + 16; \ - case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \ - case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \ - return COSTS_N_INSNS (1); \ - case SET: \ - return (COSTS_N_INSNS (1) \ - + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \ - + GET_CODE (SET_DEST (X)) == MEM)) - -#define CONST_COSTS(X,CODE,OUTER) \ - case CONST_INT: \ - if ((OUTER) == SET) \ - { \ - if ((HOST_WIDE_UINT) INTVAL (X) < 256) \ - return 0; \ - if (thumb_shiftable_const (INTVAL (X))) \ - return COSTS_N_INSNS (2); \ - return COSTS_N_INSNS (3); \ - } \ - else if (OUTER == PLUS \ - && INTVAL (X) < 256 && INTVAL (X) > -256) \ - return 0; \ - else if (OUTER == COMPARE \ - && (HOST_WIDE_UINT) INTVAL (X) < 256) \ - return 0; \ - else if (OUTER == ASHIFT || OUTER == ASHIFTRT \ - || OUTER == LSHIFTRT) \ - return 0; \ - return COSTS_N_INSNS (2); \ - case CONST: \ - case CONST_DOUBLE: \ - case LABEL_REF: \ - case SYMBOL_REF: \ - return COSTS_N_INSNS(3); - -#define ADDRESS_COST(X) \ - ((GET_CODE (X) == REG \ - || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \ - && GET_CODE (XEXP (X, 1)) == CONST_INT)) \ - ? 
1 : 2) - - -/* Position Independent Code */ - -#define PRINT_OPERAND(STREAM,X,CODE) \ - thumb_print_operand((STREAM), (X), (CODE)) - -#define PRINT_OPERAND_ADDRESS(STREAM,X) \ -{ \ - if (GET_CODE ((X)) == REG) \ - fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \ - else if (GET_CODE ((X)) == POST_INC) \ - fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \ - else if (GET_CODE ((X)) == PLUS) \ - { \ - if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \ - fprintf ((STREAM), "[%s, #%d]", \ - reg_names[REGNO (XEXP ((X), 0))], \ - (int) INTVAL (XEXP ((X), 1))); \ - else \ - fprintf ((STREAM), "[%s, %s]", \ - reg_names[REGNO (XEXP ((X), 0))], \ - reg_names[REGNO (XEXP ((X), 1))]); \ - } \ - else \ - output_addr_const ((STREAM), (X)); \ -} - -#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_')) - -#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \ - asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)]) - -#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \ - fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)]) - -#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \ - final_prescan_insn((INSN)) - -/* Controlling Debugging Information Format */ -#define DBX_REGISTER_NUMBER(REGNO) (REGNO) - -/* Specific options for DBX Output */ - -#define DBX_DEBUGGING_INFO 1 - -#define DEFAULT_GDB_EXTENSIONS 1 - - -/* Cross Compilation and Floating Point */ - -#define REAL_ARITHMETIC - - -/* Miscellaneous Parameters */ - -#define PREDICATE_CODES \ - {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}}, - -#define CASE_VECTOR_MODE Pmode - -#define WORD_REGISTER_OPERATIONS - -#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND - -#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR - -#define EASY_DIV_EXPR TRUNC_DIV_EXPR - -#define MOVE_MAX 4 - -#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1 - -#define STORE_FLAG_VALUE 1 - -#define Pmode SImode - -#define FUNCTION_MODE SImode - -#define NO_DOLLAR_IN_LABEL 1 - -#define HAVE_ATEXIT - -/* The literal pool needs to reside in the text area due to the - limited PC addressing range: */ -#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN)) - -#include - -enum machine_mode; - -struct rtx_def; -typedef struct rtx_def *rtx; - -union tree_node; -typedef union tree_node *tree; - -extern int thumb_cmp_operand(rtx, enum machine_mode); -extern void thumb_reorg(rtx first); -extern void thumb_expand_movstrqi(rtx *); -extern void thumb_reload_out_si(rtx); -extern void final_prescan_insn(rtx); -extern int far_jump_used_p(); -extern void thumb_function_prologue(FILE *, int); -extern void thumb_expand_prologue(); -extern void thumb_function_epilogue(FILE *, int); -extern void thumb_expand_epilogue(); -extern char *thumb_unexpanded_epilogue(); -extern char *thumb_load_double_from_address(); -extern char *output_move_mem_multiple(); -extern void thumb_print_operand(FILE *, rtx, int); -extern int thumb_return_in_memory(tree); -extern void thumb_override_options(); -extern int arm_valid_machine_decl_attribute(tree, tree, tree, tree); -extern int s_register_operand(rtx, enum machine_mode); diff --git a/gcc/config/arm/thumb.md b/gcc/config/arm/thumb.md deleted file mode 100755 index fe37c9c..0000000 --- a/gcc/config/arm/thumb.md +++ /dev/null @@ -1,1157 +0,0 @@ -;; thumb.md Machine description for ARM/Thumb processors -;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc. -;; The basis of this contribution was generated by -;; Richard Earnshaw, Advanced RISC Machines Ltd - -;; This file is part of GNU CC. 
- -;; GNU CC is free software; you can redistribute it and/or modify -;; it under the terms of the GNU General Public License as published by -;; the Free Software Foundation; either version 2, or (at your option) -;; any later version. - -;; GNU CC is distributed in the hope that it will be useful, -;; but WITHOUT ANY WARRANTY; without even the implied warranty of -;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -;; GNU General Public License for more details. - -;; You should have received a copy of the GNU General Public License -;; along with GNU CC; see the file COPYING. If not, write to -;; the Free Software Foundation, 59 Temple Place - Suite 330, -;; Boston, MA 02111-1307, USA. - -;; LENGTH of an instruction is 2 bytes -(define_attr "length" "" (const_int 2)) - -;; CONDS is set to UNCHANGED when an insn does not affect the condition codes -;; Most insns change the condition codes -(define_attr "conds" "changed,unchanged" (const_string "changed")) - -;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a -;; distant label. -(define_attr "far_jump" "yes,no" (const_string "no")) - -;; Start with move insns - -(define_expand "movsi" - [(set (match_operand:SI 0 "general_operand" "") - (match_operand:SI 1 "general_operand" ""))] - "" - " - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (SImode, operands[1]); - } -") - -(define_insn "*movsi_insn" - [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h") - (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))] - "register_operand (operands[0], SImode) - || register_operand (operands[1], SImode)" - "@ - add\\t%0, %1, #0 - mov\\t%0, %1 - # - # - ldmia\\t%1, {%0} - stmia\\t%0, {%1} - ldr\\t%0, %1 - str\\t%1, %0 - mov\\t%0, %1 - mov\\t%0, %1" -[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")]) - -(define_split - [(set (match_operand:SI 0 "register_operand" "") - (match_operand:SI 1 "const_int_operand" ""))] - "thumb_shiftable_const (INTVAL (operands[1]))" - [(set (match_dup 0) (match_dup 1)) - (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))] - " -{ - HOST_WIDE_UINT val = INTVAL (operands[1]); - HOST_WIDE_UINT mask = 0xff; - int i; - for (i = 0; i < 25; i++) - if ((val & (mask << i)) == val) - break; - - if (i == 0) - FAIL; - - operands[1] = GEN_INT (val >> i); - operands[2] = GEN_INT (i); -}") - -(define_split - [(set (match_operand:SI 0 "register_operand" "") - (match_operand:SI 1 "const_int_operand" ""))] - "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256" - [(set (match_dup 0) (match_dup 1)) - (set (match_dup 0) (neg:SI (match_dup 0)))] - " - operands[1] = GEN_INT (- INTVAL (operands[1])); -") - -(define_expand "movhi" - [(set (match_operand:HI 0 "general_operand" "") - (match_operand:HI 1 "general_operand" ""))] - "" - " -{ - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (HImode, operands[1]); - - /* ??? We shouldn't really get invalid addresses here, but this can - happen if we are passed a SP (never OK for HImode/QImode) or virtual - register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode) - relative address. */ - /* ??? This should perhaps be fixed elsewhere, for instance, in - fixup_stack_1, by checking for other kinds of invalid addresses, - e.g. a bare reference to a virtual register. This may confuse the - alpha though, which must handle this case differently. */ - if (GET_CODE (operands[0]) == MEM - && ! 
memory_address_p (GET_MODE (operands[0]), - XEXP (operands[0], 0))) - { - rtx temp = copy_to_reg (XEXP (operands[0], 0)); - operands[0] = change_address (operands[0], VOIDmode, temp); - } - if (GET_CODE (operands[1]) == MEM - && ! memory_address_p (GET_MODE (operands[1]), - XEXP (operands[1], 0))) - { - rtx temp = copy_to_reg (XEXP (operands[1], 0)); - operands[1] = change_address (operands[1], VOIDmode, temp); - } - } - /* Handle loading a large integer during reload */ - else if (GET_CODE (operands[1]) == CONST_INT - && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I')) - { - /* Writing a constant to memory needs a scratch, which should - be handled with SECONDARY_RELOADs. */ - if (GET_CODE (operands[0]) != REG) - abort (); - - operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0); - emit_insn (gen_movsi (operands[0], operands[1])); - DONE; - } -}") - -(define_insn "*movhi_insn" - [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l") - (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))] - "register_operand (operands[0], HImode) - || register_operand (operands[1], HImode)" - "@ - add\\t%0, %1, #0 - ldrh\\t%0, %1 - strh\\t%1, %0 - mov\\t%0, %1 - mov\\t%0, %1 - mov\\t%0, %1") - -(define_expand "movqi" - [(set (match_operand:QI 0 "general_operand" "") - (match_operand:QI 1 "general_operand" ""))] - "" - " -{ - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (QImode, operands[1]); - - /* ??? We shouldn't really get invalid addresses here, but this can - happen if we are passed a SP (never OK for HImode/QImode) or virtual - register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode) - relative address. */ - /* ??? This should perhaps be fixed elsewhere, for instance, in - fixup_stack_1, by checking for other kinds of invalid addresses, - e.g. a bare reference to a virtual register. This may confuse the - alpha though, which must handle this case differently. */ - if (GET_CODE (operands[0]) == MEM - && ! memory_address_p (GET_MODE (operands[0]), - XEXP (operands[0], 0))) - { - rtx temp = copy_to_reg (XEXP (operands[0], 0)); - operands[0] = change_address (operands[0], VOIDmode, temp); - } - if (GET_CODE (operands[1]) == MEM - && ! memory_address_p (GET_MODE (operands[1]), - XEXP (operands[1], 0))) - { - rtx temp = copy_to_reg (XEXP (operands[1], 0)); - operands[1] = change_address (operands[1], VOIDmode, temp); - } - } - /* Handle loading a large integer during reload */ - else if (GET_CODE (operands[1]) == CONST_INT - && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I')) - { - /* Writing a constant to memory needs a scratch, which should - be handled with SECONDARY_RELOADs. */ - if (GET_CODE (operands[0]) != REG) - abort (); - - operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0); - emit_insn (gen_movsi (operands[0], operands[1])); - DONE; - } -}") - -(define_insn "*movqi_insn" - [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l") - (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))] - "register_operand (operands[0], QImode) - || register_operand (operands[1], QImode)" - "@ - add\\t%0, %1, #0 - ldrb\\t%0, %1 - strb\\t%1, %0 - mov\\t%0, %1 - mov\\t%0, %1 - mov\\t%0, %1") - -(define_expand "movdi" - [(set (match_operand:DI 0 "general_operand" "") - (match_operand:DI 1 "general_operand" ""))] - "" - " - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (DImode, operands[1]); - } -") - -;;; ??? 
This should have alternatives for constants. -;;; ??? This was originally identical to the movdf_insn pattern. -;;; ??? The 'i' constraint looks funny, but it should always be replaced by -;;; thumb_reorg with a memory reference. -(define_insn "*movdi_insn" - [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r") - (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))] - "register_operand (operands[0], DImode) - || register_operand (operands[1], DImode)" - "* -{ - switch (which_alternative) - { - case 0: - if (REGNO (operands[1]) == REGNO (operands[0]) + 1) - return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\"; - return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\"; - case 1: - return \"mov\\t%Q0, %1\;mov\\t%R0, #0\"; - case 2: - operands[1] = GEN_INT (- INTVAL (operands[1])); - return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\"; - case 3: - return \"ldmia\\t%1, {%0, %H0}\"; - case 4: - return \"stmia\\t%0, {%1, %H1}\"; - case 5: - return thumb_load_double_from_address (operands); - case 6: - operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4)); - output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands); - return \"\"; - case 7: - if (REGNO (operands[1]) == REGNO (operands[0]) + 1) - return \"mov\\t%0, %1\;mov\\t%H0, %H1\"; - return \"mov\\t%H0, %H1\;mov\\t%0, %1\"; - } -}"[(set_attr "length" "4,4,6,2,2,6,4,4")]) - -(define_expand "movdf" - [(set (match_operand:DF 0 "general_operand" "") - (match_operand:DF 1 "general_operand" ""))] - "" - " - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (DFmode, operands[1]); - } -") - -;;; ??? This should have alternatives for constants. -;;; ??? This was originally identical to the movdi_insn pattern. -;;; ??? The 'F' constraint looks funny, but it should always be replaced by -;;; thumb_reorg with a memory reference. -(define_insn "*movdf_insn" - [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r") - (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))] - "register_operand (operands[0], DFmode) - || register_operand (operands[1], DFmode)" - "* - switch (which_alternative) - { - case 0: - if (REGNO (operands[1]) == REGNO (operands[0]) + 1) - return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\"; - return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\"; - case 1: - return \"ldmia\\t%1, {%0, %H0}\"; - case 2: - return \"stmia\\t%0, {%1, %H1}\"; - case 3: - return thumb_load_double_from_address (operands); - case 4: - operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4)); - output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands); - return \"\"; - case 5: - if (REGNO (operands[1]) == REGNO (operands[0]) + 1) - return \"mov\\t%0, %1\;mov\\t%H0, %H1\"; - return \"mov\\t%H0, %H1\;mov\\t%0, %1\"; - } -"[(set_attr "length" "4,2,2,6,4,4")]) - -(define_expand "movsf" - [(set (match_operand:SF 0 "general_operand" "") - (match_operand:SF 1 "general_operand" ""))] - "" - " - if (! (reload_in_progress || reload_completed)) - { - if (GET_CODE (operands[0]) != REG) - operands[1] = force_reg (SFmode, operands[1]); - } -") - -;;; ??? This should have alternatives for constants. 
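The movdi and movdf output templates above pick the order of the two word moves so that an overlapping source register pair is never clobbered before it is read. A minimal C sketch of that ordering rule follows; the regfile structure, the function name and the register numbers are invented purely for illustration and are not part of the port.

#include <assert.h>

/* Sketch of the overlap rule used by the movdi/movdf templates above:
   a 64-bit value occupies two adjacent 32-bit registers, and when the
   source low word lives in the destination's high register the low
   word must be copied first; otherwise the high word is copied first,
   which also covers the dst == src + 1 overlap and disjoint pairs.  */
struct regfile { unsigned r[8]; };

static void
move_double (struct regfile *rf, int dst, int src)
{
  if (src == dst + 1)
    {
      /* Copy low first so it is read before being overwritten.  */
      rf->r[dst]     = rf->r[src];
      rf->r[dst + 1] = rf->r[src + 1];
    }
  else
    {
      /* High word first is safe here.  */
      rf->r[dst + 1] = rf->r[src + 1];
      rf->r[dst]     = rf->r[src];
    }
}

int
main (void)
{
  struct regfile rf = { { 0, 0, 0, 0x1111, 0x2222, 0, 0, 0 } };
  move_double (&rf, 2, 3);   /* pairs {r2,r3} <- {r3,r4} overlap */
  assert (rf.r[2] == 0x1111 && rf.r[3] == 0x2222);
  return 0;
}
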
-(define_insn "*movsf_insn" - [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h") - (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))] - "register_operand (operands[0], SFmode) - || register_operand (operands[1], SFmode)" - "@ - add\\t%0, %1, #0 - ldmia\\t%1, {%0} - stmia\\t%0, {%1} - ldr\\t%0, %1 - str\\t%1, %0 - mov\\t%0, %1 - mov\\t%0, %1") - -;; Widening move insns - -(define_expand "zero_extendhisi2" - [(set (match_operand:SI 0 "s_register_operand" "") - (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))] - "" - " - if (GET_CODE (operands[1]) != MEM) - { - rtx temp = gen_reg_rtx (SImode); - - operands[1] = force_reg (HImode, operands[1]); - operands[1] = gen_lowpart (SImode, operands[1]); - emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16))); - emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16))); - DONE; - } -") - -(define_insn "*zero_extendhisi2_insn" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))] - "" - "ldrh\\t%0, %1") - -(define_expand "zero_extendqisi2" - [(set (match_operand:SI 0 "s_register_operand" "") - (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))] - "" - " - if (GET_CODE (operands[1]) != MEM) - { - rtx temp = gen_reg_rtx (SImode); - - operands[1] = force_reg (QImode, operands[1]); - operands[1] = gen_lowpart (SImode, operands[1]); - emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24))); - emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24))); - DONE; - } -") - -(define_insn "*zero_extendqisi2_insn" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))] - "" - "ldrb\\t%0, %1") - -(define_expand "extendhisi2" - [(parallel [(set (match_operand:SI 0 "s_register_operand" "") - (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" ""))) - (clobber (match_scratch:SI 2 ""))])] - "" - " - if (GET_CODE (operands[1]) != MEM) - { - rtx temp = gen_reg_rtx (SImode); - - operands[1] = force_reg (HImode, operands[1]); - operands[1] = gen_lowpart (SImode, operands[1]); - emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16))); - emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16))); - DONE; - } -") - -(define_insn "*extendhisi2_insn" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (sign_extend:SI (match_operand:HI 1 "memory_operand" "m"))) - (clobber (match_scratch:SI 2 "=&l"))] - "" - "* -{ - rtx ops[4]; - /* This code used to try to use 'V', and fix the address only if it was - offsettable, but this fails for e.g. REG+48 because 48 is outside the - range of QImode offsets, and offsettable_address_p does a QImode - address check. 
*/ - - if (GET_CODE (XEXP (operands[1], 0)) == PLUS) - { - ops[1] = XEXP (XEXP (operands[1], 0), 0); - ops[2] = XEXP (XEXP (operands[1], 0), 1); - } - else - { - ops[1] = XEXP (operands[1], 0); - ops[2] = const0_rtx; - } - if (GET_CODE (ops[2]) == REG) - return \"ldrsh\\t%0, %1\"; - - ops[0] = operands[0]; - ops[3] = operands[2]; - output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops); - return \"\"; -}" -[(set_attr "length" "4")]) - -(define_expand "extendqisi2" - [(set (match_operand:SI 0 "s_register_operand" "") - (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))] - "" - " - if (GET_CODE (operands[1]) != MEM) - { - rtx temp = gen_reg_rtx (SImode); - - operands[1] = force_reg (QImode, operands[1]); - operands[1] = gen_lowpart (SImode, operands[1]); - emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24))); - emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24))); - DONE; - } -") - -(define_insn "*extendqisi2_insn" - [(set (match_operand:SI 0 "s_register_operand" "=l,l") - (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))] - "" - "* -{ - rtx ops[3]; - - if (which_alternative == 0) - return \"ldrsb\\t%0, %1\"; - ops[0] = operands[0]; - if (GET_CODE (XEXP (operands[1], 0)) == PLUS) - { - ops[1] = XEXP (XEXP (operands[1], 0), 0); - ops[2] = XEXP (XEXP (operands[1], 0), 1); - - if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG) - output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops); - else if (GET_CODE (ops[1]) == REG) - { - if (REGNO (ops[1]) == REGNO (operands[0])) - output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); - else - output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); - } - else - { - if (REGNO (ops[2]) == REGNO (operands[0])) - output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); - else - output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); - } - } - else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0))) - { - output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); - } - else - { - ops[1] = XEXP (operands[1], 0); - ops[2] = const0_rtx; - output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); - } - return \"\"; -}" -[(set_attr "length" "2,6")]) - -;; We don't really have extzv, but defining this using shifts helps -;; to reduce register pressure later on. 
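The comment above describes extzv as nothing more than a pair of shifts, and the expander that follows computes lshift = 32 - width - pos and rshift = 32 - width. A small C illustration of the same arithmetic is given here; the function name is invented for the sketch, pos counts from the least significant bit, and pos + width <= 32 is assumed, matching the const_int operands of the pattern.

#include <stdint.h>
#include <assert.h>

/* Zero-extended bit-field extraction the way the extzv expander below
   does it: one left shift to push the field against bit 31, then one
   logical right shift to pull it down to bit 0.  */
static uint32_t
extract_unsigned_field (uint32_t x, unsigned pos, unsigned width)
{
  unsigned lshift = 32 - width - pos;   /* the rewritten operands[2] */
  unsigned rshift = 32 - width;         /* the rewritten operands[3] */

  if (lshift == 0)                      /* field already ends at bit 31 */
    return x >> rshift;
  return (x << lshift) >> rshift;
}

int
main (void)
{
  /* Bits [4, 12) of 0xABCD are 0xBC.  */
  assert (extract_unsigned_field (0xABCD, 4, 8) == 0xBC);
  return 0;
}
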
- -(define_expand "extzv" - [(set (match_dup 4) - (ashift:SI (match_operand:SI 1 "register_operand" "") - (match_operand:SI 2 "const_int_operand" ""))) - (set (match_operand:SI 0 "register_operand" "") - (lshiftrt:SI (match_dup 4) - (match_operand:SI 3 "const_int_operand" "")))] - "" - " -{ - HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]); - HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]); - operands[3] = GEN_INT (rshift); - if (lshift == 0) - { - emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3])); - DONE; - } - operands[2] = GEN_INT (lshift); - operands[4] = gen_reg_rtx (SImode); -} -") - -;; Block-move insns - -(define_expand "movstrqi" - [(match_operand:BLK 0 "general_operand" "") - (match_operand:BLK 1 "general_operand" "") - (match_operand:SI 2 "" "") - (match_operand:SI 3 "const_int_operand" "")] - "" - " - if (INTVAL (operands[3]) != 4 - || GET_CODE (operands[2]) != CONST_INT - || INTVAL (operands[2]) > 48) - FAIL; - - thumb_expand_movstrqi (operands); - DONE; -") - -(define_insn "movmem12b" - [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l")) - (mem:SI (match_operand:SI 1 "register_operand" "+&l"))) - (set (mem:SI (plus:SI (match_dup 0) (const_int 4))) - (mem:SI (plus:SI (match_dup 1) (const_int 4)))) - (set (mem:SI (plus:SI (match_dup 0) (const_int 8))) - (mem:SI (plus:SI (match_dup 1) (const_int 8)))) - (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12))) - (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12))) - (clobber (match_scratch:SI 2 "=&l")) - (clobber (match_scratch:SI 3 "=&l")) - (clobber (match_scratch:SI 4 "=&l"))] - "" - "* return output_move_mem_multiple (3, operands);" -[(set_attr "length" "4")]) - -(define_insn "movmem8b" - [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l")) - (mem:SI (match_operand:SI 1 "register_operand" "+&l"))) - (set (mem:SI (plus:SI (match_dup 0) (const_int 4))) - (mem:SI (plus:SI (match_dup 1) (const_int 4)))) - (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8))) - (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8))) - (clobber (match_scratch:SI 2 "=&l")) - (clobber (match_scratch:SI 3 "=&l"))] - "" - "* return output_move_mem_multiple (2, operands);" -[(set_attr "length" "4")]) - -;; Arithmetic insns - -(define_insn "adddi3" - [(set (match_operand:DI 0 "s_register_operand" "=l") - (plus:DI (match_operand:DI 1 "s_register_operand" "%0") - (match_operand:DI 2 "s_register_operand" "l")))] - "" - "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2" -[(set_attr "conds" "changed") - (set_attr "length" "8")]) - -;; register group 'k' is a single register group containing only the stack -;; register. Trying to reload it will always fail catastrophically, -;; so never allow those alternatives to match if reloading is needed. -(define_insn "addsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k") - (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k") - (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))] - "" - "* - static char *asms[] = -{ - \"add\\t%0, %0, %2\", - \"sub\\t%0, %0, #%n2\", - \"add\\t%0, %1, %2\", - \"add\\t%0, %0, %2\", - \"add\\t%0, %0, %2\", - \"add\\t%0, %1, %2\", - \"add\\t%0, %1, %2\" -}; - if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT - && INTVAL (operands[2]) < 0) - return \"sub\\t%0, %1, #%n2\"; - return asms[which_alternative]; -") - -; reloading and elimination of the frame pointer can sometimes cause this -; optimization to be missed. 
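Before the peephole that follows, it is worth spelling out how the addsi3 output template above prints a small negative constant: Thumb add/sub immediates are unsigned, so an addition of a negative value is emitted as a subtraction of its negation, which is what the %n2 output modifier produces. A rough C sketch of that decision is shown here; the helper name and the register strings are invented for the sketch and do not correspond to anything in the port.

#include <stdio.h>

/* Rough model of the ADD-vs-SUB choice made by the addsi3 template:
   negative immediates are printed as a SUB of the negated constant.  */
static void
print_add_immediate (FILE *stream, const char *rd, const char *rn, long imm)
{
  if (imm < 0)
    fprintf (stream, "\tsub\t%s, %s, #%ld\n", rd, rn, -imm);
  else
    fprintf (stream, "\tadd\t%s, %s, #%ld\n", rd, rn, imm);
}

int
main (void)
{
  print_add_immediate (stdout, "r0", "r1", 4);    /* add  r0, r1, #4 */
  print_add_immediate (stdout, "r0", "r1", -4);   /* sub  r0, r1, #4 */
  return 0;
}
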
-(define_peephole - [(set (match_operand:SI 0 "register_operand" "=l") - (match_operand:SI 1 "const_int_operand" "M")) - (set (match_dup 0) - (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))] - "REGNO (operands[2]) == STACK_POINTER_REGNUM - && (HOST_WIDE_UINT) (INTVAL (operands[1])) < 1024 - && (INTVAL (operands[1]) & 3) == 0" - "add\\t%0, %2, %1") - -(define_insn "subdi3" - [(set (match_operand:DI 0 "s_register_operand" "=l") - (minus:DI (match_operand:DI 1 "s_register_operand" "0") - (match_operand:DI 2 "s_register_operand" "l")))] - "" - "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2" -[(set_attr "conds" "changed") - (set_attr "length" "8")]) - -(define_insn "subsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (minus:SI (match_operand:SI 1 "s_register_operand" "l") - (match_operand:SI 2 "s_register_operand" "l")))] - "" - "sub\\t%0, %1, %2") - -;; We must ensure that one input matches the output, and that the other input -;; does not match the output. Using 0 satisfies the first, and using & -;; satisfies the second. Unfortunately, this fails when operands 1 and 2 -;; are the same, because reload will make operand 0 match operand 1 without -;; realizing that this conflicts with operand 2. We fix this by adding another -;; alternative to match this case, and then `reload' it ourselves. This -;; alternative must come first. -(define_insn "mulsi3" - [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l") - (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0") - (match_operand:SI 2 "s_register_operand" "l,l,l")))] - "" - "* -{ - if (which_alternative < 2) - return \"mov\\t%0, %1\;mul\\t%0, %0, %2\"; - else - return \"mul\\t%0, %0, %2\"; -}" - [(set_attr "length" "4,4,2")]) - -(define_insn "negsi2" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (neg:SI (match_operand:SI 1 "s_register_operand" "l")))] - "" - "neg\\t%0, %1") - -;; Logical insns - -(define_expand "andsi3" - [(set (match_operand:SI 0 "s_register_operand" "") - (and:SI (match_operand:SI 1 "s_register_operand" "") - (match_operand:SI 2 "nonmemory_operand" "")))] - "" - " - if (GET_CODE (operands[2]) != CONST_INT) - operands[2] = force_reg (SImode, operands[2]); - else - { - int i; - if (((HOST_WIDE_UINT) ~ INTVAL (operands[2])) < 256) - { - operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2]))); - emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1])); - DONE; - } - - for (i = 9; i <= 31; i++) - if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2])) - { - emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i), - const0_rtx)); - DONE; - } - else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2])) - { - rtx shift = GEN_INT (i); - rtx reg = gen_reg_rtx (SImode); - emit_insn (gen_lshrsi3 (reg, operands[1], shift)); - emit_insn (gen_ashlsi3 (operands[0], reg, shift)); - DONE; - } - - operands[2] = force_reg (SImode, operands[2]); - } -") - -(define_insn "*andsi3_insn" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (and:SI (match_operand:SI 1 "s_register_operand" "%0") - (match_operand:SI 2 "s_register_operand" "l")))] - "" - "and\\t%0, %0, %2") - -(define_insn "bicsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l")) - (match_operand:SI 2 "s_register_operand" "0")))] - "" - "bic\\t%0, %0, %1") - -(define_insn "iorsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (ior:SI (match_operand:SI 1 "s_register_operand" "%0") - (match_operand:SI 2 "s_register_operand" 
"l")))] - "" - "orr\\t%0, %0, %2") - -(define_insn "xorsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (xor:SI (match_operand:SI 1 "s_register_operand" "%0") - (match_operand:SI 2 "s_register_operand" "l")))] - "" - "eor\\t%0, %0, %2") - -(define_insn "one_cmplsi2" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (not:SI (match_operand:SI 1 "s_register_operand" "l")))] - "" - "mvn\\t%0, %1") - -;; Shift and rotation insns - -(define_insn "ashlsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l,l") - (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0") - (match_operand:SI 2 "nonmemory_operand" "N,l")))] - "" - "@ - lsl\\t%0, %1, %2 - lsl\\t%0, %0, %2") - -(define_insn "ashrsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l,l") - (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0") - (match_operand:SI 2 "nonmemory_operand" "N,l")))] - "" - "@ - asr\\t%0, %1, %2 - asr\\t%0, %0, %2") - -(define_insn "lshrsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l,l") - (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0") - (match_operand:SI 2 "nonmemory_operand" "N,l")))] - "" - "@ - lsr\\t%0, %1, %2 - lsr\\t%0, %0, %2") - -(define_insn "rotrsi3" - [(set (match_operand:SI 0 "s_register_operand" "=l") - (rotatert:SI (match_operand:SI 1 "s_register_operand" "0") - (match_operand:SI 2 "s_register_operand" "l")))] - "" - "ror\\t%0, %0, %2") - -;; Comparison insns - -(define_expand "cmpsi" - [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "") - (match_operand:SI 1 "nonmemory_operand" "")))] - "" - " - if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG) - { - if (GET_CODE (operands[1]) != CONST_INT - || (HOST_WIDE_UINT) (INTVAL (operands[1])) >= 256) - { - if (GET_CODE (operands[1]) != CONST_INT - || INTVAL (operands[1]) < -255 - || INTVAL (operands[1]) > 0) - operands[1] = force_reg (SImode, operands[1]); - else - { - operands[1] = force_reg (SImode, - GEN_INT (- INTVAL (operands[1]))); - emit_insn (gen_cmnsi (operands[0], operands[1])); - DONE; - } - } - } -") - -(define_insn "*cmpsi_insn" - [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h") - (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))] - "" - "@ - cmp\\t%0, %1 - cmp\\t%0, %1 - cmp\\t%0, %1") - -(define_insn "tstsi" - [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))] - "" - "cmp\\t%0, #0") - -(define_insn "cmnsi" - [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l") - (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))] - "" - "cmn\\t%0, %1") - -;; Jump insns - -(define_insn "jump" - [(set (pc) (label_ref (match_operand 0 "" "")))] - "" - "* - if (get_attr_length (insn) == 2) - return \"b\\t%l0\"; - return \"bl\\t%l0\\t%@ far jump\"; -"[(set (attr "far_jump") - (if_then_else (eq_attr "length" "4") - (const_string "yes") - (const_string "no"))) - (set (attr "length") - (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048)) - (le (minus (match_dup 0) (pc)) (const_int 2044))) - (const_int 2) - (const_int 4)))]) - - -(define_expand "beq" - [(set (pc) (if_then_else (eq (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bne" - [(set (pc) (if_then_else (ne (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bge" - [(set (pc) (if_then_else (ge (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "ble" - [(set (pc) (if_then_else (le (cc0) (const_int 0)) - 
(label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bgt" - [(set (pc) (if_then_else (gt (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "blt" - [(set (pc) (if_then_else (lt (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bgeu" - [(set (pc) (if_then_else (geu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bleu" - [(set (pc) (if_then_else (leu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bgtu" - [(set (pc) (if_then_else (gtu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_expand "bltu" - [(set (pc) (if_then_else (ltu (cc0) (const_int 0)) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "") - -(define_insn "*cond_branch" - [(set (pc) (if_then_else (match_operator 1 "comparison_operator" - [(cc0) (const_int 0)]) - (label_ref (match_operand 0 "" "")) - (pc)))] - "" - "* - switch (get_attr_length (insn)) - { - case 2: return \"b%d1\\t%l0\\t%@cond_branch\"; - case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\"; - default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\"; - } -"[(set (attr "far_jump") - (if_then_else (eq_attr "length" "6") - (const_string "yes") - (const_string "no"))) - (set (attr "length") - (if_then_else - (and (ge (minus (match_dup 0) (pc)) (const_int -252)) - (le (minus (match_dup 0) (pc)) (const_int 254))) - (const_int 2) - (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044)) - (le (minus (match_dup 0) (pc)) (const_int 2044))) - (const_int 4) - (const_int 6))))]) - -(define_insn "*cond_branch_reversed" - [(set (pc) (if_then_else (match_operator 1 "comparison_operator" - [(cc0) (const_int 0)]) - (pc) - (label_ref (match_operand 0 "" ""))))] - "" - "* - switch (get_attr_length (insn)) - { - case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\"; - case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\"; - default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\"; - } - return \"\"; -"[(set (attr "far_jump") - (if_then_else (eq_attr "length" "6") - (const_string "yes") - (const_string "no"))) - (set (attr "length") - (if_then_else - (and (ge (minus (match_dup 0) (pc)) (const_int -252)) - (le (minus (match_dup 0) (pc)) (const_int 254))) - (const_int 2) - (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044)) - (le (minus (match_dup 0) (pc)) (const_int 2044))) - (const_int 4) - (const_int 6))))]) - -(define_insn "indirect_jump" - [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))] - "" - "mov\\tpc, %0") - -(define_insn "tablejump" - [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r")) - (use (label_ref (match_operand 1 "" "")))] - "" - "mov\\tpc, %0") - -;; Call insns - -(define_expand "call" - [(call (match_operand:SI 0 "memory_operand" "") - (match_operand 1 "" ""))] - "" - " -{ - if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG) - XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0)); -}") - -(define_insn "*call_indirect" - [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r")) - (match_operand 1 "" ""))] - "! 
TARGET_CALLER_INTERWORKING" - "bl\\t_call_via_%0" -[(set_attr "length" "4")]) -;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version -;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set -;; the bottom bit of lr so that a function return (using bx) -;; would switch back into ARM mode... - -(define_insn "*call_indirect_interwork" - [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r")) - (match_operand 1 "" ""))] - "TARGET_CALLER_INTERWORKING" - "bl\\t_interwork_call_via_%0" -[(set_attr "length" "4")]) - -(define_expand "call_value" - [(set (match_operand 0 "" "") - (call (match_operand 1 "memory_operand" "") - (match_operand 2 "" "")))] - "" - " -{ - if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG) - XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0)); -}") - -(define_insn "*call_value_indirect" - [(set (match_operand 0 "" "=l") - (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r")) - (match_operand 2 "" "")))] - "! TARGET_CALLER_INTERWORKING" - "bl\\t_call_via_%1" -[(set_attr "length" "4")]) -;; See comment for call_indirect pattern - -(define_insn "*call_value_indirect_interwork" - [(set (match_operand 0 "" "=l") - (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r")) - (match_operand 2 "" "")))] - "TARGET_CALLER_INTERWORKING" - "bl\\t_interwork_call_via_%1" -[(set_attr "length" "4")]) - - -(define_insn "*call_insn" - [(call (mem:SI (match_operand:SI 0 "" "i")) - (match_operand:SI 1 "" ""))] - "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF" - "bl\\t%a0" -[(set_attr "length" "4")]) - -(define_insn "*call_value_insn" - [(set (match_operand 0 "s_register_operand" "=l") - (call (mem:SI (match_operand 1 "" "i")) - (match_operand 2 "" "")))] - "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF" - "bl\\t%a1" -[(set_attr "length" "4")]) - -;; Untyped call not required, since all funcs return in r0 - -;; Miscellaneous patterns - -(define_insn "nop" - [(clobber (const_int 0))] - "" - "mov\\tr8, r8") - -(define_insn "blockage" - [(unspec_volatile [(const_int 0)] 0)] - "" - "" - [(set_attr "length" "0")]) - -(define_expand "prologue" - [(const_int 0)] - "" - " - thumb_expand_prologue (); - DONE; -") - -(define_expand "epilogue" - [(unspec_volatile [(const_int 0)] 1)] - "" - " - thumb_expand_epilogue (); -") - -(define_insn "*epilogue_insns" - [(unspec_volatile [(const_int 0)] 1)] - "" - "* - return thumb_unexpanded_epilogue (); -" -[(set_attr "length" "42")]) - -;; Special patterns for dealing with the constant pool - -(define_insn "consttable_4" - [(unspec_volatile [(match_operand 0 "" "")] 2)] - "" - "* -{ - switch (GET_MODE_CLASS (GET_MODE (operands[0]))) - { - case MODE_FLOAT: - { - union real_extract u; - memcpy((char *)&u, (char *)&CONST_DOUBLE_LOW(operands[0]), sizeof u); - assemble_real (u.d, GET_MODE (operands[0])); - break; - } - default: - assemble_integer (operands[0], 4, 1); - break; - } - return \"\"; -}" -[(set_attr "length" "4")]) - -(define_insn "consttable_8" - [(unspec_volatile [(match_operand 0 "" "")] 3)] - "" - "* -{ - switch (GET_MODE_CLASS (GET_MODE (operands[0]))) - { - case MODE_FLOAT: - { - union real_extract u; - memcpy((char *)&u, (char *)&CONST_DOUBLE_LOW(operands[0]), sizeof u); - assemble_real (u.d, GET_MODE (operands[0])); - break; - } - default: - assemble_integer (operands[0], 8, 1); - break; - } - return \"\"; -}" -[(set_attr "length" "8")]) - -(define_insn "consttable_end" - [(unspec_volatile [(const_int 0)] 4)] - "" - "* - /* Nothing to do (currently). 
*/ - return \"\"; -") - -(define_insn "align_4" - [(unspec_volatile [(const_int 0)] 5)] - "" - "* - assemble_align (32); - return \"\"; -") diff --git a/gcc/config/fp-bit.c b/gcc/config/fp-bit.c deleted file mode 100755 index 6b8bd70..0000000 --- a/gcc/config/fp-bit.c +++ /dev/null @@ -1,1507 +0,0 @@ -/* This is a software floating point library which can be used instead of - the floating point routines in libgcc1.c for targets without hardware - floating point. - Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc. - -This file is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 2, or (at your option) any -later version. - -In addition to the permissions in the GNU General Public License, the -Free Software Foundation gives you unlimited permission to link the -compiled version of this file with other programs, and to distribute -those programs without any restriction coming from the use of this -file. (The General Public License restrictions do apply in other -respects; for example, they cover modification of the file, and -distribution when not linked into another program.) - -This file is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* As a special exception, if you link this library with other files, - some of which are compiled with GCC, to produce an executable, - this library does not by itself cause the resulting executable - to be covered by the GNU General Public License. - This exception does not however invalidate any other reasons why - the executable file might be covered by the GNU General Public License. */ - -/* This implements IEEE 754 format arithmetic, but does not provide a - mechanism for setting the rounding mode, or for generating or handling - exceptions. - - The original code by Steve Chamberlain, hacked by Mark Eichin and Jim - Wilson, all of Cygnus Support. */ - -/* The intended way to use this file is to make two copies, add `#define FLOAT' - to one copy, then compile both copies and add them to libgcc.a. */ - -/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines - from this file are compiled via additional -D options. - - This avoids the need to pull in the entire fp emulation library - when only a small number of functions are needed. - - If FINE_GRAINED_LIBRARIES is not defined, then compile every - suitable routine. 
*/ -#ifndef FINE_GRAINED_LIBRARIES -#define L_pack_df -#define L_unpack_df -#define L_pack_sf -#define L_unpack_sf -#define L_addsub_sf -#define L_addsub_df -#define L_mul_sf -#define L_mul_df -#define L_div_sf -#define L_div_df -#define L_fpcmp_parts_sf -#define L_fpcmp_parts_df -#define L_compare_sf -#define L_compare_df -#define L_eq_sf -#define L_eq_df -#define L_ne_sf -#define L_ne_df -#define L_gt_sf -#define L_gt_df -#define L_ge_sf -#define L_ge_df -#define L_lt_sf -#define L_lt_df -#define L_le_sf -#define L_le_df -#define L_si_to_sf -#define L_si_to_df -#define L_sf_to_si -#define L_df_to_si -#define L_f_to_usi -#define L_df_to_usi -#define L_negate_sf -#define L_negate_df -#define L_make_sf -#define L_make_df -#define L_sf_to_df -#define L_df_to_sf -#endif - -/* The following macros can be defined to change the behaviour of this file: - FLOAT: Implement a `float', aka SFmode, fp library. If this is not - defined, then this file implements a `double', aka DFmode, fp library. - FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e. - don't include float->double conversion which requires the double library. - This is useful only for machines which can't support doubles, e.g. some - 8-bit processors. - CMPtype: Specify the type that floating point compares should return. - This defaults to SItype, aka int. - US_SOFTWARE_GOFAST: This makes all entry points use the same names as the - US Software goFast library. If this is not defined, the entry points use - the same names as libgcc1.c. - _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding - two integers to the FLO_union_type. - NO_NANS: Disable nan and infinity handling - SMALL_MACHINE: Useful when operations on QIs and HIs are faster - than on an SI */ - -/* We don't currently support extended floats (long doubles) on machines - without hardware to deal with them. - - These stubs are just to keep the linker from complaining about unresolved - references which can be pulled in from libio & libstdc++, even if the - user isn't using long doubles. However, they may generate an unresolved - external to abort if abort is not used by the function, and the stubs - are referenced from within libc, since libgcc goes before and after the - system library. 
*/ - -#ifdef EXTENDED_FLOAT_STUBS -__truncxfsf2 (){ abort(); } -__extendsfxf2 (){ abort(); } -__addxf3 (){ abort(); } -__divxf3 (){ abort(); } -__eqxf2 (){ abort(); } -__extenddfxf2 (){ abort(); } -__gtxf2 (){ abort(); } -__lexf2 (){ abort(); } -__ltxf2 (){ abort(); } -__mulxf3 (){ abort(); } -__negxf2 (){ abort(); } -__nexf2 (){ abort(); } -__subxf3 (){ abort(); } -__truncxfdf2 (){ abort(); } - -__trunctfsf2 (){ abort(); } -__extendsftf2 (){ abort(); } -__addtf3 (){ abort(); } -__divtf3 (){ abort(); } -__eqtf2 (){ abort(); } -__extenddftf2 (){ abort(); } -__gttf2 (){ abort(); } -__letf2 (){ abort(); } -__lttf2 (){ abort(); } -__multf3 (){ abort(); } -__negtf2 (){ abort(); } -__netf2 (){ abort(); } -__subtf3 (){ abort(); } -__trunctfdf2 (){ abort(); } -__gexf2 (){ abort(); } -__fixxfsi (){ abort(); } -__floatsixf (){ abort(); } -#else /* !EXTENDED_FLOAT_STUBS, rest of file */ - - -typedef float SFtype __attribute__ ((mode (SF))); -typedef float DFtype __attribute__ ((mode (DF))); - -typedef int HItype __attribute__ ((mode (HI))); -typedef int SItype __attribute__ ((mode (SI))); -typedef int DItype __attribute__ ((mode (DI))); - -/* The type of the result of a fp compare */ -#ifndef CMPtype -#define CMPtype SItype -#endif - -typedef unsigned int UHItype __attribute__ ((mode (HI))); -typedef unsigned int USItype __attribute__ ((mode (SI))); -typedef unsigned int UDItype __attribute__ ((mode (DI))); - -#define MAX_SI_INT ((SItype) ((unsigned) (~0)>>1)) -#define MAX_USI_INT ((USItype) ~0) - - -#ifdef FLOAT_ONLY -#define NO_DI_MODE -#endif - -#ifdef FLOAT -# define NGARDS 7L -# define GARDROUND 0x3f -# define GARDMASK 0x7f -# define GARDMSB 0x40 -# define EXPBITS 8 -# define EXPBIAS 127 -# define FRACBITS 23 -# define EXPMAX (0xff) -# define QUIET_NAN 0x100000L -# define FRAC_NBITS 32 -# define FRACHIGH 0x80000000L -# define FRACHIGH2 0xc0000000L -# define pack_d __pack_f -# define unpack_d __unpack_f -# define __fpcmp_parts __fpcmp_parts_f - typedef USItype fractype; - typedef UHItype halffractype; - typedef SFtype FLO_type; - typedef SItype intfrac; - -#else -# define PREFIXFPDP dp -# define PREFIXSFDF df -# define NGARDS 8L -# define GARDROUND 0x7f -# define GARDMASK 0xff -# define GARDMSB 0x80 -# define EXPBITS 11 -# define EXPBIAS 1023 -# define FRACBITS 52 -# define EXPMAX (0x7ff) -# define QUIET_NAN 0x8000000000000LL -# define FRAC_NBITS 64 -# define FRACHIGH 0x8000000000000000LL -# define FRACHIGH2 0xc000000000000000LL -# define pack_d __pack_d -# define unpack_d __unpack_d -# define __fpcmp_parts __fpcmp_parts_d - typedef UDItype fractype; - typedef USItype halffractype; - typedef DFtype FLO_type; - typedef DItype intfrac; -#endif - -#ifdef US_SOFTWARE_GOFAST -# ifdef FLOAT -# define add fpadd -# define sub fpsub -# define multiply fpmul -# define divide fpdiv -# define compare fpcmp -# define si_to_float sitofp -# define float_to_si fptosi -# define float_to_usi fptoui -# define negate __negsf2 -# define sf_to_df fptodp -# define dptofp dptofp -#else -# define add dpadd -# define sub dpsub -# define multiply dpmul -# define divide dpdiv -# define compare dpcmp -# define si_to_float litodp -# define float_to_si dptoli -# define float_to_usi dptoul -# define negate __negdf2 -# define df_to_sf dptofp -#endif -#else -# ifdef FLOAT -# define add __addsf3 -# define sub __subsf3 -# define multiply __mulsf3 -# define divide __divsf3 -# define compare __cmpsf2 -# define _eq_f2 __eqsf2 -# define _ne_f2 __nesf2 -# define _gt_f2 __gtsf2 -# define _ge_f2 __gesf2 -# define _lt_f2 __ltsf2 -# define 
_le_f2 __lesf2 -# define si_to_float __floatsisf -# define float_to_si __fixsfsi -# define float_to_usi __fixunssfsi -# define negate __negsf2 -# define sf_to_df __extendsfdf2 -#else -# define add __adddf3 -# define sub __subdf3 -# define multiply __muldf3 -# define divide __divdf3 -# define compare __cmpdf2 -# define _eq_f2 __eqdf2 -# define _ne_f2 __nedf2 -# define _gt_f2 __gtdf2 -# define _ge_f2 __gedf2 -# define _lt_f2 __ltdf2 -# define _le_f2 __ledf2 -# define si_to_float __floatsidf -# define float_to_si __fixdfsi -# define float_to_usi __fixunsdfsi -# define negate __negdf2 -# define df_to_sf __truncdfsf2 -# endif -#endif - - -#ifndef INLINE -#define INLINE __inline__ -#endif - -/* Preserve the sticky-bit when shifting fractions to the right. */ -#define LSHIFT(a) { a = (a & 1) | (a >> 1); } - -/* numeric parameters */ -/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa - of a float and of a double. Assumes there are only two float types. - (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS)) - */ -#define F_D_BITOFF (52+8-(23+7)) - - -#define NORMAL_EXPMIN (-(EXPBIAS)+1) -#define IMPLICIT_1 (1LL<<(FRACBITS+NGARDS)) -#define IMPLICIT_2 (1LL<<(FRACBITS+1+NGARDS)) - -/* common types */ - -typedef enum -{ - CLASS_SNAN, - CLASS_QNAN, - CLASS_ZERO, - CLASS_NUMBER, - CLASS_INFINITY -} fp_class_type; - -typedef struct -{ -#ifdef SMALL_MACHINE - char class; - unsigned char sign; - short normal_exp; -#else - fp_class_type class; - unsigned int sign; - int normal_exp; -#endif - - union - { - fractype ll; - halffractype l[2]; - } fraction; -} fp_number_type; - -typedef union -{ - FLO_type value; - fractype value_raw; - -#ifndef FLOAT - halffractype words[2]; -#endif - -#ifdef FLOAT_BIT_ORDER_MISMATCH - struct - { - fractype fraction:FRACBITS __attribute__ ((packed)); - unsigned int exp:EXPBITS __attribute__ ((packed)); - unsigned int sign:1 __attribute__ ((packed)); - } - bits; -#endif - -#ifdef _DEBUG_BITFLOAT - struct - { - unsigned int sign:1 __attribute__ ((packed)); - unsigned int exp:EXPBITS __attribute__ ((packed)); - fractype fraction:FRACBITS __attribute__ ((packed)); - } - bits_big_endian; - - struct - { - fractype fraction:FRACBITS __attribute__ ((packed)); - unsigned int exp:EXPBITS __attribute__ ((packed)); - unsigned int sign:1 __attribute__ ((packed)); - } - bits_little_endian; -#endif -} -FLO_union_type; - - -/* end of header */ - -/* IEEE "special" number predicates */ - -#ifdef NO_NANS - -#define nan() 0 -#define isnan(x) 0 -#define isinf(x) 0 -#else - -INLINE -static fp_number_type * -nan () -{ - static fp_number_type thenan; - - return &thenan; -} - -INLINE -static int -isnan ( fp_number_type * x) -{ - return x->class == CLASS_SNAN || x->class == CLASS_QNAN; -} - -INLINE -static int -isinf ( fp_number_type * x) -{ - return x->class == CLASS_INFINITY; -} - -#endif - -INLINE -static int -iszero ( fp_number_type * x) -{ - return x->class == CLASS_ZERO; -} - -INLINE -static void -flip_sign ( fp_number_type * x) -{ - x->sign = !x->sign; -} - -extern FLO_type pack_d ( fp_number_type * ); - -#if defined(L_pack_df) || defined(L_pack_sf) -FLO_type -pack_d ( fp_number_type * src) -{ - FLO_union_type dst; - fractype fraction = src->fraction.ll; /* wasn't unsigned before? 
*/ - int sign = src->sign; - int exp = 0; - - if (isnan (src)) - { - exp = EXPMAX; - if (src->class == CLASS_QNAN || 1) - { - fraction |= QUIET_NAN; - } - } - else if (isinf (src)) - { - exp = EXPMAX; - fraction = 0; - } - else if (iszero (src)) - { - exp = 0; - fraction = 0; - } - else if (fraction == 0) - { - exp = 0; - } - else - { - if (src->normal_exp < NORMAL_EXPMIN) - { - /* This number's exponent is too low to fit into the bits - available in the number, so we'll store 0 in the exponent and - shift the fraction to the right to make up for it. */ - - int shift = NORMAL_EXPMIN - src->normal_exp; - - exp = 0; - - if (shift > FRAC_NBITS - NGARDS) - { - /* No point shifting, since it's more that 64 out. */ - fraction = 0; - } - else - { - /* Shift by the value */ - fraction >>= shift; - } - fraction >>= NGARDS; - } - else if (src->normal_exp > EXPBIAS) - { - exp = EXPMAX; - fraction = 0; - } - else - { - exp = src->normal_exp + EXPBIAS; - /* IF the gard bits are the all zero, but the first, then we're - half way between two numbers, choose the one which makes the - lsb of the answer 0. */ - if ((fraction & GARDMASK) == GARDMSB) - { - if (fraction & (1 << NGARDS)) - fraction += GARDROUND + 1; - } - else - { - /* Add a one to the guards to round up */ - fraction += GARDROUND; - } - if (fraction >= IMPLICIT_2) - { - fraction >>= 1; - exp += 1; - } - fraction >>= NGARDS; - } - } - - /* We previously used bitfields to store the number, but this doesn't - handle little/big endian systems conveniently, so use shifts and - masks */ -#ifdef FLOAT_BIT_ORDER_MISMATCH - dst.bits.fraction = fraction; - dst.bits.exp = exp; - dst.bits.sign = sign; -#else - dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1); - dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS; - dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS | EXPBITS); -#endif - -#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT) - { - halffractype tmp = dst.words[0]; - dst.words[0] = dst.words[1]; - dst.words[1] = tmp; - } -#endif - - return dst.value; -} -#endif - -extern void unpack_d (FLO_union_type *, fp_number_type *); - -#if defined(L_unpack_df) || defined(L_unpack_sf) -void -unpack_d (FLO_union_type * src, fp_number_type * dst) -{ - /* We previously used bitfields to store the number, but this doesn't - handle little/big endian systems conveniently, so use shifts and - masks */ - fractype fraction; - int exp; - int sign; - -#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT) - FLO_union_type swapped; - - swapped.words[0] = src->words[1]; - swapped.words[1] = src->words[0]; - src = &swapped; -#endif - -#ifdef FLOAT_BIT_ORDER_MISMATCH - fraction = src->bits.fraction; - exp = src->bits.exp; - sign = src->bits.sign; -#else - fraction = src->value_raw & ((((fractype)1) << FRACBITS) - (fractype)1); - exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1); - sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1; -#endif - - dst->sign = sign; - if (exp == 0) - { - /* Hmm. Looks like 0 */ - if (fraction == 0) - { - /* tastes like zero */ - dst->class = CLASS_ZERO; - } - else - { - /* Zero exponent with non zero fraction - it's denormalized, - so there isn't a leading implicit one - we'll shift it so - it gets one. 
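     As a rough standalone illustration of that renormalization step (not
     part of the original file; it reuses the single-precision constants
     defined above, FRACBITS = 23, NGARDS = 7, EXPBIAS = 127, and C99
     fixed-width types), the sketch below shows the smallest denormal
     gaining its implicit leading 1 while the exponent is decremented:

#include <stdio.h>
#include <stdint.h>

#define FRACBITS   23
#define NGARDS     7
#define EXPBIAS    127
#define IMPLICIT_1 ((uint32_t) 1 << (FRACBITS + NGARDS))

int
main (void)
{
  /* Smallest positive single-precision denormal: stored exponent 0,
     stored fraction 1.  Align it with the guard bits first.  */
  uint32_t fraction = (uint32_t) 1 << NGARDS;
  int normal_exp = 1 - EXPBIAS;        /* exponent field 0 means 1 - bias */

  while (fraction < IMPLICIT_1)
    {
      fraction <<= 1;
      normal_exp--;
    }

  /* Expect fraction == IMPLICIT_1 and normal_exp == -149, i.e. the
     value is now carried as 1.0 * 2^-149.  */
  printf ("normal_exp = %d, fraction = 0x%08x\n", normal_exp, fraction);
  return 0;
}

     The loop in unpack_d just below has the same shape; only the field
     names differ.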
*/ - dst->normal_exp = exp - EXPBIAS + 1; - fraction <<= NGARDS; - - dst->class = CLASS_NUMBER; -#if 1 - while (fraction < IMPLICIT_1) - { - fraction <<= 1; - dst->normal_exp--; - } -#endif - dst->fraction.ll = fraction; - } - } - else if (exp == EXPMAX) - { - /* Huge exponent*/ - if (fraction == 0) - { - /* Attached to a zero fraction - means infinity */ - dst->class = CLASS_INFINITY; - } - else - { - /* Non zero fraction, means nan */ - if (fraction & QUIET_NAN) - { - dst->class = CLASS_QNAN; - } - else - { - dst->class = CLASS_SNAN; - } - /* Keep the fraction part as the nan number */ - dst->fraction.ll = fraction; - } - } - else - { - /* Nothing strange about this number */ - dst->normal_exp = exp - EXPBIAS; - dst->class = CLASS_NUMBER; - dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1; - } -} -#endif - -#if defined(L_addsub_sf) || defined(L_addsub_df) -static fp_number_type * -_fpadd_parts (fp_number_type * a, - fp_number_type * b, - fp_number_type * tmp) -{ - intfrac tfraction; - - /* Put commonly used fields in local variables. */ - int a_normal_exp; - int b_normal_exp; - fractype a_fraction; - fractype b_fraction; - - if (isnan (a)) - { - return a; - } - if (isnan (b)) - { - return b; - } - if (isinf (a)) - { - /* Adding infinities with opposite signs yields a NaN. */ - if (isinf (b) && a->sign != b->sign) - return nan (); - return a; - } - if (isinf (b)) - { - return b; - } - if (iszero (b)) - { - if (iszero (a)) - { - *tmp = *a; - tmp->sign = a->sign & b->sign; - return tmp; - } - return a; - } - if (iszero (a)) - { - return b; - } - - /* Got two numbers. shift the smaller and increment the exponent till - they're the same */ - { - int diff; - - a_normal_exp = a->normal_exp; - b_normal_exp = b->normal_exp; - a_fraction = a->fraction.ll; - b_fraction = b->fraction.ll; - - diff = a_normal_exp - b_normal_exp; - - if (diff < 0) - diff = -diff; - if (diff < FRAC_NBITS) - { - /* ??? This does shifts one bit at a time. Optimize. */ - while (a_normal_exp > b_normal_exp) - { - b_normal_exp++; - LSHIFT (b_fraction); - } - while (b_normal_exp > a_normal_exp) - { - a_normal_exp++; - LSHIFT (a_fraction); - } - } - else - { - /* Somethings's up.. 
choose the biggest */ - if (a_normal_exp > b_normal_exp) - { - b_normal_exp = a_normal_exp; - b_fraction = 0; - } - else - { - a_normal_exp = b_normal_exp; - a_fraction = 0; - } - } - } - - if (a->sign != b->sign) - { - if (a->sign) - { - tfraction = -a_fraction + b_fraction; - } - else - { - tfraction = a_fraction - b_fraction; - } - if (tfraction >= 0) - { - tmp->sign = 0; - tmp->normal_exp = a_normal_exp; - tmp->fraction.ll = tfraction; - } - else - { - tmp->sign = 1; - tmp->normal_exp = a_normal_exp; - tmp->fraction.ll = -tfraction; - } - /* and renormalize it */ - - while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll) - { - tmp->fraction.ll <<= 1; - tmp->normal_exp--; - } - } - else - { - tmp->sign = a->sign; - tmp->normal_exp = a_normal_exp; - tmp->fraction.ll = a_fraction + b_fraction; - } - tmp->class = CLASS_NUMBER; - /* Now the fraction is added, we have to shift down to renormalize the - number */ - - if (tmp->fraction.ll >= IMPLICIT_2) - { - LSHIFT (tmp->fraction.ll); - tmp->normal_exp++; - } - return tmp; - -} - -FLO_type -add (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - fp_number_type tmp; - fp_number_type *res; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - res = _fpadd_parts (&a, &b, &tmp); - - return pack_d (res); -} - -FLO_type -sub (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - fp_number_type tmp; - fp_number_type *res; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - b.sign ^= 1; - - res = _fpadd_parts (&a, &b, &tmp); - - return pack_d (res); -} -#endif - -#if defined(L_mul_sf) || defined(L_mul_df) -static INLINE fp_number_type * -_fpmul_parts ( fp_number_type * a, - fp_number_type * b, - fp_number_type * tmp) -{ - fractype low = 0; - fractype high = 0; - - if (isnan (a)) - { - a->sign = a->sign != b->sign; - return a; - } - if (isnan (b)) - { - b->sign = a->sign != b->sign; - return b; - } - if (isinf (a)) - { - if (iszero (b)) - return nan (); - a->sign = a->sign != b->sign; - return a; - } - if (isinf (b)) - { - if (iszero (a)) - { - return nan (); - } - b->sign = a->sign != b->sign; - return b; - } - if (iszero (a)) - { - a->sign = a->sign != b->sign; - return a; - } - if (iszero (b)) - { - b->sign = a->sign != b->sign; - return b; - } - - /* Calculate the mantissa by multiplying both 64bit numbers to get a - 128 bit number */ - { -#if defined(NO_DI_MODE) - { - fractype x = a->fraction.ll; - fractype ylow = b->fraction.ll; - fractype yhigh = 0; - int bit; - - /* ??? This does multiplies one bit at a time. Optimize. 
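     For reference, the same bit-at-a-time scheme can be exercised in
     isolation.  The sketch below is not part of the original file; it uses
     arbitrary 32-bit operands and C99 types, accumulates a double-width
     product in a high/low pair exactly as the NO_DI_MODE loop does, and
     checks the result against a native 64-bit multiply:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t x = 0x9e3779b9u, y = 0x85ebca6bu;   /* arbitrary operands */
  uint32_t low = 0, high = 0;
  uint32_t ylow = y, yhigh = 0;
  int bit;

  for (bit = 0; bit < 32; bit++)
    {
      if (x & 1)
        {
          /* Add the shifted multiplicand into the 64-bit accumulator,
             propagating the carry from the low word to the high word.  */
          uint32_t carry = (low += ylow) < ylow;
          high += yhigh + carry;
        }
      /* Shift the (yhigh:ylow) pair left by one bit.  */
      yhigh = (yhigh << 1) | (ylow >> 31);
      ylow <<= 1;
      x >>= 1;
    }

  if ((((uint64_t) high << 32) | low) == (uint64_t) 0x9e3779b9u * 0x85ebca6bu)
    printf ("ok\n");
  else
    printf ("mismatch\n");
  return 0;
}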
*/ - for (bit = 0; bit < FRAC_NBITS; bit++) - { - int carry; - - if (x & 1) - { - carry = (low += ylow) < ylow; - high += yhigh + carry; - } - yhigh <<= 1; - if (ylow & FRACHIGH) - { - yhigh |= 1; - } - ylow <<= 1; - x >>= 1; - } - } -#elif defined(FLOAT) - { - /* Multiplying two 32 bit numbers to get a 64 bit number on - a machine with DI, so we're safe */ - - DItype answer = (DItype)(a->fraction.ll) * (DItype)(b->fraction.ll); - - high = answer >> 32; - low = answer; - } -#else - /* Doing a 64*64 to 128 */ - { - UDItype nl = a->fraction.ll & 0xffffffff; - UDItype nh = a->fraction.ll >> 32; - UDItype ml = b->fraction.ll & 0xffffffff; - UDItype mh = b->fraction.ll >>32; - UDItype pp_ll = ml * nl; - UDItype pp_hl = mh * nl; - UDItype pp_lh = ml * nh; - UDItype pp_hh = mh * nh; - UDItype res2 = 0; - UDItype res0 = 0; - UDItype ps_hh__ = pp_hl + pp_lh; - if (ps_hh__ < pp_hl) - res2 += 0x100000000LL; - pp_hl = (ps_hh__ << 32) & 0xffffffff00000000LL; - res0 = pp_ll + pp_hl; - if (res0 < pp_ll) - res2++; - res2 += ((ps_hh__ >> 32) & 0xffffffffL) + pp_hh; - high = res2; - low = res0; - } -#endif - } - - tmp->normal_exp = a->normal_exp + b->normal_exp; - tmp->sign = a->sign != b->sign; -#ifdef FLOAT - tmp->normal_exp += 2; /* ??????????????? */ -#else - tmp->normal_exp += 4; /* ??????????????? */ -#endif - while (high >= IMPLICIT_2) - { - tmp->normal_exp++; - if (high & 1) - { - low >>= 1; - low |= FRACHIGH; - } - high >>= 1; - } - while (high < IMPLICIT_1) - { - tmp->normal_exp--; - - high <<= 1; - if (low & FRACHIGH) - high |= 1; - low <<= 1; - } - /* rounding is tricky. if we only round if it won't make us round later. */ -#if 0 - if (low & FRACHIGH2) - { - if (((high & GARDMASK) != GARDMSB) - && (((high + 1) & GARDMASK) == GARDMSB)) - { - /* don't round, it gets done again later. */ - } - else - { - high++; - } - } -#endif - if ((high & GARDMASK) == GARDMSB) - { - if (high & (1 << NGARDS)) - { - /* half way, so round to even */ - high += GARDROUND + 1; - } - else if (low) - { - /* but we really weren't half way */ - high += GARDROUND + 1; - } - } - tmp->fraction.ll = high; - tmp->class = CLASS_NUMBER; - return tmp; -} - -FLO_type -multiply (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - fp_number_type tmp; - fp_number_type *res; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - res = _fpmul_parts (&a, &b, &tmp); - - return pack_d (res); -} -#endif - -#if defined(L_div_sf) || defined(L_div_df) -static INLINE fp_number_type * -_fpdiv_parts (fp_number_type * a, - fp_number_type * b) -{ - fractype bit; - fractype numerator; - fractype denominator; - fractype quotient; - - if (isnan (a)) - { - return a; - } - if (isnan (b)) - { - return b; - } - - a->sign = a->sign ^ b->sign; - - if (isinf (a) || iszero (a)) - { - if (a->class == b->class) - return nan (); - return a; - } - - if (isinf (b)) - { - a->fraction.ll = 0; - a->normal_exp = 0; - return a; - } - if (iszero (b)) - { - a->class = CLASS_INFINITY; - return a; - } - - /* Calculate the mantissa by multiplying both 64bit numbers to get a - 128 bit number */ - { - /* quotient = - ( numerator / denominator) * 2^(numerator exponent - denominator exponent) - */ - - a->normal_exp = a->normal_exp - b->normal_exp; - numerator = a->fraction.ll; - denominator = b->fraction.ll; - - if (numerator < denominator) - { - /* Fraction will be less than 1.0 */ - numerator *= 2; - a->normal_exp--; - } - bit = IMPLICIT_1; - quotient = 0; - /* ??? Does divide one bit at a time. Optimize. 
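     To see what that loop computes, here is the same shift-and-subtract
     division run at a reduced width (a standalone sketch, not part of the
     original file; it uses 10 mantissa bits instead of FRACBITS + NGARDS
     purely to keep the numbers small).  Quotient bits come out most
     significant first, with the binary point at the implicit-1 position:

#include <stdio.h>
#include <stdint.h>

#define MANT_BITS 10
#define ONE ((uint32_t) 1 << MANT_BITS)   /* the implicit-1 position */

int
main (void)
{
  uint32_t numerator   = ONE + 512;       /* 1.5  in 1.10 fixed point */
  uint32_t denominator = ONE + 256;       /* 1.25 in 1.10 fixed point */
  uint32_t quotient = 0;
  uint32_t bit;

  /* 1.5 / 1.25 = 1.2 already lies in [1, 2), so no pre-scaling here.  */
  for (bit = ONE; bit != 0; bit >>= 1)
    {
      if (numerator >= denominator)
        {
          quotient |= bit;
          numerator -= denominator;
        }
      numerator *= 2;
    }

  /* quotient == 1228, i.e. 1.2 truncated to 10 fraction bits.  */
  printf ("quotient = %u/1024 = %.6f\n", quotient, (double) quotient / ONE);
  return 0;
}

     In _fpdiv_parts the leftover numerator then serves as the sticky
     indicator for the guard-bit rounding that follows.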
*/ - while (bit) - { - if (numerator >= denominator) - { - quotient |= bit; - numerator -= denominator; - } - bit >>= 1; - numerator *= 2; - } - - if ((quotient & GARDMASK) == GARDMSB) - { - if (quotient & (1 << NGARDS)) - { - /* half way, so round to even */ - quotient += GARDROUND + 1; - } - else if (numerator) - { - /* but we really weren't half way, more bits exist */ - quotient += GARDROUND + 1; - } - } - - a->fraction.ll = quotient; - return (a); - } -} - -FLO_type -divide (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - fp_number_type *res; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - res = _fpdiv_parts (&a, &b); - - return pack_d (res); -} -#endif - -int __fpcmp_parts (fp_number_type * a, fp_number_type *b); - -#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df) -/* according to the demo, fpcmp returns a comparison with 0... thus - a -1 - a==b -> 0 - a>b -> +1 - */ - -int -__fpcmp_parts (fp_number_type * a, fp_number_type * b) -{ -#if 0 - /* either nan -> unordered. Must be checked outside of this routine. */ - if (isnan (a) && isnan (b)) - { - return 1; /* still unordered! */ - } -#endif - - if (isnan (a) || isnan (b)) - { - return 1; /* how to indicate unordered compare? */ - } - if (isinf (a) && isinf (b)) - { - /* +inf > -inf, but +inf != +inf */ - /* b \a| +inf(0)| -inf(1) - ______\+--------+-------- - +inf(0)| a==b(0)| ab(1) | a==b(0) - -------+--------+-------- - So since unordered must be non zero, just line up the columns... - */ - return b->sign - a->sign; - } - /* but not both... */ - if (isinf (a)) - { - return a->sign ? -1 : 1; - } - if (isinf (b)) - { - return b->sign ? 1 : -1; - } - if (iszero (a) && iszero (b)) - { - return 0; - } - if (iszero (a)) - { - return b->sign ? 1 : -1; - } - if (iszero (b)) - { - return a->sign ? -1 : 1; - } - /* now both are "normal". */ - if (a->sign != b->sign) - { - /* opposite signs */ - return a->sign ? -1 : 1; - } - /* same sign; exponents? */ - if (a->normal_exp > b->normal_exp) - { - return a->sign ? -1 : 1; - } - if (a->normal_exp < b->normal_exp) - { - return a->sign ? 1 : -1; - } - /* same exponents; check size. */ - if (a->fraction.ll > b->fraction.ll) - { - return a->sign ? -1 : 1; - } - if (a->fraction.ll < b->fraction.ll) - { - return a->sign ? 1 : -1; - } - /* after all that, they're equal. */ - return 0; -} -#endif - -#if defined(L_compare_sf) || defined(L_compare_df) -CMPtype -compare (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - return __fpcmp_parts (&a, &b); -} -#endif - -#ifndef US_SOFTWARE_GOFAST - -/* These should be optimized for their specific tasks someday. 
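     The wrappers that follow all funnel into __fpcmp_parts, but each one
     picks its NaN return value so that the usual "compare the result
     against 0 with the operator the caller wrote" idiom comes out false
     for unordered operands.  The stand-in below is a hypothetical sketch
     of that contract only (it is not the library routine itself):

#include <math.h>
#include <stdio.h>

/* Same contract as _lt_f2 / __ltsf2: the result is < 0 exactly when
   a < b; any NaN operand yields a value that fails the test.  */
static int
lt_like (float a, float b)
{
  if (isnan (a) || isnan (b))
    return 1;                       /* unordered: not less-than */
  return a < b ? -1 : (a > b ? 1 : 0);
}

int
main (void)
{
  volatile float z = 0.0f;

  printf ("%d\n", lt_like (1.0f, 2.0f) < 0);    /* 1: ordinary case */
  printf ("%d\n", lt_like (z / z, 2.0f) < 0);   /* 0: NaN operand   */
  return 0;
}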
*/ - -#if defined(L_eq_sf) || defined(L_eq_df) -CMPtype -_eq_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return 1; /* false, truth == 0 */ - - return __fpcmp_parts (&a, &b) ; -} -#endif - -#if defined(L_ne_sf) || defined(L_ne_df) -CMPtype -_ne_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return 1; /* true, truth != 0 */ - - return __fpcmp_parts (&a, &b) ; -} -#endif - -#if defined(L_gt_sf) || defined(L_gt_df) -CMPtype -_gt_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return -1; /* false, truth > 0 */ - - return __fpcmp_parts (&a, &b); -} -#endif - -#if defined(L_ge_sf) || defined(L_ge_df) -CMPtype -_ge_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return -1; /* false, truth >= 0 */ - return __fpcmp_parts (&a, &b) ; -} -#endif - -#if defined(L_lt_sf) || defined(L_lt_df) -CMPtype -_lt_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return 1; /* false, truth < 0 */ - - return __fpcmp_parts (&a, &b); -} -#endif - -#if defined(L_le_sf) || defined(L_le_df) -CMPtype -_le_f2 (FLO_type arg_a, FLO_type arg_b) -{ - fp_number_type a; - fp_number_type b; - - unpack_d ((FLO_union_type *) & arg_a, &a); - unpack_d ((FLO_union_type *) & arg_b, &b); - - if (isnan (&a) || isnan (&b)) - return 1; /* false, truth <= 0 */ - - return __fpcmp_parts (&a, &b) ; -} -#endif - -#endif /* ! US_SOFTWARE_GOFAST */ - -#if defined(L_si_to_sf) || defined(L_si_to_df) -FLO_type -si_to_float (SItype arg_a) -{ - fp_number_type in; - - in.class = CLASS_NUMBER; - in.sign = arg_a < 0; - if (!arg_a) - { - in.class = CLASS_ZERO; - } - else - { - in.normal_exp = FRACBITS + NGARDS; - if (in.sign) - { - /* Special case for minint, since there is no +ve integer - representation for it */ - if (arg_a == (SItype) 0x80000000) - { - return -2147483648.0; - } - in.fraction.ll = (-arg_a); - } - else - in.fraction.ll = arg_a; - - while (in.fraction.ll < (1LL << (FRACBITS + NGARDS))) - { - in.fraction.ll <<= 1; - in.normal_exp -= 1; - } - } - return pack_d (&in); -} -#endif - -#if defined(L_sf_to_si) || defined(L_df_to_si) -SItype -float_to_si (FLO_type arg_a) -{ - fp_number_type a; - SItype tmp; - - unpack_d ((FLO_union_type *) & arg_a, &a); - if (iszero (&a)) - return 0; - if (isnan (&a)) - return 0; - /* get reasonable MAX_SI_INT... */ - if (isinf (&a)) - return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT; - /* it is a number, but a small one */ - if (a.normal_exp < 0) - return 0; - if (a.normal_exp > 30) - return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT; - tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp); - return a.sign ? 
(-tmp) : (tmp); -} -#endif - -#if defined(L_sf_to_usi) || defined(L_df_to_usi) -#ifdef US_SOFTWARE_GOFAST -/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines, - we also define them for GOFAST because the ones in libgcc2.c have the - wrong names and I'd rather define these here and keep GOFAST CYG-LOC's - out of libgcc2.c. We can't define these here if not GOFAST because then - there'd be duplicate copies. */ - -USItype -float_to_usi (FLO_type arg_a) -{ - fp_number_type a; - - unpack_d ((FLO_union_type *) & arg_a, &a); - if (iszero (&a)) - return 0; - if (isnan (&a)) - return 0; - /* it is a negative number */ - if (a.sign) - return 0; - /* get reasonable MAX_USI_INT... */ - if (isinf (&a)) - return MAX_USI_INT; - /* it is a number, but a small one */ - if (a.normal_exp < 0) - return 0; - if (a.normal_exp > 31) - return MAX_USI_INT; - else if (a.normal_exp > (FRACBITS + NGARDS)) - return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS)); - else - return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp); -} -#endif -#endif - -#if defined(L_negate_sf) || defined(L_negate_df) -FLO_type -negate (FLO_type arg_a) -{ - fp_number_type a; - - unpack_d ((FLO_union_type *) & arg_a, &a); - flip_sign (&a); - return pack_d (&a); -} -#endif - -#ifdef FLOAT - -#if defined(L_make_sf) -SFtype -__make_fp(fp_class_type class, - unsigned int sign, - int exp, - USItype frac) -{ - fp_number_type in; - - in.class = class; - in.sign = sign; - in.normal_exp = exp; - in.fraction.ll = frac; - return pack_d (&in); -} -#endif - -#ifndef FLOAT_ONLY - -/* This enables one to build an fp library that supports float but not double. - Otherwise, we would get an undefined reference to __make_dp. - This is needed for some 8-bit ports that can't handle well values that - are 8-bytes in size, so we just don't support double for them at all. */ - -extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype frac); - -#if defined(L_sf_to_df) -DFtype -sf_to_df (SFtype arg_a) -{ - fp_number_type in; - - unpack_d ((FLO_union_type *) & arg_a, &in); - return __make_dp (in.class, in.sign, in.normal_exp, - ((UDItype) in.fraction.ll) << F_D_BITOFF); -} -#endif - -#endif -#endif - -#ifndef FLOAT - -extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype); - -#if defined(L_make_df) -DFtype -__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac) -{ - fp_number_type in; - - in.class = class; - in.sign = sign; - in.normal_exp = exp; - in.fraction.ll = frac; - return pack_d (&in); -} -#endif - -#if defined(L_df_to_sf) -SFtype -df_to_sf (DFtype arg_a) -{ - fp_number_type in; - USItype sffrac; - - unpack_d ((FLO_union_type *) & arg_a, &in); - - sffrac = in.fraction.ll >> F_D_BITOFF; - - /* We set the lowest guard bit in SFFRAC if we discarded any non - zero bits. */ - if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0) - sffrac |= 1; - - return __make_fp (in.class, in.sign, in.normal_exp, sffrac); -} -#endif - -#endif -#endif /* !EXTENDED_FLOAT_STUBS */ diff --git a/gcc/fp-test.c b/gcc/fp-test.c deleted file mode 100755 index 667059c..0000000 --- a/gcc/fp-test.c +++ /dev/null @@ -1,231 +0,0 @@ -/* fp-test.c - Check that all floating-point operations are available. - Copyright (C) 1995 Free Software Foundation, Inc. - Contributed by Ronald F. Guilmette . - -This file is part of GNU CC. 
- -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. - -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* This is a trivial test program which may be useful to people who are - porting the GCC or G++ compilers to a new system. The intent here is - merely to check that all floating-point operations have been provided - by the port. (Note that I say ``provided'' rather than ``implemented''.) - - To use this file, simply compile it (with GCC or G++) and then try to - link it in the normal way (also using GCC or G++ respectively). If - all of the floating -point operations (including conversions) have - been provided, then this file will link without incident. If however - one or more of the primitive floating-point operations have not been - properly provided, you will get link-time errors indicating which - floating-point operations are unavailable. - - This file will typically be used when porting the GNU compilers to - some system which lacks floating-point hardware, and for which - software emulation routines (for FP ops) are needed in order to - complete the port. */ - -#if 0 -#include -#endif - -extern double acos (double); -extern double asin (double); -extern double atan (double); -extern double atan2 (double, double); -extern double cos (double); -extern double sin (double); -extern double tan (double); -extern double cosh (double); -extern double sinh (double); -extern double tanh (double); -extern double exp (double); -extern double frexp (double, int *); -extern double ldexp (double, int); -extern double log (double); -extern double log10 (double); -extern double modf (double, double *); -extern double pow (double, double); -extern double sqrt (double); -extern double ceil (double); -extern double fabs (double); -extern double floor (double); -extern double fmod (double, double); - -int i1, i2 = 2; - -volatile signed char sc; -volatile unsigned char uc; - -volatile signed short ss; -volatile unsigned short us; - -volatile signed int si; -volatile unsigned int ui; - -volatile signed long sl; -volatile unsigned long ul; - -volatile float f1 = 1.0, f2 = 1.0, f3 = 1.0; -volatile double d1 = 1.0, d2 = 1.0, d3 = 1.0; -volatile long double D1 = 1.0, D2 = 1.0, D3 = 1.0; - -int -main () -{ - /* TYPE: float */ - - f1 = -f2; - f1 = f2 + f3; - f1 = f2 - f3; - f1 = f2 * f3; - f1 = f2 / f3; - f1 += f2; - f1 -= f2; - f1 *= f2; - f1 /= f2; - - si = f1 == f2; - si = f1 != f2; - si = f1 > f2; - si = f1 < f2; - si = f1 >= f2; - si = f1 <= f2; - - sc = f1; - uc = f1; - ss = f1; - us = f1; - si = f1; - ui = f1; - sl = f1; - ul = f1; - d1 = f1; - D1 = f1; - - f1 = sc; - f1 = uc; - f1 = ss; - f1 = us; - f1 = si; - f1 = ui; - f1 = sl; - f1 = ul; - f1 = d1; - f1 = D1; - - d1 = -d2; - d1 = d2 + d3; - d1 = d2 - d3; - d1 = d2 * d3; - d1 = d2 / d3; - d1 += d2; - d1 -= d2; - d1 *= d2; - d1 /= d2; - - si = d1 == d2; - si = d1 != d2; - si = d1 > d2; - si = d1 < d2; - si = d1 >= d2; - si = d1 <= d2; - - sc = d1; - uc = d1; - ss = d1; - us = d1; - 
si = d1; - ui = d1; - sl = d1; - ul = d1; - f1 = d1; - D1 = d1; - - d1 = sc; - d1 = uc; - d1 = ss; - d1 = us; - d1 = si; - d1 = ui; - d1 = sl; - d1 = ul; - d1 = f1; - d1 = D1; - - D1 = -D2; - D1 = D2 + D3; - D1 = D2 - D3; - D1 = D2 * D3; - D1 = D2 / D3; - D1 += D2; - D1 -= D2; - D1 *= D2; - D1 /= D2; - - si = D1 == D2; - si = D1 != D2; - si = D1 > D2; - si = D1 < D2; - si = D1 >= D2; - si = D1 <= D2; - - sc = D1; - uc = D1; - ss = D1; - us = D1; - si = D1; - ui = D1; - sl = D1; - ul = D1; - f1 = D1; - d1 = D1; - - D1 = sc; - D1 = uc; - D1 = ss; - D1 = us; - D1 = si; - D1 = ui; - D1 = sl; - D1 = ul; - D1 = f1; - D1 = d1; - - d1 = acos (d2); - d1 = asin (d2); - d1 = atan (d2); - d1 = atan2 (d2, d3); - d1 = cos (d2); - d1 = sin (d2); - d1 = tan (d2); - d1 = cosh (d2); - d1 = sinh (d2); - d1 = tanh (d2); - d1 = exp (d2); - d1 = frexp (d2, &i1); - d1 = ldexp (d2, i2); - d1 = log (d2); - d1 = log10 (d2); - d1 = modf (d2, &d3); - d1 = pow (d2, d3); - d1 = sqrt (d2); - d1 = ceil (d2); - d1 = fabs (d2); - d1 = floor (d2); - d1 = fmod (d2, d3); - - return 0; -} diff --git a/gcc/libgcc1-test.c b/gcc/libgcc1-test.c deleted file mode 100755 index 0f59cbe..0000000 --- a/gcc/libgcc1-test.c +++ /dev/null @@ -1,117 +0,0 @@ -/* This small function uses all the arithmetic operators that - libgcc1.c can handle. If you can link it, then - you have provided replacements for all the libgcc1.c functions that - your target machine needs. */ - -int foo (); -double dfoo (); - -/* We don't want __main here because that can drag in atexit (among other - things) which won't necessarily exist yet. */ - -main_without__main () -{ - int a = foo (), b = foo (); - unsigned int au = foo (), bu = foo (); - float af = dfoo (), bf = dfoo (); - double ad = dfoo (), bd = dfoo (); - - discard (a * b); - discard (a / b); - discard (a % b); - - discard (au / bu); - discard (au % bu); - - discard (a >> b); - discard (a << b); - - discard (au >> bu); - discard (au << bu); - - ddiscard (ad + bd); - ddiscard (ad - bd); - ddiscard (ad * bd); - ddiscard (ad / bd); - ddiscard (-ad); - - ddiscard (af + bf); - ddiscard (af - bf); - ddiscard (af * bf); - ddiscard (af / bf); - ddiscard (-af); - - discard ((int) ad); - discard ((int) af); - - ddiscard ((double) a); - ddiscard ((float) a); - ddiscard ((float) ad); - - discard (ad == bd); - discard (ad < bd); - discard (ad > bd); - discard (ad != bd); - discard (ad <= bd); - discard (ad >= bd); - - discard (af == bf); - discard (af < bf); - discard (af > bf); - discard (af != bf); - discard (af <= bf); - discard (af >= bf); - - return 0; -} - -discard (x) - int x; -{} - -ddiscard (x) - double x; -{} - -foo () -{ - static int table[] = {20, 69, 4, 12}; - static int idx; - - return table[idx++]; -} - -double -dfoo () -{ - static double table[] = {20.4, 69.96, 4.4, 202.202}; - static int idx; - - return table[idx++]; -} - -/* Provide functions that some versions of the linker use to default - the start address if -e symbol is not used, to avoid the warning - message saying the start address is defaulted. */ -extern void start() __asm__("start"); -extern void _start() __asm__("_start"); -extern void __start() __asm__("__start"); - -/* Provide functions that might be needed by soft-float emulation routines. */ -void memcpy() {} - -void start() {} -void _start() {} -void __start() {} -void mainCRTStartup() {} - -/* CYGNUS LOCAL - duplicate definition of memcpy() removed. 
*/ - -/* CYGNUS LOCAL v850 */ -#if defined __v850e__ || defined __v850ea__ -/* We need to use the symbol __ctbp in order to force the linker to define it. */ -extern int _ctbp; - -void _func() { _ctbp = 1; } -#endif -/* END CYGNUS LOCAL */ diff --git a/gcc/libgcc1.c b/gcc/libgcc1.c deleted file mode 100755 index bece500..0000000 --- a/gcc/libgcc1.c +++ /dev/null @@ -1,596 +0,0 @@ -/* Subroutines needed by GCC output code on some machines. */ -/* Compile this file with the Unix C compiler! */ -/* Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc. - -This file is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 2, or (at your option) any -later version. - -In addition to the permissions in the GNU General Public License, the -Free Software Foundation gives you unlimited permission to link the -compiled version of this file with other programs, and to distribute -those programs without any restriction coming from the use of this -file. (The General Public License restrictions do apply in other -respects; for example, they cover modification of the file, and -distribution when not linked into another program.) - -This file is distributed in the hope that it will be useful, but -WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* As a special exception, if you link this library with other files, - some of which are compiled with GCC, to produce an executable, - this library does not by itself cause the resulting executable - to be covered by the GNU General Public License. - This exception does not however invalidate any other reasons why - the executable file might be covered by the GNU General Public License. */ - -#include "config.h" - -/* Don't use `fancy_abort' here even if config.h says to use it. */ -#ifdef abort -#undef abort -#endif - -/* On some machines, cc is really GCC. For these machines, we can't - expect these functions to be properly compiled unless GCC open codes - the operation (which is precisely when the function won't be used). - So allow tm.h to specify ways of accomplishing the operations - by defining the macros perform_*. - - On a machine where cc is some other compiler, there is usually no - reason to define perform_*. The other compiler normally has other ways - of implementing all of these operations. - - In some cases a certain machine may come with GCC installed as cc - or may have some other compiler. Then it may make sense for tm.h - to define perform_* only if __GNUC__ is defined. 
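   The pattern is easiest to see expanded.  The sketch below is standalone
   and hypothetical: nongcc_SI_type is taken as long int (the default
   further down) and my_mulsi3 stands in for the real __mulsi3 so it can be
   built outside libgcc1.  It shows how a perform_* macro supplies the
   entire function body:

#include <stdio.h>

#define nongcc_SI_type long int

/* A target's tm.h could pre-define this to whatever its bundled cc
   compiles correctly; otherwise the plain C default applies.  */
#ifndef perform_mulsi3
#define perform_mulsi3(a, b) return a * b
#endif

nongcc_SI_type
my_mulsi3 (nongcc_SI_type a, nongcc_SI_type b)
{
  perform_mulsi3 (a, b);          /* expands to: return a * b; */
}

int
main (void)
{
  printf ("%ld\n", my_mulsi3 (6, 7));   /* 42 */
  return 0;
}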
*/ - -#ifndef perform_mulsi3 -#define perform_mulsi3(a, b) return a * b -#endif - -#ifndef perform_divsi3 -#define perform_divsi3(a, b) return a / b -#endif - -#ifndef perform_udivsi3 -#define perform_udivsi3(a, b) return a / b -#endif - -#ifndef perform_modsi3 -#define perform_modsi3(a, b) return a % b -#endif - -#ifndef perform_umodsi3 -#define perform_umodsi3(a, b) return a % b -#endif - -#ifndef perform_lshrsi3 -#define perform_lshrsi3(a, b) return a >> b -#endif - -#ifndef perform_ashrsi3 -#define perform_ashrsi3(a, b) return a >> b -#endif - -#ifndef perform_ashlsi3 -#define perform_ashlsi3(a, b) return a << b -#endif - -#ifndef perform_adddf3 -#define perform_adddf3(a, b) return a + b -#endif - -#ifndef perform_subdf3 -#define perform_subdf3(a, b) return a - b -#endif - -#ifndef perform_muldf3 -#define perform_muldf3(a, b) return a * b -#endif - -#ifndef perform_divdf3 -#define perform_divdf3(a, b) return a / b -#endif - -#ifndef perform_addsf3 -#define perform_addsf3(a, b) return INTIFY (a + b) -#endif - -#ifndef perform_subsf3 -#define perform_subsf3(a, b) return INTIFY (a - b) -#endif - -#ifndef perform_mulsf3 -#define perform_mulsf3(a, b) return INTIFY (a * b) -#endif - -#ifndef perform_divsf3 -#define perform_divsf3(a, b) return INTIFY (a / b) -#endif - -#ifndef perform_negdf2 -#define perform_negdf2(a) return -a -#endif - -#ifndef perform_negsf2 -#define perform_negsf2(a) return INTIFY (-a) -#endif - -#ifndef perform_fixdfsi -#define perform_fixdfsi(a) return (nongcc_SI_type) a; -#endif - -#ifndef perform_fixsfsi -#define perform_fixsfsi(a) return (nongcc_SI_type) a -#endif - -#ifndef perform_floatsidf -#define perform_floatsidf(a) return (double) a -#endif - -#ifndef perform_floatsisf -#define perform_floatsisf(a) return INTIFY ((float) a) -#endif - -#ifndef perform_extendsfdf2 -#define perform_extendsfdf2(a) return a -#endif - -#ifndef perform_truncdfsf2 -#define perform_truncdfsf2(a) return INTIFY (a) -#endif - -/* Note that eqdf2 returns a value for "true" that is == 0, - nedf2 returns a value for "true" that is != 0, - gtdf2 returns a value for "true" that is > 0, - and so on. */ - -#ifndef perform_eqdf2 -#define perform_eqdf2(a, b) return !(a == b) -#endif - -#ifndef perform_nedf2 -#define perform_nedf2(a, b) return a != b -#endif - -#ifndef perform_gtdf2 -#define perform_gtdf2(a, b) return a > b -#endif - -#ifndef perform_gedf2 -#define perform_gedf2(a, b) return (a >= b) - 1 -#endif - -#ifndef perform_ltdf2 -#define perform_ltdf2(a, b) return -(a < b) -#endif - -#ifndef perform_ledf2 -#define perform_ledf2(a, b) return 1 - (a <= b) -#endif - -#ifndef perform_eqsf2 -#define perform_eqsf2(a, b) return !(a == b) -#endif - -#ifndef perform_nesf2 -#define perform_nesf2(a, b) return a != b -#endif - -#ifndef perform_gtsf2 -#define perform_gtsf2(a, b) return a > b -#endif - -#ifndef perform_gesf2 -#define perform_gesf2(a, b) return (a >= b) - 1 -#endif - -#ifndef perform_ltsf2 -#define perform_ltsf2(a, b) return -(a < b) -#endif - -#ifndef perform_lesf2 -#define perform_lesf2(a, b) return 1 - (a <= b); -#endif - -/* Define the C data type to use for an SImode value. */ - -#ifndef nongcc_SI_type -#define nongcc_SI_type long int -#endif - -/* Define the C data type to use for a value of word size */ -#ifndef nongcc_word_type -#define nongcc_word_type nongcc_SI_type -#endif - -/* Define the type to be used for returning an SF mode value - and the method for turning a float into that type. 
- These definitions work for machines where an SF value is - returned in the same register as an int. */ - -#ifndef FLOAT_VALUE_TYPE -#define FLOAT_VALUE_TYPE int -#endif - -#ifndef INTIFY -#define INTIFY(FLOATVAL) (intify.f = (FLOATVAL), intify.i) -#endif - -#ifndef FLOATIFY -#define FLOATIFY(INTVAL) ((INTVAL).f) -#endif - -#ifndef FLOAT_ARG_TYPE -#define FLOAT_ARG_TYPE union flt_or_int -#endif - -union flt_or_value { FLOAT_VALUE_TYPE i; float f; }; - -union flt_or_int { int i; float f; }; - - -#ifdef L_mulsi3 -nongcc_SI_type -__mulsi3 (a, b) - nongcc_SI_type a, b; -{ - perform_mulsi3 (a, b); -} -#endif - -#ifdef L_udivsi3 -nongcc_SI_type -__udivsi3 (a, b) - unsigned nongcc_SI_type a, b; -{ - perform_udivsi3 (a, b); -} -#endif - -#ifdef L_divsi3 -nongcc_SI_type -__divsi3 (a, b) - nongcc_SI_type a, b; -{ - perform_divsi3 (a, b); -} -#endif - -#ifdef L_umodsi3 -nongcc_SI_type -__umodsi3 (a, b) - unsigned nongcc_SI_type a, b; -{ - perform_umodsi3 (a, b); -} -#endif - -#ifdef L_modsi3 -nongcc_SI_type -__modsi3 (a, b) - nongcc_SI_type a, b; -{ - perform_modsi3 (a, b); -} -#endif - -#ifdef L_lshrsi3 -nongcc_SI_type -__lshrsi3 (a, b) - unsigned nongcc_SI_type a, b; -{ - perform_lshrsi3 (a, b); -} -#endif - -#ifdef L_ashrsi3 -nongcc_SI_type -__ashrsi3 (a, b) - nongcc_SI_type a, b; -{ - perform_ashrsi3 (a, b); -} -#endif - -#ifdef L_ashlsi3 -nongcc_SI_type -__ashlsi3 (a, b) - nongcc_SI_type a, b; -{ - perform_ashlsi3 (a, b); -} -#endif - -#ifdef L_divdf3 -double -__divdf3 (a, b) - double a, b; -{ - perform_divdf3 (a, b); -} -#endif - -#ifdef L_muldf3 -double -__muldf3 (a, b) - double a, b; -{ - perform_muldf3 (a, b); -} -#endif - -#ifdef L_negdf2 -double -__negdf2 (a) - double a; -{ - perform_negdf2 (a); -} -#endif - -#ifdef L_adddf3 -double -__adddf3 (a, b) - double a, b; -{ - perform_adddf3 (a, b); -} -#endif - -#ifdef L_subdf3 -double -__subdf3 (a, b) - double a, b; -{ - perform_subdf3 (a, b); -} -#endif - -/* Note that eqdf2 returns a value for "true" that is == 0, - nedf2 returns a value for "true" that is != 0, - gtdf2 returns a value for "true" that is > 0, - and so on. */ - -#ifdef L_eqdf2 -nongcc_word_type -__eqdf2 (a, b) - double a, b; -{ - /* Value == 0 iff a == b. */ - perform_eqdf2 (a, b); -} -#endif - -#ifdef L_nedf2 -nongcc_word_type -__nedf2 (a, b) - double a, b; -{ - /* Value != 0 iff a != b. */ - perform_nedf2 (a, b); -} -#endif - -#ifdef L_gtdf2 -nongcc_word_type -__gtdf2 (a, b) - double a, b; -{ - /* Value > 0 iff a > b. */ - perform_gtdf2 (a, b); -} -#endif - -#ifdef L_gedf2 -nongcc_word_type -__gedf2 (a, b) - double a, b; -{ - /* Value >= 0 iff a >= b. */ - perform_gedf2 (a, b); -} -#endif - -#ifdef L_ltdf2 -nongcc_word_type -__ltdf2 (a, b) - double a, b; -{ - /* Value < 0 iff a < b. */ - perform_ltdf2 (a, b); -} -#endif - -#ifdef L_ledf2 -nongcc_word_type -__ledf2 (a, b) - double a, b; -{ - /* Value <= 0 iff a <= b. 
*/ - perform_ledf2 (a, b); -} -#endif - -#ifdef L_fixdfsi -nongcc_SI_type -__fixdfsi (a) - double a; -{ - perform_fixdfsi (a); -} -#endif - -#ifdef L_fixsfsi -nongcc_SI_type -__fixsfsi (a) - FLOAT_ARG_TYPE a; -{ - union flt_or_value intify; - perform_fixsfsi (FLOATIFY (a)); -} -#endif - -#ifdef L_floatsidf -double -__floatsidf (a) - nongcc_SI_type a; -{ - perform_floatsidf (a); -} -#endif - -#ifdef L_floatsisf -FLOAT_VALUE_TYPE -__floatsisf (a) - nongcc_SI_type a; -{ - union flt_or_value intify; - perform_floatsisf (a); -} -#endif - -#ifdef L_addsf3 -FLOAT_VALUE_TYPE -__addsf3 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_value intify; - perform_addsf3 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_negsf2 -FLOAT_VALUE_TYPE -__negsf2 (a) - FLOAT_ARG_TYPE a; -{ - union flt_or_value intify; - perform_negsf2 (FLOATIFY (a)); -} -#endif - -#ifdef L_subsf3 -FLOAT_VALUE_TYPE -__subsf3 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_value intify; - perform_subsf3 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_eqsf2 -nongcc_word_type -__eqsf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value == 0 iff a == b. */ - perform_eqsf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_nesf2 -nongcc_word_type -__nesf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value != 0 iff a != b. */ - perform_nesf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_gtsf2 -nongcc_word_type -__gtsf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value > 0 iff a > b. */ - perform_gtsf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_gesf2 -nongcc_word_type -__gesf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value >= 0 iff a >= b. */ - perform_gesf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_ltsf2 -nongcc_word_type -__ltsf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value < 0 iff a < b. */ - perform_ltsf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_lesf2 -nongcc_word_type -__lesf2 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_int intify; - /* Value <= 0 iff a <= b. */ - perform_lesf2 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_mulsf3 -FLOAT_VALUE_TYPE -__mulsf3 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_value intify; - perform_mulsf3 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_divsf3 -FLOAT_VALUE_TYPE -__divsf3 (a, b) - FLOAT_ARG_TYPE a, b; -{ - union flt_or_value intify; - perform_divsf3 (FLOATIFY (a), FLOATIFY (b)); -} -#endif - -#ifdef L_truncdfsf2 -FLOAT_VALUE_TYPE -__truncdfsf2 (a) - double a; -{ - union flt_or_value intify; - perform_truncdfsf2 (a); -} -#endif - -#ifdef L_extendsfdf2 -double -__extendsfdf2 (a) - FLOAT_ARG_TYPE a; -{ - union flt_or_value intify; - perform_extendsfdf2 (FLOATIFY (a)); -} -#endif diff --git a/gcc/libgcc2.c b/gcc/libgcc2.c deleted file mode 100755 index cf7231f..0000000 --- a/gcc/libgcc2.c +++ /dev/null @@ -1,946 +0,0 @@ -/* More subroutines needed by GCC output code on some machines. */ -/* Compile this one with gcc. */ -/* Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc. - -This file is part of GNU CC. - -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. - -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -/* As a special exception, if you link this library with other files, - some of which are compiled with GCC, to produce an executable, - this library does not by itself cause the resulting executable - to be covered by the GNU General Public License. - This exception does not however invalidate any other reasons why - the executable file might be covered by the GNU General Public License. */ - -#include - -/* Don't use `fancy_abort' here even if config.h says to use it. */ -#ifdef abort -#undef abort -#endif - -/* In the first part of this file, we are interfacing to calls generated - by the compiler itself. These calls pass values into these routines - which have very specific modes (rather than very specific types), and - these compiler-generated calls also expect any return values to have - very specific modes (rather than very specific types). Thus, we need - to avoid using regular C language type names in this part of the file - because the sizes for those types can be configured to be anything. - Instead we use the following special type names. */ - -typedef unsigned int UQItype __attribute__ ((mode (QI))); -typedef int SItype __attribute__ ((mode (SI))); -typedef unsigned int USItype __attribute__ ((mode (SI))); -typedef int DItype __attribute__ ((mode (DI))); -typedef unsigned int UDItype __attribute__ ((mode (DI))); - -typedef float SFtype __attribute__ ((mode (SF))); -typedef float DFtype __attribute__ ((mode (DF))); - -typedef int word_type __attribute__ ((mode (__word__))); - -/* Make sure that we don't accidentally use any normal C language built-in - type names in the first part of this file. Instead we want to use *only* - the type names defined above. The following macro definitions insure - that if we *do* accidentally use some normal C language built-in type name, - we will get a syntax error. */ - -#define char bogus_type -#define short bogus_type -#define int bogus_type -#define long bogus_type -#define unsigned bogus_type -#define float bogus_type -#define double bogus_type - -#define SI_TYPE_SIZE (sizeof (SItype) * 8) - -struct DIstruct {SItype low, high;}; - -/* We need this union to unpack/pack DImode values, since we don't have - any arithmetic yet. Incoming DImode parameters are stored into the - `ll' field, and the unpacked result is read from the struct `s'. */ - -typedef union -{ - struct DIstruct s; - DItype ll; -} DIunion; - -#if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\ - || defined (L_divdi3) || defined (L_udivdi3) \ - || defined (L_moddi3) || defined (L_umoddi3)) - -#include "longlong.h" - -#endif /* udiv or mul */ - -extern DItype __fixunssfdi (SFtype a); -extern DItype __fixunsdfdi (DFtype a); - -#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3) -#if defined (L_divdi3) || defined (L_moddi3) -static inline -#endif -DItype -__negdi2 (DItype u) -{ - DIunion w; - DIunion uu; - - uu.ll = u; - - w.s.low = -uu.s.low; - w.s.high = -uu.s.high - ((USItype) w.s.low > 0); - - return w.ll; -} -#endif - -/* Unless shift functions are defined whith full ANSI prototypes, - parameter b will be promoted to int if word_type is smaller than an int. 
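   The DImode helpers below operate on a high/low pair of SImode words.
   The standalone sketch here (not part of the original file; it assumes
   32-bit halves and uses C99 fixed-width types) follows the same
   logical-right-shift logic as __lshrdi3 and checks it against a native
   64-bit shift:

#include <stdio.h>
#include <stdint.h>

static uint64_t
lshr64 (uint64_t u, int b)
{
  uint32_t lo = (uint32_t) u, hi = (uint32_t) (u >> 32);
  uint32_t w_lo, w_hi;
  int bm = 32 - b;

  if (b == 0)
    return u;                     /* avoid an undefined shift by 32 below */
  if (bm <= 0)
    {
      /* The whole low word is shifted away.  */
      w_hi = 0;
      w_lo = hi >> -bm;
    }
  else
    {
      /* Bits crossing the word boundary become the "carries".  */
      uint32_t carries = hi << bm;
      w_hi = hi >> b;
      w_lo = (lo >> b) | carries;
    }
  return ((uint64_t) w_hi << 32) | w_lo;
}

int
main (void)
{
  uint64_t u = 0x0123456789abcdefULL;
  int b;

  for (b = 0; b < 64; b++)
    if (lshr64 (u, b) != u >> b)
      {
        printf ("mismatch at %d\n", b);
        return 1;
      }
  printf ("ok\n");
  return 0;
}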
*/ -#ifdef L_lshrdi3 -DItype -__lshrdi3 (DItype u, word_type b) -{ - DIunion w; - word_type bm; - DIunion uu; - - if (b == 0) - return u; - - uu.ll = u; - - bm = (sizeof (SItype) * 8) - b; - if (bm <= 0) - { - w.s.high = 0; - w.s.low = (USItype)uu.s.high >> -bm; - } - else - { - USItype carries = (USItype)uu.s.high << bm; - w.s.high = (USItype)uu.s.high >> b; - w.s.low = ((USItype)uu.s.low >> b) | carries; - } - - return w.ll; -} -#endif - -#ifdef L_ashldi3 -DItype -__ashldi3 (DItype u, word_type b) -{ - DIunion w; - word_type bm; - DIunion uu; - - if (b == 0) - return u; - - uu.ll = u; - - bm = (sizeof (SItype) * 8) - b; - if (bm <= 0) - { - w.s.low = 0; - w.s.high = (USItype)uu.s.low << -bm; - } - else - { - USItype carries = (USItype)uu.s.low >> bm; - w.s.low = (USItype)uu.s.low << b; - w.s.high = ((USItype)uu.s.high << b) | carries; - } - - return w.ll; -} -#endif - -#ifdef L_ashrdi3 -DItype -__ashrdi3 (DItype u, word_type b) -{ - DIunion w; - word_type bm; - DIunion uu; - - if (b == 0) - return u; - - uu.ll = u; - - bm = (sizeof (SItype) * 8) - b; - if (bm <= 0) - { - /* w.s.high = 1..1 or 0..0 */ - w.s.high = uu.s.high >> (sizeof (SItype) * 8 - 1); - w.s.low = uu.s.high >> -bm; - } - else - { - USItype carries = (USItype)uu.s.high << bm; - w.s.high = uu.s.high >> b; - w.s.low = ((USItype)uu.s.low >> b) | carries; - } - - return w.ll; -} -#endif - -#ifdef L_ffsdi2 -DItype -__ffsdi2 (DItype u) -{ - DIunion uu, w; - uu.ll = u; - w.s.high = 0; - w.s.low = ffs (uu.s.low); - if (w.s.low != 0) - return w.ll; - w.s.low = ffs (uu.s.high); - if (w.s.low != 0) - { - w.s.low += 8 * sizeof (SItype); - return w.ll; - } - return w.ll; -} -#endif - -#ifdef L_muldi3 -DItype -__muldi3 (DItype u, DItype v) -{ - DIunion w; - DIunion uu, vv; - - uu.ll = u, - vv.ll = v; - - w.ll = __umulsidi3 (uu.s.low, vv.s.low); - w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high - + (USItype) uu.s.high * (USItype) vv.s.low); - - return w.ll; -} -#endif - -#ifdef L_udiv_w_sdiv -#if defined (sdiv_qrnnd) -USItype -__udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d) -{ - USItype q, r; - USItype c0, c1, b1; - - if ((SItype) d >= 0) - { - if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1))) - { - /* dividend, divisor, and quotient are nonnegative */ - sdiv_qrnnd (q, r, a1, a0, d); - } - else - { - /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */ - sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1)); - /* Divide (c1*2^32 + c0) by d */ - sdiv_qrnnd (q, r, c1, c0, d); - /* Add 2^31 to quotient */ - q += (USItype) 1 << (SI_TYPE_SIZE - 1); - } - } - else - { - b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */ - c1 = a1 >> 1; /* A/2 */ - c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1); - - if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */ - { - sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ - - r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */ - if ((d & 1) != 0) - { - if (r >= q) - r = r - q; - else if (q - r <= d) - { - r = r - q + d; - q--; - } - else - { - r = r - q + 2*d; - q -= 2; - } - } - } - else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */ - { - c1 = (b1 - 1) - c1; - c0 = ~c0; /* logical NOT */ - - sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ - - q = ~q; /* (A/2)/b1 */ - r = (b1 - 1) - r; - - r = 2*r + (a0 & 1); /* A/(2*b1) */ - - if ((d & 1) != 0) - { - if (r >= q) - r = r - q; - else if (q - r <= d) - { - r = r - q + d; - q--; - } - else - { - r = r - q + 2*d; - q -= 2; - } - } - } - else /* Implies c1 = b1 */ - { /* Hence a1 = d - 1 = 2*b1 - 1 */ - if (a0 >= -d) - { - q = -1; - r = a0 + d; - } 
- else - { - q = -2; - r = a0 + 2*d; - } - } - } - - *rp = r; - return q; -} -#else -/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */ -USItype -__udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)), - USItype a1 __attribute__ ((__unused__)), - USItype a0 __attribute__ ((__unused__)), - USItype d __attribute__ ((__unused__))) -{ - return 0; -} -#endif -#endif - -#if (defined (L_udivdi3) || defined (L_divdi3) || \ - defined (L_umoddi3) || defined (L_moddi3)) -#define L_udivmoddi4 -#endif - -#ifdef L_udivmoddi4 -static const UQItype __clz_tab[] = -{ - 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, - 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, - 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, - 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, - 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, - 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, - 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, - 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, -}; - -#if (defined (L_udivdi3) || defined (L_divdi3) || \ - defined (L_umoddi3) || defined (L_moddi3)) -static inline -#endif -UDItype -__udivmoddi4 (UDItype n, UDItype d, UDItype *rp) -{ - DIunion ww; - DIunion nn, dd; - DIunion rr; - USItype d0, d1, n0, n1, n2; - USItype q0, q1; - USItype b, bm; - - nn.ll = n; - dd.ll = d; - - d0 = dd.s.low; - d1 = dd.s.high; - n0 = nn.s.low; - n1 = nn.s.high; - -#if !UDIV_NEEDS_NORMALIZATION - if (d1 == 0) - { - if (d0 > n1) - { - /* 0q = nn / 0D */ - - udiv_qrnnd (q0, n0, n1, n0, d0); - q1 = 0; - - /* Remainder in n0. */ - } - else - { - /* qq = NN / 0d */ - - if (d0 == 0) - d0 = 1 / d0; /* Divide intentionally by zero. */ - - udiv_qrnnd (q1, n1, 0, n1, d0); - udiv_qrnnd (q0, n0, n1, n0, d0); - - /* Remainder in n0. */ - } - - if (rp != 0) - { - rr.s.low = n0; - rr.s.high = 0; - *rp = rr.ll; - } - } - -#else /* UDIV_NEEDS_NORMALIZATION */ - - if (d1 == 0) - { - if (d0 > n1) - { - /* 0q = nn / 0D */ - - count_leading_zeros (bm, d0); - - if (bm != 0) - { - /* Normalize, i.e. make the most significant bit of the - denominator set. */ - - d0 = d0 << bm; - n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm)); - n0 = n0 << bm; - } - - udiv_qrnnd (q0, n0, n1, n0, d0); - q1 = 0; - - /* Remainder in n0 >> bm. */ - } - else - { - /* qq = NN / 0d */ - - if (d0 == 0) - d0 = 1 / d0; /* Divide intentionally by zero. */ - - count_leading_zeros (bm, d0); - - if (bm == 0) - { - /* From (n1 >= d0) /\ (the most significant bit of d0 is set), - conclude (the most significant bit of n1 is set) /\ (the - leading quotient digit q1 = 1). - - This special case is necessary, not an optimization. - (Shifts counts of SI_TYPE_SIZE are undefined.) */ - - n1 -= d0; - q1 = 1; - } - else - { - /* Normalize. */ - - b = SI_TYPE_SIZE - bm; - - d0 = d0 << bm; - n2 = n1 >> b; - n1 = (n1 << bm) | (n0 >> b); - n0 = n0 << bm; - - udiv_qrnnd (q1, n1, n2, n1, d0); - } - - /* n1 != d0... */ - - udiv_qrnnd (q0, n0, n1, n0, d0); - - /* Remainder in n0 >> bm. */ - } - - if (rp != 0) - { - rr.s.low = n0 >> bm; - rr.s.high = 0; - *rp = rr.ll; - } - } -#endif /* UDIV_NEEDS_NORMALIZATION */ - - else - { - if (d1 > n1) - { - /* 00 = nn / DD */ - - q0 = 0; - q1 = 0; - - /* Remainder in n1n0. 
*/ - if (rp != 0) - { - rr.s.low = n0; - rr.s.high = n1; - *rp = rr.ll; - } - } - else - { - /* 0q = NN / dd */ - - count_leading_zeros (bm, d1); - if (bm == 0) - { - /* From (n1 >= d1) /\ (the most significant bit of d1 is set), - conclude (the most significant bit of n1 is set) /\ (the - quotient digit q0 = 0 or 1). - - This special case is necessary, not an optimization. */ - - /* The condition on the next line takes advantage of that - n1 >= d1 (true due to program flow). */ - if (n1 > d1 || n0 >= d0) - { - q0 = 1; - sub_ddmmss (n1, n0, n1, n0, d1, d0); - } - else - q0 = 0; - - q1 = 0; - - if (rp != 0) - { - rr.s.low = n0; - rr.s.high = n1; - *rp = rr.ll; - } - } - else - { - USItype m1, m0; - /* Normalize. */ - - b = SI_TYPE_SIZE - bm; - - d1 = (d1 << bm) | (d0 >> b); - d0 = d0 << bm; - n2 = n1 >> b; - n1 = (n1 << bm) | (n0 >> b); - n0 = n0 << bm; - - udiv_qrnnd (q0, n1, n2, n1, d1); - umul_ppmm (m1, m0, q0, d0); - - if (m1 > n1 || (m1 == n1 && m0 > n0)) - { - q0--; - sub_ddmmss (m1, m0, m1, m0, d1, d0); - } - - q1 = 0; - - /* Remainder in (n1n0 - m1m0) >> bm. */ - if (rp != 0) - { - sub_ddmmss (n1, n0, n1, n0, m1, m0); - rr.s.low = (n1 << b) | (n0 >> bm); - rr.s.high = n1 >> bm; - *rp = rr.ll; - } - } - } - } - - ww.s.low = q0; - ww.s.high = q1; - return ww.ll; -} -#endif - -#ifdef L_divdi3 -UDItype __udivmoddi4 (); - -DItype -__divdi3 (DItype u, DItype v) -{ - word_type c = 0; - DIunion uu, vv; - DItype w; - - uu.ll = u; - vv.ll = v; - - if (uu.s.high < 0) - c = ~c, - uu.ll = __negdi2 (uu.ll); - if (vv.s.high < 0) - c = ~c, - vv.ll = __negdi2 (vv.ll); - - w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0); - if (c) - w = __negdi2 (w); - - return w; -} -#endif - -#ifdef L_moddi3 -UDItype __udivmoddi4 (); -DItype -__moddi3 (DItype u, DItype v) -{ - word_type c = 0; - DIunion uu, vv; - DItype w; - - uu.ll = u; - vv.ll = v; - - if (uu.s.high < 0) - c = ~c, - uu.ll = __negdi2 (uu.ll); - if (vv.s.high < 0) - vv.ll = __negdi2 (vv.ll); - - (void) __udivmoddi4 (uu.ll, vv.ll, &w); - if (c) - w = __negdi2 (w); - - return w; -} -#endif - -#ifdef L_umoddi3 -UDItype __udivmoddi4 (); -UDItype -__umoddi3 (UDItype u, UDItype v) -{ - UDItype w; - - (void) __udivmoddi4 (u, v, &w); - - return w; -} -#endif - -#ifdef L_udivdi3 -UDItype __udivmoddi4 (); -UDItype -__udivdi3 (UDItype n, UDItype d) -{ - return __udivmoddi4 (n, d, (UDItype *) 0); -} -#endif - -#ifdef L_cmpdi2 -word_type -__cmpdi2 (DItype a, DItype b) -{ - DIunion au, bu; - - au.ll = a, bu.ll = b; - - if (au.s.high < bu.s.high) - return 0; - else if (au.s.high > bu.s.high) - return 2; - if ((USItype) au.s.low < (USItype) bu.s.low) - return 0; - else if ((USItype) au.s.low > (USItype) bu.s.low) - return 2; - return 1; -} -#endif - -#ifdef L_ucmpdi2 -word_type -__ucmpdi2 (DItype a, DItype b) -{ - DIunion au, bu; - - au.ll = a, bu.ll = b; - - if ((USItype) au.s.high < (USItype) bu.s.high) - return 0; - else if ((USItype) au.s.high > (USItype) bu.s.high) - return 2; - if ((USItype) au.s.low < (USItype) bu.s.low) - return 0; - else if ((USItype) au.s.low > (USItype) bu.s.low) - return 2; - return 1; -} -#endif - -#ifdef L_fixunsdfdi -#define WORD_SIZE (sizeof (SItype) * 8) -#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) - -DItype -__fixunsdfdi (DFtype a) -{ - DFtype b; - UDItype v; - - if (a < 0) - return 0; - - /* Compute high word of result, as a flonum. */ - b = (a / HIGH_WORD_COEFF); - /* Convert that to fixed (but not to DItype!), - and shift it into the high word. 
*/ - v = (USItype) b; - v <<= WORD_SIZE; - /* Remove high part from the DFtype, leaving the low part as flonum. */ - a -= (DFtype)v; - /* Convert that to fixed (but not to DItype!) and add it in. - Sometimes A comes out negative. This is significant, since - A has more bits than a long int does. */ - if (a < 0) - v -= (USItype) (- a); - else - v += (USItype) a; - return v; -} -#endif - -#ifdef L_fixdfdi -DItype -__fixdfdi (DFtype a) -{ - if (a < 0) - return - __fixunsdfdi (-a); - return __fixunsdfdi (a); -} -#endif - -#ifdef L_fixunssfdi -#define WORD_SIZE (sizeof (SItype) * 8) -#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) - -DItype -__fixunssfdi (SFtype original_a) -{ - /* Convert the SFtype to a DFtype, because that is surely not going - to lose any bits. Some day someone else can write a faster version - that avoids converting to DFtype, and verify it really works right. */ - DFtype a = original_a; - DFtype b; - UDItype v; - - if (a < 0) - return 0; - - /* Compute high word of result, as a flonum. */ - b = (a / HIGH_WORD_COEFF); - /* Convert that to fixed (but not to DItype!), - and shift it into the high word. */ - v = (USItype) b; - v <<= WORD_SIZE; - /* Remove high part from the DFtype, leaving the low part as flonum. */ - a -= (DFtype)v; - /* Convert that to fixed (but not to DItype!) and add it in. - Sometimes A comes out negative. This is significant, since - A has more bits than a long int does. */ - if (a < 0) - v -= (USItype) (- a); - else - v += (USItype) a; - return v; -} -#endif - -#ifdef L_fixsfdi -DItype -__fixsfdi (SFtype a) -{ - if (a < 0) - return - __fixunssfdi (-a); - return __fixunssfdi (a); -} -#endif - -#ifdef L_floatdidf -#define WORD_SIZE (sizeof (SItype) * 8) -#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2)) -#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) - -DFtype -__floatdidf (DItype u) -{ - DFtype d; - - d = (SItype) (u >> WORD_SIZE); - d *= HIGH_HALFWORD_COEFF; - d *= HIGH_HALFWORD_COEFF; - d += (USItype) (u & (HIGH_WORD_COEFF - 1)); - - return d; -} -#endif - -#ifdef L_floatdisf -#define WORD_SIZE (sizeof (SItype) * 8) -#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2)) -#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) -#define DI_SIZE (sizeof (DItype) * 8) - -/* Define codes for all the float formats that we know of. Note - that this is copied from real.h. */ - -#define UNKNOWN_FLOAT_FORMAT 0 -#define IEEE_FLOAT_FORMAT 1 -#define VAX_FLOAT_FORMAT 2 -#define IBM_FLOAT_FORMAT 3 - -/* Default to IEEE float if not specified. Nearly all machines use it. */ -#ifndef HOST_FLOAT_FORMAT -#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT -#endif - -#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT -#define DF_SIZE 53 -#define SF_SIZE 24 -#endif - -#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT -#define DF_SIZE 56 -#define SF_SIZE 24 -#endif - -#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT -#define DF_SIZE 56 -#define SF_SIZE 24 -#endif - -SFtype -__floatdisf (DItype u) -{ - /* Do the calculation in DFmode - so that we don't lose any of the precision of the high word - while multiplying it. */ - DFtype f; - - /* Protect against double-rounding error. - Represent any low-order bits, that might be truncated in DFmode, - by a bit that won't be lost. The bit can go in anywhere below the - rounding position of the SFmode. A fixed mask and bit position - handles all usual configurations. It doesn't handle the case - of 128-bit DImode, however. 
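   A concrete case makes the guard clearer.  The sketch below is not part
   of the original file; it assumes an IEEE host whose direct 64-bit-to-
   float conversion is correctly rounded, so that conversion serves as the
   reference.  With u = 2^60 + 2^36 + 2^7, rounding through double first
   drops the 2^7 bit and the later float rounding then loses the carry as
   well, while folding the low bits into one sticky bit preserves it.  The
   range check from the surrounding code is omitted because u is well
   outside +-2^53:

#include <stdio.h>
#include <stdint.h>

#define DI_SIZE 64
#define DF_SIZE 53
#define REP_BIT ((uint64_t) 1 << (DI_SIZE - DF_SIZE))

int
main (void)
{
  uint64_t u = ((uint64_t) 1 << 60) + ((uint64_t) 1 << 36) + (1 << 7);
  uint64_t v = u;
  float reference = (float) u;            /* correctly rounded            */
  float naive = (float) (double) u;       /* double rounding loses a bit  */
  float guarded;

  if (v & (REP_BIT - 1))                  /* low bits double could drop?  */
    v |= REP_BIT;                         /* fold them into one sticky bit */
  guarded = (float) (double) v;

  printf ("naive   %s\n", naive   == reference ? "matches" : "differs");
  printf ("guarded %s\n", guarded == reference ? "matches" : "differs");
  return 0;
}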
*/ - if (DF_SIZE < DI_SIZE - && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE)) - { -#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE)) - if (! (- ((DItype) 1 << DF_SIZE) < u - && u < ((DItype) 1 << DF_SIZE))) - { - if ((USItype) u & (REP_BIT - 1)) - u |= REP_BIT; - } - } - f = (SItype) (u >> WORD_SIZE); - f *= HIGH_HALFWORD_COEFF; - f *= HIGH_HALFWORD_COEFF; - f += (USItype) (u & (HIGH_WORD_COEFF - 1)); - - return (SFtype) f; -} -#endif - -#ifdef L_fixunsdfsi -/* Reenable the normal types, in case limits.h needs them. */ -#undef char -#undef short -#undef int -#undef long -#undef unsigned -#undef float -#undef double -#undef MIN -#undef MAX -#include - -USItype -__fixunsdfsi (DFtype a) -{ - if (a >= - (DFtype) LONG_MIN) - return (SItype) (a + LONG_MIN) - LONG_MIN; - return (SItype) a; -} -#endif - -#ifdef L_fixunssfsi -/* Reenable the normal types, in case limits.h needs them. */ -#undef char -#undef short -#undef int -#undef long -#undef unsigned -#undef float -#undef double -#undef MIN -#undef MAX -#include - -USItype -__fixunssfsi (SFtype a) -{ - if (a >= - (SFtype) LONG_MIN) - return (SItype) (a + LONG_MIN) - LONG_MIN; - return (SItype) a; -} -#endif - -/* From here on down, the routines use normal data types. */ - -#define SItype bogus_type -#define USItype bogus_type -#define DItype bogus_type -#define UDItype bogus_type -#define SFtype bogus_type -#define DFtype bogus_type - -#undef char -#undef short -#undef int -#undef long -#undef unsigned -#undef float -#undef double diff --git a/gcc/telf.h b/gcc/telf.h new file mode 100755 index 0000000..d4c15b0 --- /dev/null +++ b/gcc/telf.h @@ -0,0 +1,368 @@ +/* CYGNUS LOCAL (entire file) clm/arm-elf */ +/* Definitions of target machine for GNU compiler, + for Thumb with ELF obj format. + Copyright (C) 1995, 1996 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +#define OBJECT_FORMAT_ELF + +#include "thumb.h" +#include "tree.h" + +/* Run-time Target Specification. */ +#undef TARGET_VERSION +#define TARGET_VERSION fputs (" (Thumb/elf)", stderr) + +#define MULTILIB_DEFAULTS { "mlittle-endian" } + +/* Setting this to 32 produces more efficient code, but the value set in previous + versions of this toolchain was 8, which produces more compact structures. The + command line option -mstructure_size_boundary= can be used to change this + value. */ +#undef STRUCTURE_SIZE_BOUNDARY +#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary + +extern int arm_structure_size_boundary; + +/* Debug */ +#define DWARF2_DEBUGGING_INFO + + +/* Note - it is important that these definitions match those in semi.h for the ARM port. */ +#undef LOCAL_LABEL_PREFIX +#define LOCAL_LABEL_PREFIX "." + + +/* A C statement to output assembler commands which will identify the + object file as having been compiled with GNU CC (or another GNU + compiler). 
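+
+   For instance, with the LOCAL_LABEL_PREFIX of "." defined above, the
+   definition below marks the output with a single local label:
+
+	.gcc2_compiled.:
+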
*/ +#define ASM_IDENTIFY_GCC(STREAM) \ + fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX ) + +#undef ASM_FILE_START +#define ASM_FILE_START(STREAM) \ +do { \ + extern char *version_string; \ + fprintf ((STREAM), "%s Generated by gcc %s for Thumb/elf\n", \ + ASM_COMMENT_START, version_string); \ + fprintf ((STREAM), ASM_APP_OFF); \ +} while (0) + +/* A C statement to output something to the assembler file to switch to section + NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or + NULL_TREE. Some target formats do not support arbitrary sections. Do not + define this macro in such cases. */ +#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \ +do { \ + if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \ + fprintf (STREAM, "\t.section %s,\"ax\",%%progbits\n", (NAME)); \ + else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \ + fprintf (STREAM, "\t.section %s,\"a\"\n", (NAME)); \ + else if (0 == strncmp((NAME), ".bss", sizeof(".bss") - 1)) \ + fprintf (STREAM, "\t.section %s,\"aw\",%%nobits\n", (NAME)); \ + else \ + fprintf (STREAM, "\t.section %s,\"aw\"\n", (NAME)); \ +} while (0) + +#undef INIT_SECTION_ASM_OP + +/* Define this macro if jump tables (for `tablejump' insns) should be + output in the text section, along with the assembler instructions. + Otherwise, the readonly data section is used. */ +#define JUMP_TABLES_IN_TEXT_SECTION 1 + +#undef READONLY_DATA_SECTION +#define READONLY_DATA_SECTION rdata_section +#undef RDATA_SECTION_ASM_OP +#define RDATA_SECTION_ASM_OP "\t.section .rodata" + +/* If defined, a C expression whose value is a string containing the + assembler operation to identify the following data as + uninitialized global data. If not defined, and neither + `ASM_OUTPUT_BSS' nor `ASM_OUTPUT_ALIGNED_BSS' are defined, + uninitialized global data will be output in the data section if + `-fno-common' is passed, otherwise `ASM_OUTPUT_COMMON' will be + used. */ +#ifndef BSS_SECTION_ASM_OP +#define BSS_SECTION_ASM_OP ".section\t.bss" +#endif + +/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a + separate, explicit argument. If you define this macro, it is used + in place of `ASM_OUTPUT_BSS', and gives you more flexibility in + handling the required alignment of the variable. The alignment is + specified as the number of bits. + + Try to use function `asm_output_aligned_bss' defined in file + `varasm.c' when defining this macro. */ +#ifndef ASM_OUTPUT_ALIGNED_BSS +#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ + asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN) +#endif + +/* Don't know how to order these. UNALIGNED_WORD_ASM_OP is in + dwarf2.out. */ +#define UNALIGNED_WORD_ASM_OP ".4byte" + +#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE,ADDR) \ + if (((ADDR)[0] == '.') && ((ADDR)[1] == 'L')) \ + fprintf ((FILE), "\t%s\t%s", UNALIGNED_WORD_ASM_OP, (ADDR)); \ + else \ + fprintf ((FILE), "\t%s\t%s", \ + UNALIGNED_WORD_ASM_OP, (ADDR)) + +#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE,RTX) \ +do { \ + fprintf ((FILE), "\t%s\t", UNALIGNED_WORD_ASM_OP); \ + output_addr_const ((FILE), (RTX)); \ + fputc ('\n', (FILE)); \ +} while (0) + +/* This is how to equate one symbol to another symbol. The syntax used is + `SYM1=SYM2'. Note that this is different from the way equates are done + with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. 
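+
+   For instance, for two hypothetical symbols foo and bar the definition
+   below writes the line:
+
+	foo = bar
+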
*/ + +#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \ + do { fprintf ((FILE), "\t"); \ + assemble_name (FILE, LABEL1); \ + fprintf (FILE, " = "); \ + assemble_name (FILE, LABEL2); \ + fprintf (FILE, "\n"); \ + } while (0) + +/* For aliases of functions we use .thumb_set instead. */ +#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,DECL1,DECL2) \ + do \ + { \ + char * LABEL1 = XSTR (XEXP (DECL_RTL (decl), 0), 0); \ + char * LABEL2 = IDENTIFIER_POINTER (DECL2); \ + \ + if (TREE_CODE (DECL1) == FUNCTION_DECL) \ + { \ + fprintf (FILE, "\t.thumb_set "); \ + assemble_name (FILE, LABEL1); \ + fprintf (FILE, ","); \ + assemble_name (FILE, LABEL2); \ + fprintf (FILE, "\n"); \ + } \ + else \ + ASM_OUTPUT_DEF (FILE, LABEL1, LABEL2); \ + } \ + while (0) + +/* A list of other sections which the compiler might be "in" at any + given time. */ +#undef EXTRA_SECTIONS +#define EXTRA_SECTIONS in_rdata + +/* A list of extra section function definitions. */ + +#undef EXTRA_SECTION_FUNCTIONS +#define EXTRA_SECTION_FUNCTIONS \ + RDATA_SECTION_FUNCTION + +#define RDATA_SECTION_FUNCTION \ +void \ +rdata_section () \ +{ \ + if (in_section != in_rdata) \ + { \ + fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \ + in_section = in_rdata; \ + } \ +} + +#define INT_ASM_OP ".word" + +#define INVOKE__main + +#undef STARTFILE_SPEC +#define STARTFILE_SPEC "crtbegin%O%s crt0%O%s" + +#undef ENDFILE_SPEC +#define ENDFILE_SPEC "crtend%O%s" + +/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS + is a valid machine specific attribute for DECL. + The attributes in ATTRIBUTES have previously been assigned to DECL. */ +extern int arm_valid_machine_decl_attribute(tree decl, tree attributes, tree attr, tree args); +#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \ + arm_valid_machine_decl_attribute(DECL, ATTRIBUTES, IDENTIFIER, ARGS) + +/* The ARM development system defines __main. */ +#define NAME__MAIN "__gccmain" +#define SYMBOL__MAIN __gccmain + +#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL)) +#define UNIQUE_SECTION(DECL,RELOC) \ +do { \ + int len; \ + char * name, * string, * prefix; \ + \ + name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \ + \ + if (! DECL_ONE_ONLY (DECL)) \ + { \ + prefix = "."; \ + if (TREE_CODE (DECL) == FUNCTION_DECL) \ + prefix = ".text."; \ + else if (DECL_READONLY_SECTION (DECL, RELOC)) \ + prefix = ".rodata."; \ + else \ + prefix = ".data."; \ + } \ + else if (TREE_CODE (DECL) == FUNCTION_DECL) \ + prefix = ".gnu.linkonce.t."; \ + else if (DECL_READONLY_SECTION (DECL, RELOC)) \ + prefix = ".gnu.linkonce.r."; \ + else \ + prefix = ".gnu.linkonce.d."; \ + \ + len = strlen (name) + strlen (prefix); \ + string = alloca (len + 1); \ + sprintf (string, "%s%s", prefix, name); \ + \ + DECL_SECTION_NAME (DECL) = build_string (len, string); \ +} while (0) + +/* This is how we tell the assembler that a symbol is weak. */ +#ifndef ASM_WEAKEN_LABEL +#define ASM_WEAKEN_LABEL(FILE, NAME) \ + do \ + { \ + fputs ("\t.weak\t", FILE); \ + assemble_name (FILE, NAME); \ + fputc ('\n', FILE); \ + } \ + while (0) +#endif + +#ifndef TYPE_ASM_OP + +/* These macros generate the special .type and .size directives which + are used to set the corresponding fields of the linker symbol table + entries in an ELF object file under SVR4. These macros also output + the starting labels for the relevant functions/objects. 
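+
+   As a rough illustration, for a hypothetical function foo the
+   ASM_DECLARE_FUNCTION_NAME definition below produces:
+
+	.type	 foo,function
+	.thumb_func
+	foo:
+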
*/ +#define TYPE_ASM_OP ".type" +#define SIZE_ASM_OP ".size" + +/* The following macro defines the format used to output the second + operand of the .type assembler directive. Different svr4 assemblers + expect various different forms for this operand. The one given here + is just a default. You may need to override it in your machine- + specific tm.h file (depending upon the particulars of your assembler). */ +#define TYPE_OPERAND_FMT "%s" + +/* Write the extra assembler code needed to declare a function's result. + Most svr4 assemblers don't require any special declaration of the + result value, but there are exceptions. */ +#ifndef ASM_DECLARE_RESULT +#define ASM_DECLARE_RESULT(FILE, RESULT) +#endif + +/* Write the extra assembler code needed to declare a function properly. + Some svr4 assemblers need to also have something extra said about the + function's return value. We allow for that here. */ +#undef ASM_DECLARE_FUNCTION_NAME +#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ + do \ + { \ + fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \ + assemble_name (FILE, NAME); \ + putc (',', FILE); \ + fprintf (FILE, TYPE_OPERAND_FMT, "function"); \ + putc ('\n', FILE); \ + ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \ + fprintf (FILE, "\t.thumb_func\n") ; \ + ASM_OUTPUT_LABEL(FILE, NAME); \ + } \ + while (0) + +/* Write the extra assembler code needed to declare an object properly. */ +#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \ + do \ + { \ + fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \ + assemble_name (FILE, NAME); \ + putc (',', FILE); \ + fprintf (FILE, TYPE_OPERAND_FMT, "object"); \ + putc ('\n', FILE); \ + size_directive_output = 0; \ + if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \ + { \ + size_directive_output = 1; \ + fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ + assemble_name (FILE, NAME); \ + putc (',', FILE); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \ + int_size_in_bytes (TREE_TYPE (DECL))); \ + fputc ('\n', FILE); \ + } \ + ASM_OUTPUT_LABEL(FILE, NAME); \ + } \ + while (0) + +/* Output the size directive for a decl in rest_of_decl_compilation + in the case where we did not do so before the initializer. + Once we find the error_mark_node, we know that the value of + size_directive_output was set + by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */ +#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \ + do \ + { \ + char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ + if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \ + && ! AT_END && TOP_LEVEL \ + && DECL_INITIAL (DECL) == error_mark_node \ + && !size_directive_output) \ + { \ + size_directive_output = 1; \ + fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ + assemble_name (FILE, name); \ + putc (',', FILE); \ + fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \ + int_size_in_bytes (TREE_TYPE (DECL))); \ + fputc ('\n', FILE); \ + } \ + } \ + while (0) + +/* This is how to declare the size of a function. 
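+
+   For a hypothetical function foo (assuming this is the first size
+   directive emitted, so the internal label counter is at 1) the
+   definition below produces roughly:
+
+	.Lfe1:
+	.size	 foo,.Lfe1-foo
+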
*/ +#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \ + do \ + { \ + if (!flag_inhibit_size_directive) \ + { \ + char label[256]; \ + static int labelno; \ + labelno ++; \ + ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \ + ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \ + fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \ + assemble_name (FILE, (FNAME)); \ + fprintf (FILE, ","); \ + assemble_name (FILE, label); \ + fprintf (FILE, "-"); \ + assemble_name (FILE, (FNAME)); \ + putc ('\n', FILE); \ + } \ + } \ + while (0) + +#endif /* TYPE_ASM_OP */ diff --git a/gcc/testsuite/fp-test.c b/gcc/testsuite/fp-test.c new file mode 100755 index 0000000..667059c --- /dev/null +++ b/gcc/testsuite/fp-test.c @@ -0,0 +1,231 @@ +/* fp-test.c - Check that all floating-point operations are available. + Copyright (C) 1995 Free Software Foundation, Inc. + Contributed by Ronald F. Guilmette . + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* This is a trivial test program which may be useful to people who are + porting the GCC or G++ compilers to a new system. The intent here is + merely to check that all floating-point operations have been provided + by the port. (Note that I say ``provided'' rather than ``implemented''.) + + To use this file, simply compile it (with GCC or G++) and then try to + link it in the normal way (also using GCC or G++ respectively). If + all of the floating -point operations (including conversions) have + been provided, then this file will link without incident. If however + one or more of the primitive floating-point operations have not been + properly provided, you will get link-time errors indicating which + floating-point operations are unavailable. + + This file will typically be used when porting the GNU compilers to + some system which lacks floating-point hardware, and for which + software emulation routines (for FP ops) are needed in order to + complete the port. 
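+
+   In other words, a command along the lines of
+
+	gcc fp-test.c -o fp-test
+
+   should link once the port is complete; any primitive floating-point
+   routine that has not been provided (for example __addsf3) will show
+   up as an undefined reference instead.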
*/ + +#if 0 +#include +#endif + +extern double acos (double); +extern double asin (double); +extern double atan (double); +extern double atan2 (double, double); +extern double cos (double); +extern double sin (double); +extern double tan (double); +extern double cosh (double); +extern double sinh (double); +extern double tanh (double); +extern double exp (double); +extern double frexp (double, int *); +extern double ldexp (double, int); +extern double log (double); +extern double log10 (double); +extern double modf (double, double *); +extern double pow (double, double); +extern double sqrt (double); +extern double ceil (double); +extern double fabs (double); +extern double floor (double); +extern double fmod (double, double); + +int i1, i2 = 2; + +volatile signed char sc; +volatile unsigned char uc; + +volatile signed short ss; +volatile unsigned short us; + +volatile signed int si; +volatile unsigned int ui; + +volatile signed long sl; +volatile unsigned long ul; + +volatile float f1 = 1.0, f2 = 1.0, f3 = 1.0; +volatile double d1 = 1.0, d2 = 1.0, d3 = 1.0; +volatile long double D1 = 1.0, D2 = 1.0, D3 = 1.0; + +int +main () +{ + /* TYPE: float */ + + f1 = -f2; + f1 = f2 + f3; + f1 = f2 - f3; + f1 = f2 * f3; + f1 = f2 / f3; + f1 += f2; + f1 -= f2; + f1 *= f2; + f1 /= f2; + + si = f1 == f2; + si = f1 != f2; + si = f1 > f2; + si = f1 < f2; + si = f1 >= f2; + si = f1 <= f2; + + sc = f1; + uc = f1; + ss = f1; + us = f1; + si = f1; + ui = f1; + sl = f1; + ul = f1; + d1 = f1; + D1 = f1; + + f1 = sc; + f1 = uc; + f1 = ss; + f1 = us; + f1 = si; + f1 = ui; + f1 = sl; + f1 = ul; + f1 = d1; + f1 = D1; + + d1 = -d2; + d1 = d2 + d3; + d1 = d2 - d3; + d1 = d2 * d3; + d1 = d2 / d3; + d1 += d2; + d1 -= d2; + d1 *= d2; + d1 /= d2; + + si = d1 == d2; + si = d1 != d2; + si = d1 > d2; + si = d1 < d2; + si = d1 >= d2; + si = d1 <= d2; + + sc = d1; + uc = d1; + ss = d1; + us = d1; + si = d1; + ui = d1; + sl = d1; + ul = d1; + f1 = d1; + D1 = d1; + + d1 = sc; + d1 = uc; + d1 = ss; + d1 = us; + d1 = si; + d1 = ui; + d1 = sl; + d1 = ul; + d1 = f1; + d1 = D1; + + D1 = -D2; + D1 = D2 + D3; + D1 = D2 - D3; + D1 = D2 * D3; + D1 = D2 / D3; + D1 += D2; + D1 -= D2; + D1 *= D2; + D1 /= D2; + + si = D1 == D2; + si = D1 != D2; + si = D1 > D2; + si = D1 < D2; + si = D1 >= D2; + si = D1 <= D2; + + sc = D1; + uc = D1; + ss = D1; + us = D1; + si = D1; + ui = D1; + sl = D1; + ul = D1; + f1 = D1; + d1 = D1; + + D1 = sc; + D1 = uc; + D1 = ss; + D1 = us; + D1 = si; + D1 = ui; + D1 = sl; + D1 = ul; + D1 = f1; + D1 = d1; + + d1 = acos (d2); + d1 = asin (d2); + d1 = atan (d2); + d1 = atan2 (d2, d3); + d1 = cos (d2); + d1 = sin (d2); + d1 = tan (d2); + d1 = cosh (d2); + d1 = sinh (d2); + d1 = tanh (d2); + d1 = exp (d2); + d1 = frexp (d2, &i1); + d1 = ldexp (d2, i2); + d1 = log (d2); + d1 = log10 (d2); + d1 = modf (d2, &d3); + d1 = pow (d2, d3); + d1 = sqrt (d2); + d1 = ceil (d2); + d1 = fabs (d2); + d1 = floor (d2); + d1 = fmod (d2, d3); + + return 0; +} diff --git a/gcc/thumb.c b/gcc/thumb.c new file mode 100755 index 0000000..0310a51 --- /dev/null +++ b/gcc/thumb.c @@ -0,0 +1,1527 @@ +/* Output routines for GCC for ARM/Thumb + Copyright (C) 1996 Cygnus Software Technologies Ltd + The basis of this contribution was generated by + Richard Earnshaw, Advanced RISC Machines Ltd + + This file is part of GNU CC. + + GNU CC is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. 
+ + GNU CC is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with GNU CC; see the file COPYING. If not, write to + the Free Software Foundation, 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +#include +#include +#include "config.h" +#include "rtl.h" +#include "hard-reg-set.h" +#include "regs.h" +#include "output.h" +#include "insn-flags.h" +#include "insn-attr.h" +#include "insn-config.h" +#include "flags.h" +#include "tree.h" +#include "expr.h" +#include "toplev.h" +#include "recog.h" + +int current_function_anonymous_args = 0; +static int current_function_has_far_jump = 0; + +/* Used to parse -mstructure_size_boundary command line option. */ +char *structure_size_string = NULL; +int arm_structure_size_boundary = 32; /* Used to be 8 */ + +/* Predicates */ + +/* Return nonzero if op is suitable for the RHS of a cmp instruction. */ +int +thumb_cmp_operand(rtx op, enum machine_mode mode) +{ + return ((GET_CODE(op) == CONST_INT + && (HOST_WIDE_UINT) (INTVAL(op)) < 256) + || register_operand(op, mode)); +} + +int +thumb_shiftable_const(HOST_WIDE_INT val) +{ + HOST_WIDE_UINT x = val; + HOST_WIDE_UINT mask = 0xff; + int i; + + for (i = 0; i < 25; i++) + if ((val & (mask << i)) == val) + return 1; + + return 0; +} + +/* Routines for handling the constant pool */ +/* This is unashamedly hacked from the version in sh.c, since the problem is + extremely similar. */ + +/* Thumb instructions cannot load a large constant into a register, + constants have to come from a pc relative load. The reference of a pc + relative load instruction must be less than 1k infront of the instruction. + This means that we often have to dump a constant inside a function, and + generate code to branch around it. + + It is important to minimize this, since the branches will slow things + down and make things bigger. + + Worst case code looks like: + + ldr rn, L1 + b L2 + align + L1: .long value + L2: + .. + + ldr rn, L3 + b L4 + align + L3: .long value + L4: + .. + + We fix this by performing a scan before scheduling, which notices which + instructions need to have their operands fetched from the constant table + and builds the table. + + + The algorithm is: + + scan, find an instruction which needs a pcrel move. Look forward, find the + last barrier which is within MAX_COUNT bytes of the requirement. + If there isn't one, make one. Process all the instructions between + the find and the barrier. + + In the above example, we can tell that L3 is within 1k of L1, so + the first move can be shrunk from the 2 insn+constant sequence into + just 1 insn, and the constant moved to L3 to make: + + ldr rn, L1 + .. + ldr rn, L3 + b L4 + align + L1: .long value + L3: .long value + L4: + + Then the second move becomes the target for the shortening process. + + */ + +typedef struct +{ + rtx value; /* Value in table */ + HOST_WIDE_INT next_offset; + enum machine_mode mode; /* Mode of value */ +} pool_node; + +/* The maximum number of constants that can fit into one pool, since + the pc relative range is 0...1020 bytes and constants are at least 4 + bytes long */ + +#define MAX_POOL_SIZE (1020/4) +static pool_node pool_vector[MAX_POOL_SIZE]; +static int pool_size; +static rtx pool_vector_label; + +/* Add a constant to the pool and return its label. 
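+
+   (What is actually handed back is the byte offset of the constant from
+   pool_vector_label, the label placed at the head of the pool by
+   dump_table below; the first SImode constant added gets offset 0, the
+   next distinct one offset 4, and so on.)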
*/ + +static HOST_WIDE_INT +add_constant(rtx x, enum machine_mode mode) +{ + int i; + rtx lab; + HOST_WIDE_INT offset; + + if (mode == SImode && GET_CODE(x) == MEM && CONSTANT_P(XEXP(x, 0)) + && CONSTANT_POOL_ADDRESS_P(XEXP(x, 0))) + x = get_pool_constant(XEXP(x, 0)); + + /* First see if we've already got it */ + + for (i = 0; i < pool_size; i++) + { + if (x->code == pool_vector[i].value->code + && mode == pool_vector[i].mode) + { + if (x->code == CODE_LABEL) + { + if (XINT(x, 3) != XINT(pool_vector[i].value, 3)) + continue; + } + if (rtx_equal_p(x, pool_vector[i].value)) + return pool_vector[i].next_offset - GET_MODE_SIZE(mode); + } + } + + /* Need a new one */ + + pool_vector[pool_size].next_offset = GET_MODE_SIZE(mode); + offset = 0; + if (pool_size == 0) + pool_vector_label = gen_label_rtx(); + else + pool_vector[pool_size].next_offset + += (offset = pool_vector[pool_size - 1].next_offset); + + pool_vector[pool_size].value = x; + pool_vector[pool_size].mode = mode; + pool_size++; + return offset; +} + +/* Output the literal table */ + +static void +dump_table(rtx scan) +{ + int i; + + scan = emit_label_after(gen_label_rtx(), scan); + scan = emit_insn_after(gen_align_4(), scan); + scan = emit_label_after(pool_vector_label, scan); + + for (i = 0; i < pool_size; i++) + { + pool_node *p = pool_vector + i; + + switch (GET_MODE_SIZE(p->mode)) + { + case 4: + scan = emit_insn_after(gen_consttable_4(p->value), scan); + break; + + case 8: + scan = emit_insn_after(gen_consttable_8(p->value), scan); + break; + + default: + abort(); + break; + } + } + + scan = emit_insn_after(gen_consttable_end(), scan); + scan = emit_barrier_after(scan); + pool_size = 0; +} + +/* Non zero if the src operand needs to be fixed up */ +static +int +fixit(rtx src, enum machine_mode mode) +{ + return ((CONSTANT_P(src) + && (GET_CODE(src) != CONST_INT + || !(CONST_OK_FOR_LETTER_P(INTVAL(src), 'I') + || CONST_OK_FOR_LETTER_P(INTVAL(src), 'J') + || (mode != DImode + && CONST_OK_FOR_LETTER_P(INTVAL(src), 'K'))))) + || (mode == SImode && GET_CODE(src) == MEM + && GET_CODE(XEXP(src, 0)) == SYMBOL_REF + && CONSTANT_POOL_ADDRESS_P(XEXP(src, 0)))); +} + +/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */ + +#define MAX_COUNT_SI 1000 + +static rtx +find_barrier(rtx from) +{ + int count = 0; + rtx found_barrier = 0; + rtx label; + + while (from && count < MAX_COUNT_SI) + { + if (GET_CODE(from) == BARRIER) + return from; + + /* Count the length of this insn */ + if (GET_CODE(from) == INSN + && GET_CODE(PATTERN(from)) == SET + && CONSTANT_P(SET_SRC(PATTERN(from))) + && CONSTANT_POOL_ADDRESS_P(SET_SRC(PATTERN(from)))) + { + rtx src = SET_SRC(PATTERN(from)); + count += 2; + } + else + count += get_attr_length(from); + + from = NEXT_INSN(from); + } + + /* We didn't find a barrier in time to + dump our stuff, so we'll make one */ + label = gen_label_rtx(); + + if (from) + from = PREV_INSN(from); + else + from = get_last_insn(); + + /* Walk back to be just before any jump */ + while (GET_CODE(from) == JUMP_INSN + || GET_CODE(from) == NOTE + || GET_CODE(from) == CODE_LABEL) + from = PREV_INSN(from); + + from = emit_jump_insn_after(gen_jump(label), from); + JUMP_LABEL(from) = label; + found_barrier = emit_barrier_after(from); + emit_label_after(label, found_barrier); + return found_barrier; +} + +/* Non zero if the insn is a move instruction which needs to be fixed. 
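+
+   For example, a SET whose source is an integer constant too big for
+   the 'I', 'J' and 'K' checks in fixit() above (say 0x12345678), or the
+   address of a symbol, has to be rewritten as a pc-relative load from
+   the literal pool; sources that already fit are left alone.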
*/ + +static int +broken_move(rtx insn) +{ + if (!INSN_DELETED_P(insn) + && GET_CODE(insn) == INSN + && GET_CODE(PATTERN(insn)) == SET) + { + rtx pat = PATTERN(insn); + rtx src = SET_SRC(pat); + rtx dst = SET_DEST(pat); + enum machine_mode mode = GET_MODE(dst); + if (dst == pc_rtx) + return 0; + return fixit(src, mode); + } + return 0; +} + +/* Recursively search through all of the blocks in a function + checking to see if any of the variables created in that + function match the RTX called 'orig'. If they do then + replace them with the RTX called 'replacement'. */ + +static void +replace_symbols_in_block(tree block, rtx orig, rtx replacement) +{ + for (; block; block = BLOCK_CHAIN(block)) + { + tree sym; + + if (!TREE_USED(block)) + continue; + + for (sym = BLOCK_VARS(block); sym; sym = TREE_CHAIN(sym)) + { + if ( (DECL_NAME(sym) == 0 && TREE_CODE(sym) != TYPE_DECL) + || DECL_IGNORED_P(sym) + || TREE_CODE(sym) != VAR_DECL + || DECL_EXTERNAL(sym) + || !rtx_equal_p(DECL_RTL(sym), orig) + ) + continue; + + DECL_RTL(sym) = replacement; + } + + replace_symbols_in_block(BLOCK_SUBBLOCKS(block), orig, replacement); + } +} + +void +thumb_reorg(rtx first) +{ + rtx insn; + for (insn = first; insn; insn = NEXT_INSN(insn)) + { + if (broken_move(insn)) + { + /* This is a broken move instruction, scan ahead looking for + a barrier to stick the constant table behind */ + rtx scan; + rtx barrier = find_barrier(insn); + + /* Now find all the moves between the points and modify them */ + for (scan = insn; scan != barrier; scan = NEXT_INSN(scan)) + { + if (broken_move(scan)) + { + /* This is a broken move instruction, add it to the pool */ + rtx pat = PATTERN(scan); + rtx src = SET_SRC(pat); + rtx dst = SET_DEST(pat); + enum machine_mode mode = GET_MODE(dst); + HOST_WIDE_INT offset; + rtx newinsn; + rtx newsrc; + + /* If this is an HImode constant load, convert it into + an SImode constant load. Since the register is always + 32 bits this is safe. We have to do this, since the + load pc-relative instruction only does a 32-bit load. */ + if (mode == HImode) + { + mode = SImode; + if (GET_CODE(dst) != REG) + abort(); + PUT_MODE(dst, SImode); + } + + offset = add_constant(src, mode); + newsrc = gen_rtx(MEM, mode, + plus_constant(gen_rtx(LABEL_REF, + VOIDmode, + pool_vector_label), + offset)); + + /* Build a jump insn wrapper around the move instead + of an ordinary insn, because we want to have room for + the target label rtx in fld[7], which an ordinary + insn doesn't have. */ + newinsn = emit_jump_insn_after(gen_rtx(SET, VOIDmode, + dst, newsrc), scan); + JUMP_LABEL(newinsn) = pool_vector_label; + + /* But it's still an ordinary insn */ + PUT_CODE(newinsn, INSN); + + /* If debugging information is going to be emitted + then we must make sure that any refences to + symbols which are removed by the above code are + also removed in the descriptions of the + function's variables. Failure to do this means + that the debugging information emitted could + refer to symbols which are not emited by + output_constant_pool() because + mark_constant_pool() never sees them as being + used. */ + + + /* These are the tests used in + output_constant_pool() to decide if the constant + pool will be marked. Only necessary if debugging + info is being emitted. Only necessary for + references to memory whose address is given by a + symbol. 
*/ + + if (optimize > 0 + && flag_expensive_optimizations + && write_symbols != NO_DEBUG + && GET_CODE(src) == MEM + && GET_CODE(XEXP(src, 0)) == SYMBOL_REF) + replace_symbols_in_block + (DECL_INITIAL(current_function_decl), src, newsrc); + + /* Kill old insn */ + delete_insn(scan); + scan = newinsn; + } + } + dump_table(barrier); + } + } +} + +/* Routines for generating rtl */ + +void +thumb_expand_movstrqi(rtx *operands) +{ + rtx out = copy_to_mode_reg(SImode, XEXP(operands[0], 0)); + rtx in = copy_to_mode_reg(SImode, XEXP(operands[1], 0)); + HOST_WIDE_INT len = INTVAL(operands[2]); + HOST_WIDE_INT offset = 0; + + while (len >= 12) + { + emit_insn(gen_movmem12b(out, in)); + len -= 12; + } + if (len >= 8) + { + emit_insn(gen_movmem8b(out, in)); + len -= 8; + } + if (len >= 4) + { + rtx reg = gen_reg_rtx(SImode); + emit_insn(gen_movsi(reg, gen_rtx(MEM, SImode, in))); + emit_insn(gen_movsi(gen_rtx(MEM, SImode, out), reg)); + len -= 4; + offset += 4; + } + if (len >= 2) + { + rtx reg = gen_reg_rtx(HImode); + emit_insn(gen_movhi(reg, gen_rtx(MEM, HImode, + plus_constant(in, offset)))); + emit_insn(gen_movhi(gen_rtx(MEM, HImode, plus_constant(out, offset)), + reg)); + len -= 2; + offset += 2; + } + if (len) + { + rtx reg = gen_reg_rtx(QImode); + emit_insn(gen_movqi(reg, gen_rtx(MEM, QImode, + plus_constant(in, offset)))); + emit_insn(gen_movqi(gen_rtx(MEM, QImode, plus_constant(out, offset)), + reg)); + } +} + +/* Routines for reloading */ + +void +thumb_reload_out_si(rtx operands) +{ + abort(); +} + +static int +arm_naked_function_p(tree func) +{ + tree a; + + if (TREE_CODE(func) != FUNCTION_DECL) + abort(); + + a = lookup_attribute("naked", DECL_MACHINE_ATTRIBUTES(func)); + return a != NULL_TREE; +} + +/* Routines for emitting code */ + +void +final_prescan_insn(rtx insn) +{ + extern int *insn_addresses; + + if (flag_print_asm_name) + fprintf(asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START, + insn_addresses[INSN_UID(insn)]); +} + + +static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */ + +static inline int +number_of_first_bit_set(int mask) +{ + int bit; + + for (bit = 0; + (mask & (1 << bit)) == 0; + ++bit) + continue; + + return bit; +} + +#define ARG_1_REGISTER 0 +#define ARG_2_REGISTER 1 +#define ARG_3_REGISTER 2 +#define ARG_4_REGISTER 3 +#define WORK_REGISTER 7 +#define FRAME_POINTER 11 +#define IP_REGISTER 12 +#define STACK_POINTER STACK_POINTER_REGNUM +#define LINK_REGISTER 14 +#define PROGRAM_COUNTER 15 + +/* Generate code to return from a thumb function. + If 'reg_containing_return_addr' is -1, then the return address is + actually on the stack, at the stack pointer. */ +static void +thumb_exit(FILE *f, int reg_containing_return_addr) +{ + int reg_available_for_popping; + int mode; + int size; + int restore_a4 = FALSE; + + if (reg_containing_return_addr != -1) + { + /* If the return address is in a register, + then just emit the BX instruction and return. */ + asm_fprintf(f, "\tbx\t%s\n", reg_names[reg_containing_return_addr]); + return; + } + + if (!TARGET_THUMB_INTERWORK) + { + /* If we are not supporting interworking, + then just pop the return address straight into the PC. */ + asm_fprintf(f, "\tpop\t{pc}\n" ); + return; + } + + /* If we can deduce the registers used from the function's return value. + This is more reliable that examining regs_ever_live[] because that + will be set if the register is ever used in the function, not just if + the register is used to hold a return value. 
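+
+   For example, a function returning an 8 byte (DImode) value only uses
+   r0 and r1 for the return value, so below r2 is known to be free for
+   popping the return address; a void function leaves even r0 available.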
*/ + + if (current_function_return_rtx != 0) + mode = GET_MODE(current_function_return_rtx); + else + mode = DECL_MODE(DECL_RESULT(current_function_decl)); + + size = GET_MODE_SIZE(mode); + + if (size == 0) + { + /* In a void function we can use any argument register. + In a function that returns a structure on the stack + we can use the second and third argument registers. */ + if (mode == VOIDmode) + reg_available_for_popping = ARG_1_REGISTER; + else + reg_available_for_popping = ARG_2_REGISTER; + } + else if (size <= 4) + { + reg_available_for_popping = ARG_2_REGISTER; + } + else if (size <= 8) + { + reg_available_for_popping = ARG_3_REGISTER; + } + else + { + reg_available_for_popping = ARG_4_REGISTER; + + if (size > 12) + { + /* Register a4 is being used to hold part of the return value, + but we have dire need of a free, low register. */ + restore_a4 = TRUE; + + asm_fprintf(f, "\tmov\t%s, %s\n", + reg_names[IP_REGISTER], reg_names[ARG_4_REGISTER]); + } + } + + /* Pop the return address. */ + thumb_pushpop(f, (1 << reg_available_for_popping), FALSE); + + reg_containing_return_addr = reg_available_for_popping; + + /* If necessary restore the a4 register. */ + if (restore_a4) + { + asm_fprintf(f, "\tmov\t%s, %s\n", + reg_names[LINK_REGISTER], reg_names[ARG_4_REGISTER]); + + reg_containing_return_addr = LINK_REGISTER; + + asm_fprintf(f, "\tmov\t%s, %s\n", + reg_names[ARG_4_REGISTER], reg_names[IP_REGISTER]); + } + + /* Return to caller. */ + asm_fprintf(f, "\tbx\t%s\n", reg_names[reg_containing_return_addr]); +} + +/* Emit code to push or pop registers to or from the stack. */ +static void +thumb_pushpop(FILE *f, int mask, int push) +{ + int regno; + int lo_mask = mask & 0xFF; + + if (lo_mask == 0 && !push && (mask & (1 << 15))) + { + /* Special case. Do not generate a POP PC statement here, do it in + thumb_exit() */ + + thumb_exit(f, -1); + return; + } + + asm_fprintf(f, "\t%s\t{", push ? "push" : "pop"); + + /* Look at the low registers first. */ + + for (regno = 0; regno < 8; regno++, lo_mask >>= 1) + { + if (lo_mask & 1) + { + asm_fprintf(f, reg_names[regno]); + + if ((lo_mask & ~1) != 0) + asm_fprintf(f, ", "); + } + } + + if (push && (mask & (1 << 14))) + { + /* Catch pushing the LR. */ + + if (mask & 0xFF) + asm_fprintf(f, ", "); + + asm_fprintf(f, reg_names[14]); + } + else if (!push && (mask & (1 << 15))) + { + /* Catch popping the PC. */ + + if (TARGET_THUMB_INTERWORK) + { + /* The PC is never popped directly, instead + it is popped into r0-r3 and then BX is used. */ + + asm_fprintf(f, "}\n"); + + thumb_exit(f, -1); + + return; + } + else + { + if (mask & 0xFF) + asm_fprintf(f, ", "); + + asm_fprintf(f, reg_names[15]); + } + } + + asm_fprintf(f, "}\n"); +} + +/* Returns non-zero if the current function contains a far jump */ + +int +far_jump_used_p() +{ + rtx insn; + + if (current_function_has_far_jump) + return 1; + + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) + { + if (GET_CODE(insn) == JUMP_INSN + /* Ignore tablejump patterns. 
*/ + && GET_CODE(PATTERN(insn)) != ADDR_VEC + && GET_CODE(PATTERN(insn)) != ADDR_DIFF_VEC + && get_attr_far_jump(insn) == FAR_JUMP_YES) + { + current_function_has_far_jump = 1; + return 1; + } + } + + return 0; +} + +static int return_used_this_function = 0; + +void +thumb_function_prologue(FILE *f, int frame_size) +{ + int amount = frame_size + current_function_outgoing_args_size; + int live_regs_mask = 0; + int high_regs_pushed = 0; + int store_arg_regs = 0; + int regno; + + if (arm_naked_function_p(current_function_decl)) + return; + + if (current_function_anonymous_args && current_function_pretend_args_size) + store_arg_regs = 1; + + if (current_function_pretend_args_size) + { + if (store_arg_regs) + { + asm_fprintf(f, "\tpush\t{"); + for (regno = 4 - current_function_pretend_args_size / 4; regno < 4; + regno++) + asm_fprintf(f, "%s%s", reg_names[regno], regno == 3 ? "" : ", "); + asm_fprintf(f, "}\n"); + } + else + asm_fprintf(f, "\tsub\t%Rsp, %Rsp, #%d\n", + current_function_pretend_args_size); + } + + for (regno = 0; regno < 8; regno++) + if (regs_ever_live[regno] && !call_used_regs[regno]) + live_regs_mask |= 1 << regno; + + if (live_regs_mask || !leaf_function_p() || far_jump_used_p()) + live_regs_mask |= 1 << 14; + + if (live_regs_mask) + thumb_pushpop(f, live_regs_mask, 1); + + for (regno = 8; regno < 13; regno++) + { + if (regs_ever_live[regno] && !call_used_regs[regno]) + high_regs_pushed++; + } + + if (high_regs_pushed) + { + int pushable_regs = 0; + int mask = live_regs_mask & 0xff; + int next_hi_reg; + + for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--) + { + if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]) + break; + } + + pushable_regs = mask; + + if (pushable_regs == 0) + { + /* desperation time -- this probably will never happen */ + if (regs_ever_live[3] || !call_used_regs[3]) + asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]); + mask = 1 << 3; + } + + while (high_regs_pushed > 0) + { + for (regno = 7; regno >= 0; regno--) + { + if (mask & (1 << regno)) + { + asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[regno], + reg_names[next_hi_reg]); + high_regs_pushed--; + if (high_regs_pushed) + for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--) + { + if (regs_ever_live[next_hi_reg] + && !call_used_regs[next_hi_reg]) + break; + } + else + { + mask &= ~((1 << regno) - 1); + break; + } + } + } + thumb_pushpop(f, mask, 1); + } + + if (pushable_regs == 0 && (regs_ever_live[3] || !call_used_regs[3])) + asm_fprintf(f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]); + } +} + +void +thumb_expand_prologue() +{ + HOST_WIDE_INT amount = (get_frame_size() + + current_function_outgoing_args_size); + int regno; + int live_regs_mask; + + if (arm_naked_function_p(current_function_decl)) + return; + + if (amount) + { + live_regs_mask = 0; + for (regno = 0; regno < 8; regno++) + if (regs_ever_live[regno] && !call_used_regs[regno]) + live_regs_mask |= 1 << regno; + + if (amount < 512) + emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, + GEN_INT(-amount))); + else + { + rtx reg, spare; + + if ((live_regs_mask & 0xff) == 0) /* Very unlikely */ + emit_insn(gen_movsi(spare = gen_rtx(REG, SImode, 12), + reg = gen_rtx(REG, SImode, 4))); + else + { + for (regno = 0; regno < 8; regno++) + if (live_regs_mask & (1 << regno)) + break; + reg = gen_rtx(REG, SImode, regno); + } + + emit_insn(gen_movsi(reg, GEN_INT(-amount))); + emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, reg)); + if ((live_regs_mask & 0xff) == 0) + emit_insn(gen_movsi(reg, spare)); + } 
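+
+      /* At this point the stack adjustment has been made.  If none of
+         the low registers had been saved by the prologue, r4 was
+         borrowed above to hold the adjustment and its original value
+         has just been restored from ip (r12).  */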
+ } + + if (frame_pointer_needed) + { + if (current_function_outgoing_args_size) + { + rtx offset = GEN_INT(current_function_outgoing_args_size); + + if (current_function_outgoing_args_size < 1024) + emit_insn(gen_addsi3(frame_pointer_rtx, stack_pointer_rtx, + offset)); + else + { + emit_insn(gen_movsi(frame_pointer_rtx, offset)); + emit_insn(gen_addsi3(frame_pointer_rtx, frame_pointer_rtx, + stack_pointer_rtx)); + } + } + else + emit_insn(gen_movsi(frame_pointer_rtx, stack_pointer_rtx)); + } + + /* if (profile_flag || profile_block_flag) */ + emit_insn(gen_blockage()); +} + +void +thumb_expand_epilogue() +{ + HOST_WIDE_INT amount = (get_frame_size() + + current_function_outgoing_args_size); + int regno; + + if (arm_naked_function_p(current_function_decl)) + return; + + if (amount) + { + if (amount < 512) + emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, + GEN_INT(amount))); + else + { + rtx reg = gen_rtx(REG, SImode, 3); /* Always free in the epilogue */ + + emit_insn(gen_movsi(reg, GEN_INT(amount))); + emit_insn(gen_addsi3(stack_pointer_rtx, stack_pointer_rtx, reg)); + } + /* if (profile_flag || profile_block_flag) */ + emit_insn(gen_blockage()); + } +} + +void +thumb_function_epilogue(FILE *f, int frame_size) +{ + /* ??? Probably not safe to set this here, since it assumes that a + function will be emitted as assembly immediately after we generate + RTL for it. This does not happen for inline functions. */ + return_used_this_function = 0; + current_function_has_far_jump = 0; +#if 0 /* TODO : comment not really needed */ + fprintf(f, "%s THUMB Epilogue\n", ASM_COMMENT_START); +#endif +} + +/* The bits which aren't usefully expanded as rtl. */ +char * +thumb_unexpanded_epilogue() +{ + int regno; + int live_regs_mask = 0; + int high_regs_pushed = 0; + int leaf_function = leaf_function_p(); + int had_to_push_lr; + + if (arm_naked_function_p(current_function_decl) + || return_used_this_function) + return ""; + + for (regno = 0; regno < 8; regno++) + if (regs_ever_live[regno] && !call_used_regs[regno]) + live_regs_mask |= 1 << regno; + + for (regno = 8; regno < 13; regno++) + { + if (regs_ever_live[regno] && !call_used_regs[regno]) + high_regs_pushed++; + } + + /* The prolog may have pushed some high registers to use as + work registers. eg the testuite file: + gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c + compiles to produce: + push {r4, r5, r6, r7, lr} + mov r7, r9 + mov r6, r8 + push {r6, r7} + as part of the prolog. We have to undo that pushing here. */ + + if (high_regs_pushed) + { + int mask = live_regs_mask; + int next_hi_reg; + int size; + int mode; + + /* If we can deduce the registers used from the function's return value. + This is more reliable that examining regs_ever_live[] because that + will be set if the register is ever used in the function, not just if + the register is used to hold a return value. */ + + if (current_function_return_rtx != 0) + { + mode = GET_MODE(current_function_return_rtx); + } + else + { + mode = DECL_MODE(DECL_RESULT(current_function_decl)); + } + + size = GET_MODE_SIZE(mode); + + /* Unless we are returning a type of size > 12 register r3 is available. */ + if (size < 13) + mask |= 1 << 3; + + if (mask == 0) + { + /* Oh dear! We have no low registers into which we can pop high registers! 
*/ + + fatal("No low registers available for popping high registers"); + } + + for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++) + if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]) + break; + + while (high_regs_pushed) + { + /* Find low register(s) into which the high register(s) can be popped. */ + for (regno = 0; regno < 8; regno++) + { + if (mask & (1 << regno)) + high_regs_pushed--; + if (high_regs_pushed == 0) + break; + } + + mask &= (2 << regno) - 1; /* A noop if regno == 8 */ + + /* Pop the values into the low register(s). */ + thumb_pushpop(asm_out_file, mask, 0); + + /* Move the value(s) into the high registers. */ + for (regno = 0; regno < 8; regno++) + { + if (mask & (1 << regno)) + { + asm_fprintf(asm_out_file, "\tmov\t%s, %s\n", + reg_names[next_hi_reg], reg_names[regno]); + for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++) + if (regs_ever_live[next_hi_reg] && + !call_used_regs[next_hi_reg]) + break; + } + } + } + } + + had_to_push_lr = (live_regs_mask || !leaf_function || far_jump_used_p()); + + if (current_function_pretend_args_size == 0) + { + if (had_to_push_lr) + live_regs_mask |= 1 << PROGRAM_COUNTER; + + /* No argument registers were pushed, so just pop everything. */ + + if (live_regs_mask) + thumb_pushpop(asm_out_file, live_regs_mask, FALSE); + + /* We have either just popped the return address into the + PC or it is was kept in LR for the entire function or + it is still on the stack because we do not want to + return by doing a pop {pc}. */ + + if ((live_regs_mask & (1 << PROGRAM_COUNTER)) == 0) + thumb_exit(asm_out_file, LINK_REGISTER); + } + else + { + /* Pop everything but the return address. */ + live_regs_mask &= ~(1 << PROGRAM_COUNTER); + + if (live_regs_mask) + thumb_pushpop(asm_out_file, live_regs_mask, FALSE); + + if (had_to_push_lr) + { + /* Get the return address into a temporary register. */ + thumb_pushpop(asm_out_file, 1 << ARG_4_REGISTER, 0); + } + + /* Remove the argument registers that were pushed onto the stack. */ + asm_fprintf(asm_out_file, "\tadd\t%s, %s, #%d\n", + reg_names[STACK_POINTER], + reg_names[STACK_POINTER], + current_function_pretend_args_size); + + thumb_exit(asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER); + } + + return ""; +} + +/* Handle the case of a double word load into a low register from + a computed memory address. The computed address may involve a + register which is overwritten by the load. */ + +char * +thumb_load_double_from_address(rtx *operands) +{ + rtx addr; + rtx base; + rtx offset; + rtx arg1; + rtx arg2; + + if (GET_CODE(operands[0]) != REG) + fatal("thumb_load_double_from_address: destination is not a register"); + + if (GET_CODE(operands[1]) != MEM) + fatal("thumb_load_double_from_address: source is not a computed memory address"); + + /* Get the memory address. */ + + addr = XEXP(operands[1], 0); + + /* Work out how the memory address is computed. */ + + switch (GET_CODE(addr)) + { + case REG: + operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); + + if (REGNO(operands[0]) == REGNO(addr)) + { + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + } + else + { + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + } + break; + + case CONST: + /* Compute
+ 4 for the high order load. */ + + operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); + + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + break; + + case PLUS: + arg1 = XEXP(addr, 0); + arg2 = XEXP(addr, 1); + + if (CONSTANT_P(arg1)) + base = arg2, offset = arg1; + else + base = arg1, offset = arg2; + + if (GET_CODE(base) != REG) + fatal("thumb_load_double_from_address: base is not a register"); + + /* Catch the case of
= + */ + + if (GET_CODE(offset) == REG) + { + int reg_offset = REGNO(offset); + int reg_base = REGNO(base); + int reg_dest = REGNO(operands[0]); + + /* Add the base and offset registers together into the higher destination register. */ + + fprintf(asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address", + reg_names[ reg_dest + 1 ], + reg_names[ reg_base ], + reg_names[ reg_offset ], + ASM_COMMENT_START); + + /* Load the lower destination register from the address in the higher destination register. */ + + fprintf(asm_out_file, "\tldr\t%s,[%s, #0]\t\t%s created by thumb_load_double_from_address", + reg_names[ reg_dest ], + reg_names[ reg_dest + 1], + ASM_COMMENT_START); + + /* Load the higher destination register from its own address plus 4. */ + + fprintf(asm_out_file, "\tldr\t%s,[%s, #4]\t\t%s created by thumb_load_double_from_address", + reg_names[ reg_dest + 1 ], + reg_names[ reg_dest + 1 ], + ASM_COMMENT_START); + } + else + { + /* Compute
+ 4 for the high order load. */ + + operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); + + /* If the computed address is held in the low order register + then load the high order register first, otherwise always + load the low order register first. */ + + if (REGNO(operands[0]) == REGNO(base)) + { + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + } + else + { + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + } + } + break; + + case LABEL_REF: + /* With no registers to worry about we can just load the value directly. */ + operands[2] = gen_rtx(MEM, SImode, plus_constant(XEXP(operands[1], 0), 4)); + + output_asm_insn("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands); + output_asm_insn("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands); + break; + + default: + debug_rtx(operands[1]); + fatal("thumb_load_double_from_address: Unhandled address calculation"); + break; + } + + return ""; +} + +char * +output_move_mem_multiple(int n, rtx *operands) +{ + rtx tmp; + + switch (n) + { + case 2: + if (REGNO(operands[2]) > REGNO(operands[3])) + { + tmp = operands[2]; + operands[2] = operands[3]; + operands[3] = tmp; + } + output_asm_insn("ldmia\t%1!, {%2, %3}", operands); + output_asm_insn("stmia\t%0!, {%2, %3}", operands); + break; + + case 3: + if (REGNO(operands[2]) > REGNO(operands[3])) + { + tmp = operands[2]; + operands[2] = operands[3]; + operands[3] = tmp; + } + if (REGNO(operands[3]) > REGNO(operands[4])) + { + tmp = operands[3]; + operands[3] = operands[4]; + operands[4] = tmp; + } + if (REGNO(operands[2]) > REGNO(operands[3])) + { + tmp = operands[2]; + operands[2] = operands[3]; + operands[3] = tmp; + } + output_asm_insn("ldmia\t%1!, {%2, %3, %4}", operands); + output_asm_insn("stmia\t%0!, {%2, %3, %4}", operands); + break; + + default: + abort(); + } + + return ""; +} + +static char *conds[] = +{ + "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc", + "hi", "ls", "ge", "lt", "gt", "le" +}; + +static char * +thumb_condition_code(rtx x, int invert) +{ + int val; + + switch (GET_CODE(x)) + { + case EQ: val = 0; break; + case NE: val = 1; break; + case GEU: val = 2; break; + case LTU: val = 3; break; + case GTU: val = 8; break; + case LEU: val = 9; break; + case GE: val = 10; break; + case LT: val = 11; break; + case GT: val = 12; break; + case LE: val = 13; break; + default: + abort(); + } + + return conds[val ^ invert]; +} + +void +thumb_print_operand(FILE *f, rtx x, int code) +{ + if (code) + { + switch (code) + { + case '@': + fputs(ASM_COMMENT_START, f); + return; + + case 'D': + if (x) + fputs(thumb_condition_code(x, 1), f); + return; + + case 'd': + if (x) + fputs(thumb_condition_code(x, 0), f); + return; + + /* An explanation of the 'Q', 'R' and 'H' register operands: + + In a pair of registers containing a DI or DF value the 'Q' + operand returns the register number of the register containing + the least signficant part of the value. The 'R' operand returns + the register number of the register containing the most + significant part of the value. + + The 'H' operand returns the higher of the two register numbers. 
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the + same as the 'Q' operand, since the most signficant part of the + value is held in the lower number register. The reverse is true + on systems where WORDS_BIG_ENDIAN is false. + + The purpose of these operands is to distinguish between cases + where the endian-ness of the values is important (for example + when they are added together), and cases where the endian-ness + is irrelevant, but the order of register operations is important. + For example when loading a value from memory into a register + pair, the endian-ness does not matter. Provided that the value + from the lower memory address is put into the lower numbered + register, and the value from the higher address is put into the + higher numbered register, the load will work regardless of whether + the value being loaded is big-wordian or little-wordian. The + order of the two register loads can matter however, if the address + of the memory location is actually held in one of the registers + being overwritten by the load. */ + case 'Q': + if (REGNO(x) > 15) + abort(); + fputs(reg_names[REGNO(x)], f); + return; + + case 'R': + if (REGNO(x) > 15) + abort(); + fputs(reg_names[REGNO(x) + 1], f); + return; + + case 'H': + if (REGNO(x) > 15) + abort(); + fputs(reg_names[REGNO(x) + 1], f); + return; + + case 'c': + /* We use 'c' operands with symbols for .vtinherit */ + if (GET_CODE(x) == SYMBOL_REF) + output_addr_const(f, x); + return; + + default: + abort(); + } + } + if (GET_CODE(x) == REG) + fputs(reg_names[REGNO(x)], f); + else if (GET_CODE(x) == MEM) + output_address(XEXP(x, 0)); + else if (GET_CODE(x) == CONST_INT) + { + fputc('#', f); + output_addr_const(f, x); + } + else + abort(); +} + +/* Decide whether a type should be returned in memory (true) + or in a register (false). This is called by the macro + RETURN_IN_MEMORY. */ + +int +thumb_return_in_memory(tree type) +{ + if (!AGGREGATE_TYPE_P(type)) + { + /* All simple types are returned in registers. */ + + return 0; + } + else if (int_size_in_bytes(type) > 4) + { + /* All structures/unions bigger than one word are returned in memory. */ + + return 1; + } + else if (TREE_CODE(type) == RECORD_TYPE) + { + tree field; + + /* For a struct the APCS says that we must return in a register if + every addressable element has an offset of zero. For practical + purposes this means that the structure can have at most one non- + bit-field element and that this element must be the first one in + the structure. */ + + /* Find the first field, ignoring non FIELD_DECL things which will + have been created by C++. */ + for (field = TYPE_FIELDS(type); + field && TREE_CODE(field) != FIELD_DECL; + field = TREE_CHAIN(field)) + continue; + + if (field == NULL) + return 0; /* An empty structure. Allowed by an extension to ANSI C. */ + + /* Now check the remaining fields, if any. */ + for (field = TREE_CHAIN(field); field; field = TREE_CHAIN(field)) + { + if (TREE_CODE(field) != FIELD_DECL) + continue; + + if (!DECL_BIT_FIELD_TYPE(field)) + return 1; + } + + return 0; + } + else if (TREE_CODE(type) == UNION_TYPE) + { + tree field; + + /* Unions can be returned in registers if every element is + integral, or can be returned in an integer register. */ + + for (field = TYPE_FIELDS(type); + field; + field = TREE_CHAIN(field)) + { + if (TREE_CODE(field) != FIELD_DECL) + continue; + + if (RETURN_IN_MEMORY(TREE_TYPE(field))) + return 1; + } + + return 0; + } + /* XXX Not sure what should be done for other aggregates, so put them in + memory. 
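+
+   (For the RECORD_TYPE case above this means, for instance, that a
+   struct holding a single int comes back in a register, while a struct
+   of two chars goes back in memory even though it fits in a word,
+   because it has a second addressable field.)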
*/ + return 1; +} + +void +thumb_override_options() +{ + if (structure_size_string != NULL) + { + int size = strtol(structure_size_string, NULL, 0); + + if (size == 8 || size == 32) + arm_structure_size_boundary = size; + else + warning("Structure size boundary can only be set to 8 or 32"); + } +} + +/* Return nonzero if ATTR is a valid attribute for DECL. + ATTRIBUTES are any existing attributes and ARGS are the arguments + supplied with ATTR. + + Supported attributes: + + naked: don't output any prologue or epilogue code, the user is assumed + to do the right thing. + */ +int +arm_valid_machine_decl_attribute(tree decl, tree attributes, tree attr, tree args) +{ + if (args != NULL_TREE) + return 0; + + if (is_attribute_p("naked", attr)) + return TREE_CODE(decl) == FUNCTION_DECL; + + return 0; +} + +/* s_register_operand is the same as register_operand, but it doesn't accept + (SUBREG (MEM)...). + + This function exists because at the time it was put in it led to better + code. SUBREG(MEM) always needs a reload in the places where + s_register_operand is used, and this seemed to lead to excessive + reloading. */ + +int +s_register_operand(rtx op, enum machine_mode mode) +{ + if (GET_MODE(op) != mode && mode != VOIDmode) + return 0; + + if (GET_CODE(op) == SUBREG) + op = SUBREG_REG(op); + + /* We don't consider registers whose class is NO_REGS + to be a register operand. */ + /* XXX might have to check for lo regs only for thumb ??? */ + return (GET_CODE(op) == REG + && (REGNO(op) >= FIRST_PSEUDO_REGISTER + || REGNO_REG_CLASS(REGNO(op)) != NO_REGS)); +} diff --git a/gcc/thumb.h b/gcc/thumb.h new file mode 100755 index 0000000..e5918a1 --- /dev/null +++ b/gcc/thumb.h @@ -0,0 +1,1137 @@ +/* Definitions of target machine for GNU compiler, for ARM/Thumb. + Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc. + The basis of this contribution was generated by + Richard Earnshaw, Advanced RISC Machines Ltd + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */ + +/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced + gcc hacker in their entirety. */ + +/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm + files, which will lead to many maintenance problems. These files are + likely missing all bug fixes made to the arm port since they diverged. */ + +/* ??? Many patterns in the md file accept operands that will require a + reload. These should be eliminated if possible by tightening the + predicates and/or constraints. This will give faster/smaller code. */ + +/* ??? There is no pattern for the TST instuction. Check for other unsupported + instructions. 
*/ + +#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr); + +#define ARM_FLAG_THUMB 0x1000 /* same as in arm.h */ +#define THUMB_FLAG_CALLER_SUPER_INTERWORKING 0x80000 + +/* Nonzero if all call instructions should be indirect. */ +#define ARM_FLAG_LONG_CALLS (0x10000) /* same as in arm.h */ + + +/* Run-time compilation parameters selecting different hardware/software subsets. */ +extern int target_flags; +#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */ +#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB) + +/* Set if calls via function pointers should assume that their + destination is non-Thumb aware. */ +#define TARGET_CALLER_INTERWORKING \ + (target_flags & THUMB_FLAG_CALLER_SUPER_INTERWORKING) + +#define TARGET_LONG_CALLS (target_flags & ARM_FLAG_LONG_CALLS) + +/* SUBTARGET_SWITCHES is used to add flags on a per-config basis. */ +#ifndef SUBTARGET_SWITCHES +#define SUBTARGET_SWITCHES +#endif + +#define TARGET_SWITCHES \ +{ \ + {"thumb-interwork", ARM_FLAG_THUMB}, \ + {"no-thumb-interwork", -ARM_FLAG_THUMB}, \ + {"caller-super-interworking", THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \ + {"no-caller-super-interworking", -THUMB_FLAG_CALLER_SUPER_INTERWORKING}, \ + {"long-calls", ARM_FLAG_LONG_CALLS, \ + "Generate all call instructions as indirect calls"}, \ + {"no-long-calls", -ARM_FLAG_LONG_CALLS, ""}, \ + SUBTARGET_SWITCHES \ + {"", TARGET_DEFAULT} \ +} + +#define TARGET_OPTIONS \ +{ \ + { "structure-size-boundary=", & structure_size_string }, \ +} + +#define REGISTER_PREFIX "" + +#define CAN_DEBUG_WITHOUT_FP 1 + +#define ASM_APP_ON "" +#define ASM_APP_OFF "\t.code\t16\n" + +/* Output a gap. In fact we fill it with nulls. */ +#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \ + fprintf ((STREAM), "\t.space\t%u\n", (NBYTES)) + +/* This is how to output an assembler line + that says to advance the location counter + to a multiple of 2**LOG bytes. */ + +#ifdef OLD_ASM + +#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ +{ \ + if ((LOG) > 0) \ + fprintf (STREAM, "\t.align\t%d\n", (LOG)); \ +} + +#else + +#define ASM_OUTPUT_ALIGN(STREAM,LOG) \ +{ \ + if ((LOG) > 0) \ + fprintf (STREAM, "\t.align\t%d, 0\n", (LOG)); \ +} + +#endif + +/* Output a common block */ +#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \ + (fprintf ((STREAM), "\t.comm\t"), \ + assemble_name ((STREAM), (NAME)), \ + fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE))) + +#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \ + sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM)) + +/* This is how to output an internal numbered label where + PREFIX is the class of label and NUM is the number within the class. */ +#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \ + fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM)) + +/* This is how to output a label which precedes a jumptable. Since + instructions are 2 bytes, we need explicit alignment here. */ + +#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \ + do { \ + ASM_OUTPUT_ALIGN (FILE, 2); \ + ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \ + } while (0) + +/* This says how to define a local common symbol (ie, not visible to + linker). */ +#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \ + (fprintf((STREAM),"\n\t.lcomm\t"), \ + assemble_name((STREAM),(NAME)), \ + fprintf((STREAM),",%u\n",(SIZE))) + +/* Output a reference to a label. */ +#define ASM_OUTPUT_LABELREF(STREAM,NAME) \ + fprintf ((STREAM), "%s", (NAME)) + +/* This is how to output an assembler line for a numeric constant byte. 
*/ +#define ASM_OUTPUT_BYTE(STREAM,VALUE) \ + fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE)) + +#define ASM_OUTPUT_INT(STREAM,VALUE) \ +{ \ + fprintf (STREAM, "\t.word\t"); \ + output_addr_const (STREAM, (VALUE)); \ + fprintf (STREAM, "\n"); \ +} + +#define ASM_OUTPUT_SHORT(STREAM,VALUE) \ +{ \ + fprintf (STREAM, "\t.short\t"); \ + output_addr_const (STREAM, (VALUE)); \ + fprintf (STREAM, "\n"); \ +} + +#define ASM_OUTPUT_CHAR(STREAM,VALUE) \ +{ \ + fprintf (STREAM, "\t.byte\t"); \ + output_addr_const (STREAM, (VALUE)); \ + fprintf (STREAM, "\n"); \ +} + +#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \ +do { char dstr[30]; \ + long l[2]; \ + REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \ + fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \ + l[1], ASM_COMMENT_START, dstr); \ + } while (0) + +#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \ +do { char dstr[30]; \ + long l; \ + REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \ + REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \ + fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \ + ASM_COMMENT_START, dstr); \ + } while (0); + +/* Define results of standard character escape sequences. */ +#define TARGET_BELL 007 +#define TARGET_BS 010 +#define TARGET_TAB 011 +#define TARGET_NEWLINE 012 +#define TARGET_VT 013 +#define TARGET_FF 014 +#define TARGET_CR 015 + +/* This is how to output a string. */ +#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \ +do { \ + register int i, c, len = (LEN), cur_pos = 17; \ + register unsigned char *string = (unsigned char *)(STRING); \ + fprintf ((STREAM), "\t.ascii\t\""); \ + for (i = 0; i < len; i++) \ + { \ + register int c = string[i]; \ + \ + switch (c) \ + { \ + case '\"': \ + case '\\': \ + putc ('\\', (STREAM)); \ + putc (c, (STREAM)); \ + cur_pos += 2; \ + break; \ + \ + case TARGET_NEWLINE: \ + fputs ("\\n", (STREAM)); \ + if (i+1 < len \ + && (((c = string[i+1]) >= '\040' && c <= '~') \ + || c == TARGET_TAB)) \ + cur_pos = 32767; /* break right here */ \ + else \ + cur_pos += 2; \ + break; \ + \ + case TARGET_TAB: \ + fputs ("\\t", (STREAM)); \ + cur_pos += 2; \ + break; \ + \ + case TARGET_FF: \ + fputs ("\\f", (STREAM)); \ + cur_pos += 2; \ + break; \ + \ + case TARGET_BS: \ + fputs ("\\b", (STREAM)); \ + cur_pos += 2; \ + break; \ + \ + case TARGET_CR: \ + fputs ("\\r", (STREAM)); \ + cur_pos += 2; \ + break; \ + \ + default: \ + if (c >= ' ' && c < 0177) \ + { \ + putc (c, (STREAM)); \ + cur_pos++; \ + } \ + else \ + { \ + fprintf ((STREAM), "\\%03o", c); \ + cur_pos += 4; \ + } \ + } \ + \ + if (cur_pos > 72 && i+1 < len) \ + { \ + cur_pos = 17; \ + fprintf ((STREAM), "\"\n\t.ascii\t\""); \ + } \ + } \ + fprintf ((STREAM), "\"\n"); \ +} while (0) + +/* Output and Generation of Labels */ +#define ASM_OUTPUT_LABEL(STREAM,NAME) \ + (assemble_name ((STREAM), (NAME)), \ + fprintf ((STREAM), ":\n")) + +#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \ + (fprintf ((STREAM), "\t.globl\t"), \ + assemble_name ((STREAM), (NAME)), \ + fputc ('\n', (STREAM))) + +/* Construct a private name. */ +#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \ + ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \ + sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER))) + +/* Switch to the text or data segment. */ +#define TEXT_SECTION_ASM_OP ".text" +#define DATA_SECTION_ASM_OP ".data" +#define BSS_SECTION_ASM_OP ".bss" + +/* The assembler's names for the registers. 
*/ +#ifndef REGISTER_NAMES +#define REGISTER_NAMES \ +{ \ + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \ + "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \ +} +#endif + +#ifndef ADDITIONAL_REGISTER_NAMES +#define ADDITIONAL_REGISTER_NAMES \ +{ \ + {"a1", 0}, \ + {"a2", 1}, \ + {"a3", 2}, \ + {"a4", 3}, \ + {"v1", 4}, \ + {"v2", 5}, \ + {"v3", 6}, \ + {"v4", 7}, \ + {"v5", 8}, \ + {"v6", 9}, \ + {"sb", 9}, \ + {"v7", 10}, \ + {"r10", 10}, /* sl */ \ + {"r11", 11}, /* fp */ \ + {"r12", 12}, /* ip */ \ + {"r13", 13}, /* sp */ \ + {"r14", 14}, /* lr */ \ + {"r15", 15} /* pc */ \ +} +#endif + +/* The assembler's parentheses characters. */ +#define ASM_OPEN_PAREN "(" +#define ASM_CLOSE_PAREN ")" + +#ifndef ASM_COMMENT_START +#define ASM_COMMENT_START "@" +#endif + +/* Output an element of a dispatch table. */ +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \ + fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE)) + +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \ + fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE)) + +/* Storage Layout */ + +#define FLOAT_WORDS_BIG_ENDIAN 1 + +#define BITS_PER_UNIT 8 +#define BITS_PER_WORD 32 + +#define UNITS_PER_WORD 4 + +#define POINTER_SIZE 32 + +#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \ +{ \ + if (GET_MODE_CLASS (MODE) == MODE_INT \ + && GET_MODE_SIZE (MODE) < 4) \ + { \ + (UNSIGNEDP) = 1; \ + (MODE) = SImode; \ + } \ +} + +#define PARM_BOUNDARY 32 +#define STACK_BOUNDARY 32 + +#define FUNCTION_BOUNDARY 32 +#define BIGGEST_ALIGNMENT 32 + +/* Make strings word-aligned so strcpy from constants will be faster. */ +#define CONSTANT_ALIGNMENT(EXP, ALIGN) \ + (TREE_CODE (EXP) == STRING_CST \ + && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) + +#define EMPTY_FIELD_BOUNDARY 32 + +#define STRUCTURE_SIZE_BOUNDARY 32 + +/* Used when parsing command line option -mstructure_size_boundary. */ +extern char * structure_size_string; + +#define STRICT_ALIGNMENT 1 + +#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT + + +/* Layout of Source Language Data Types */ + +#define TARGET_BELL 007 +#define TARGET_BS 010 +#define TARGET_TAB 011 +#define TARGET_NEWLINE 012 +#define TARGET_VT 013 +#define TARGET_FF 014 +#define TARGET_CR 015 + + +/* Register Usage */ + +/* Note there are 16 hard registers on the Thumb. We invent a 17th register + which is assigned to ARG_POINTER_REGNUM, but this is later removed by + elimination passes in the compiler. */ +#define FIRST_PSEUDO_REGISTER 17 + +/* ??? This is questionable. */ +#define FIXED_REGISTERS \ +{ \ + 0,0,0,0, \ + 0,0,0,0, \ + 0,0,0,1, \ + 0,1,1,1,1 \ +} + +/* ??? This is questionable. */ +#define CALL_USED_REGISTERS \ +{ \ + 1,1,1,1, \ + 0,0,0,0, \ + 0,0,0,1, \ + 1,1,1,1,1 \ +} + +#define HARD_REGNO_NREGS(REGNO,MODE) \ + ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \ + / UNITS_PER_WORD) + +/* ??? Probably should only allow DImode/DFmode in even numbered registers. */ +#define HARD_REGNO_MODE_OK(REGNO,MODE) ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (REGNO < 7) : 1) + +#define MODES_TIEABLE_P(MODE1,MODE2) 1 + +/* The NOARG_LO_REGS class is the set of LO_REGS that are not used for passing + arguments to functions. These are the registers that are available for + spilling during reload. The code in reload1.c:init_reload() will detect this + class and place it into 'reload_address_base_reg_class'. 
*/ + +enum reg_class +{ + NO_REGS, + NONARG_LO_REGS, + LO_REGS, + STACK_REG, + BASE_REGS, + HI_REGS, + ALL_REGS, + LIM_REG_CLASSES +}; + +#define GENERAL_REGS ALL_REGS + +#define N_REG_CLASSES (int) LIM_REG_CLASSES + +#define REG_CLASS_NAMES \ +{ \ + "NO_REGS", \ + "NONARG_LO_REGS", \ + "LO_REGS", \ + "STACK_REG", \ + "BASE_REGS", \ + "HI_REGS", \ + "ALL_REGS" \ +} + +#define REG_CLASS_CONTENTS \ +{ \ + 0x00000, \ + 0x000f0, \ + 0x000ff, \ + 0x02000, \ + 0x020ff, \ + 0x0ff00, \ + 0x1ffff, \ +} + +#define REGNO_REG_CLASS(REGNO) \ + ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \ + : (REGNO) < 8 ? ((REGNO) < 4 ? LO_REGS \ + : NONARG_LO_REGS) \ + : HI_REGS) + +#define BASE_REG_CLASS BASE_REGS + +#define MODE_BASE_REG_CLASS(MODE) \ + ((MODE) != QImode && (MODE) != HImode \ + ? BASE_REGS : LO_REGS) + +#define INDEX_REG_CLASS LO_REGS + +/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows + registers explicitly used in the rtl to be used as spill registers + but prevents the compiler from extending the lifetime of these + registers. */ + +#define SMALL_REGISTER_CLASSES 1 + +#define REG_CLASS_FROM_LETTER(C) \ + ((C) == 'l' ? LO_REGS \ + : (C) == 'h' ? HI_REGS \ + : (C) == 'b' ? BASE_REGS \ + : (C) == 'k' ? STACK_REG \ + : NO_REGS) + +#define REGNO_OK_FOR_BASE_P(REGNO) \ + ((REGNO) < 8 \ + || (REGNO) == STACK_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO] < 8 \ + || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM) + +#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ + ((REGNO) < 8 \ + || (unsigned) reg_renumber[REGNO] < 8 \ + || (GET_MODE_SIZE (MODE) >= 4 \ + && ((REGNO) == STACK_POINTER_REGNUM \ + || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM))) + +#define REGNO_OK_FOR_INDEX_P(REGNO) \ + ((REGNO) < 8 \ + || (unsigned) reg_renumber[REGNO] < 8) + +/* ??? This looks suspiciously wrong. */ +/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save + lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class) + and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS + says to allocate a LO_REGS spill instead, then this mismatch gives an + abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS + to be LO_REGS instead of BASE_REGS. It is not clear what affect this + change would have. */ +/* ??? This looks even more suspiciously wrong. PREFERRED_RELOAD_CLASS + must always return a strict subset of the input class. Just blindly + returning LO_REGS is safe only if the input class is a superset of LO_REGS, + but there is no check for this. Added another exception for NONARG_LO_REGS + because it is not a superset of LO_REGS. */ +/* ??? We now use NONARG_LO_REGS for caller_save_spill_class, so the + comments about BASE_REGS are now obsolete. */ +#define PREFERRED_RELOAD_CLASS(X,CLASS) \ + ((CLASS) == BASE_REGS || (CLASS) == NONARG_LO_REGS ? (CLASS) \ + : LO_REGS) +/* + ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \ + && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \ + : (GET_CODE ((X)) == CONST_INT \ + && (HOST_WIDE_UINT) INTVAL ((X)) > 255) ? NO_REGS \ + : LO_REGS) */ + +/* Must leave BASE_REGS and NONARG_LO_REGS reloads alone, see comment + above. */ +#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \ + ((CLASS) != LO_REGS && (CLASS) != BASE_REGS && (CLASS) != NONARG_LO_REGS \ + ? ((true_regnum (X) == -1 ? LO_REGS \ + : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? 
LO_REGS \ + : NO_REGS)) \ + : NO_REGS) + +#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE)) + +int thumb_shiftable_const (); + +#define CONST_OK_FOR_LETTER_P(VAL,C) \ + ((C) == 'I' ? (HOST_WIDE_UINT) (VAL) < 256 \ + : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \ + : (C) == 'K' ? thumb_shiftable_const (VAL) \ + : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \ + : (C) == 'M' ? ((HOST_WIDE_UINT) (VAL) < 1024 \ + && ((VAL) & 3) == 0) \ + : (C) == 'N' ? ((HOST_WIDE_UINT) (VAL) < 32) \ + : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \ + : 0) + +#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0 + +#define EXTRA_CONSTRAINT(X,C) \ + ((C) == 'Q' ? (GET_CODE (X) == MEM \ + && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0) + +/* Stack Layout and Calling Conventions */ + +#define STACK_GROWS_DOWNWARD 1 + +/* #define FRAME_GROWS_DOWNWARD 1 */ + +/* #define ARGS_GROW_DOWNWARD 1 */ + +#define STARTING_FRAME_OFFSET 0 + +#define FIRST_PARM_OFFSET(FNDECL) 0 + +/* Registers that address the stack frame */ + +#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */ + +#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */ + +#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */ + +#define STATIC_CHAIN_REGNUM 9 + +#define FRAME_POINTER_REQUIRED 0 + +#define ELIMINABLE_REGS \ +{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ + {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \ + {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}} + +/* On the Thumb we always want to perform the eliminations as we + actually only have one real register pointing to the stashed + variables: the stack pointer, and we never use the frame pointer. */ +#define CAN_ELIMINATE(FROM,TO) 1 + +/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */ +#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \ +{ \ + (OFFSET) = 0; \ + if ((FROM) == ARG_POINTER_REGNUM) \ + { \ + int count_regs = 0; \ + int regno; \ + (OFFSET) += get_frame_size (); \ + for (regno = 8; regno < 13; regno++) \ + if (regs_ever_live[regno] && ! call_used_regs[regno]) \ + count_regs++; \ + if (count_regs) \ + (OFFSET) += 4 * count_regs; \ + count_regs = 0; \ + for (regno = 0; regno < 8; regno++) \ + if (regs_ever_live[regno] && ! call_used_regs[regno]) \ + count_regs++; \ + if (count_regs || ! leaf_function_p () || far_jump_used_p()) \ + (OFFSET) += 4 * (count_regs + 1); \ + } \ + if ((TO) == STACK_POINTER_REGNUM) \ + (OFFSET) += current_function_outgoing_args_size; \ +} + +/* Passing Arguments on the stack */ + +#define PROMOTE_PROTOTYPES 1 + +#define ACCUMULATE_OUTGOING_ARGS 1 + +#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0 + +#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \ + ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \ + : 0) + +#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \ + (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \ + ? int_size_in_bytes (TYPE) \ + : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \ + ? 4 - (CUM) / 4 : 0) + +#define CUMULATIVE_ARGS int + +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \ + ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0) + +#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \ + (CUM) += ((((MODE) == BLKmode) \ + ? 
int_size_in_bytes (TYPE) \ + : GET_MODE_SIZE (MODE)) + 3) & ~3 + +#define FUNCTION_ARG_REGNO_P(REGNO) \ + ((REGNO) >=0 && (REGNO) <= 3) + +#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0) + +#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0) + +#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0) + + /* How large values are returned */ +/* A C expression which can inhibit the returning of certain function values + in registers, based on the type of value. */ +#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE) + +/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return + values must be in memory. On the ARM, they need only do so if larger + than a word, or if they contain elements offset from zero in the struct. */ +#define DEFAULT_PCC_STRUCT_RETURN 0 + + +#define STRUCT_VALUE_REGNUM 0 + +#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE)) + +#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE)) + +/* Implementing the Varargs Macros */ + +#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \ +{ \ + extern int current_function_anonymous_args; \ + current_function_anonymous_args = 1; \ + if ((CUM) < 16) \ + (PRETEND_SIZE) = 16 - (CUM); \ +} + +/* Trampolines for nested functions */ + +/* Output assembler code for a block containing the constant parts of + a trampoline, leaving space for the variable parts. + + On the Thumb we always switch into ARM mode to execute the trampoline. + Why - because it is easier. This code will always be branched to via + a BX instruction and since the compiler magically generates the address + of the function the linker has no opportunity to ensure that the + bottom bit is set. Thus the processor will be in ARM mode when it + reaches this code. So we duplicate the ARM trampoline code and add + a switch into Thumb mode as well. + + On the ARM, (if r8 is the static chain regnum, and remembering that + referencing pc adds an offset of 8) the trampoline looks like: + ldr r8, [pc, #0] + ldr pc, [pc] + .word static chain value + .word function's address + ??? FIXME: When the trampoline returns, r8 will be clobbered. */ +#define TRAMPOLINE_TEMPLATE(FILE) \ +{ \ + fprintf ((FILE), "\t.code 32\n"); \ + fprintf ((FILE), ".Ltrampoline_start:\n"); \ + fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \ + reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \ + fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \ + REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \ + REGISTER_PREFIX, REGISTER_PREFIX); \ + fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \ + fprintf ((FILE), "\t.word\t0\n"); \ + fprintf ((FILE), "\t.word\t0\n"); \ + fprintf ((FILE), "\t.code 16\n"); \ +} + +/* Length in units of the trampoline for entering a nested function. */ +#define TRAMPOLINE_SIZE 24 + +/* Alignment required for a trampoline in units. 
*/ +#define TRAMPOLINE_ALIGN 4 + +#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \ +{ \ + emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \ + (CHAIN)); \ + emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \ + (FNADDR)); \ +} + + +/* Implicit Calls to Library Routines */ + +#define TARGET_MEM_FUNCTIONS 1 + +#define OVERRIDE_OPTIONS thumb_override_options () + + +/* Addressing Modes */ + +#define HAVE_POST_INCREMENT 1 + +#define CONSTANT_ADDRESS_P(X) \ + (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X)) + +#define MAX_REGS_PER_ADDRESS 2 + +#ifdef REG_OK_STRICT + +#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) +#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X)) + +#define REG_MODE_OK_FOR_BASE_P(X,MODE) \ + REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE) + +#else /* REG_OK_STRICT */ + +#define REG_OK_FOR_BASE_P(X) \ + (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \ + || (X) == arg_pointer_rtx \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER) + +#define REG_MODE_OK_FOR_BASE_P(X,MODE) \ + (REGNO (X) < 8 \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER \ + || (GET_MODE_SIZE (MODE) >= 4 \ + && (REGNO (X) == STACK_POINTER_REGNUM \ + || (X) == arg_pointer_rtx))) + +#define REG_OK_FOR_INDEX_P(X) \ + (REGNO (X) < 8 \ + || REGNO (X) >= FIRST_PSEUDO_REGISTER) + +#endif /* REG_OK_STRICT */ + +/* In a REG+REG address, both must be INDEX registers. */ +#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X) + +#define LEGITIMATE_OFFSET(MODE,VAL) \ +(GET_MODE_SIZE (MODE) == 1 ? ((HOST_WIDE_UINT) (VAL) < 32) \ + : GET_MODE_SIZE (MODE) == 2 ? ((HOST_WIDE_UINT) (VAL) < 64 \ + && ((VAL) & 1) == 0) \ + : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \ + && ((VAL) & 3) == 0)) + +/* The AP may be eliminated to either the SP or the FP, so we use the + least common denominator, e.g. SImode, and offsets from 0 to 64. */ + +/* ??? Verify whether the above is the right approach. */ + +/* ??? Also, the FP may be eliminated to the SP, so perhaps that + needs special handling also. */ + +/* ??? Look at how the mips16 port solves this problem. It probably uses + better ways to solve some of these problems. */ + +/* Although it is not incorrect, we don't accept QImode and HImode + addresses based on the frame pointer or arg pointer until the reload pass starts. + This is so that eliminating such addresses into stack based ones + won't produce impossible code. */ +#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \ +{ \ + /* ??? Not clear if this is right. Experiment. */ \ + if (GET_MODE_SIZE (MODE) < 4 \ + && ! (reload_in_progress || reload_completed) \ + && (reg_mentioned_p (frame_pointer_rtx, X) \ + || reg_mentioned_p (arg_pointer_rtx, X) \ + || reg_mentioned_p (virtual_incoming_args_rtx, X) \ + || reg_mentioned_p (virtual_outgoing_args_rtx, X) \ + || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \ + || reg_mentioned_p (virtual_stack_vars_rtx, X))) \ + ; \ + /* Accept any base register. SP only in SImode or larger. */ \ + else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \ + goto WIN; \ + /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \ + else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \ + && CONSTANT_POOL_ADDRESS_P (X)) \ + goto WIN; \ + /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. 
*/ \
+  else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+	   && (GET_CODE (X) == LABEL_REF \
+	       || (GET_CODE (X) == CONST \
+		   && GET_CODE (XEXP (X, 0)) == PLUS \
+		   && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+		   && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+    goto WIN; \
+  /* Post-inc indexing only supported for SImode and larger.  */ \
+  else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+	   && GET_CODE (XEXP (X, 0)) == REG \
+	   && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+    goto WIN; \
+  else if (GET_CODE (X) == PLUS) \
+    { \
+      /* REG+REG address can be any two index registers.  */ \
+      /* ??? REG+REG addresses have been completely disabled before \
+	 reload completes, because we do not have enough available \
+	 reload registers.  We only have 3 guaranteed reload registers \
+	 (NONARG_LO_REGS - the frame pointer), but we need at least 4 \
+	 to support REG+REG addresses.  We have left them enabled after \
+	 reload completes, in the hope that reload_cse_regs and related \
+	 routines will be able to create them after the fact.  It is \
+	 probably possible to support REG+REG addresses with additional \
+	 reload work, but I do not have enough time to attempt such \
+	 a change at this time.  */ \
+      /* ??? Normally checking the mode here is wrong, since it isn't \
+	 impossible to use REG+REG with DFmode.  However, the movdf \
+	 pattern requires offsettable addresses, and REG+REG is not \
+	 offsettable, so it must be rejected somehow.  Trying to use \
+	 'o' fails, because offsettable_address_p does a QImode check.  \
+	 QImode is not valid for stack addresses, and has a smaller \
+	 range for non-stack bases, and this causes valid addresses \
+	 to be rejected.  So we just eliminate REG+REG here by checking \
+	 the mode.  */ \
+      /* We also disallow FRAME+REG addressing since we know that FRAME \
+	 will be replaced with STACK, and SP relative addressing only \
+	 permits SP+OFFSET.  */ \
+      if (GET_MODE_SIZE (MODE) <= 4 \
+	  /* ??? See comment above.  */ \
+	  && reload_completed \
+	  && GET_CODE (XEXP (X, 0)) == REG \
+	  && GET_CODE (XEXP (X, 1)) == REG \
+	  && XEXP (X, 0) != frame_pointer_rtx \
+	  && XEXP (X, 1) != frame_pointer_rtx \
+	  && XEXP (X, 0) != virtual_stack_vars_rtx \
+	  && XEXP (X, 1) != virtual_stack_vars_rtx \
+	  && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+	  && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+	goto WIN; \
+      /* REG+const has 5-7 bit offset for non-SP registers.  */ \
+      else if (GET_CODE (XEXP (X, 0)) == REG \
+	       && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+		   || XEXP (X, 0) == arg_pointer_rtx) \
+	       && GET_CODE (XEXP (X, 1)) == CONST_INT \
+	       && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+	goto WIN; \
+      /* REG+const has 10 bit offset for SP, but only SImode and \
+	 larger is supported.  */ \
+      /* ??? Should probably check for DI/DFmode overflow here \
+	 just like GO_IF_LEGITIMATE_OFFSET does.  */ \
+      else if (GET_CODE (XEXP (X, 0)) == REG \
+	       && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+	       && GET_MODE_SIZE (MODE) >= 4 \
+	       && GET_CODE (XEXP (X, 1)) == CONST_INT \
+	       && (HOST_WIDE_UINT) INTVAL (XEXP (X, 1)) < 1024 \
+	       && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+	goto WIN; \
+    } \
+}
+
+/* ??? If an HImode FP+large_offset address is converted to an HImode
+   SP+large_offset address, then reload won't know how to fix it.  It sees
+   only that SP isn't valid for HImode, and so reloads the SP into an index
+   register, but the resulting address is still invalid because the offset
+   is too big.  We fix it here instead by reloading the entire address.
*/ +/* We could probably achieve better results by defining PROMOTE_MODE to help + cope with the variances between the Thumb's signed and unsigned byte and + halfword load instructions. */ +#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \ +{ \ + if (GET_CODE (X) == PLUS \ + && GET_MODE_SIZE (MODE) < 4 \ + && GET_CODE (XEXP (X, 0)) == REG \ + && XEXP (X, 0) == stack_pointer_rtx \ + && GET_CODE (XEXP (X, 1)) == CONST_INT \ + && ! LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \ + { \ + rtx orig_X = X; \ + X = copy_rtx (X); \ + push_reload (orig_X, NULL_RTX, &X, NULL, \ + BASE_REG_CLASS, \ + Pmode, VOIDmode, 0, 0, OPNUM, TYPE); \ + goto WIN; \ + } \ +} + +#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) + +#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) + +#define LEGITIMATE_CONSTANT_P(X) \ + (GET_CODE (X) == CONST_INT \ + || GET_CODE (X) == CONST_DOUBLE \ + || CONSTANT_ADDRESS_P (X)) + + +/* Condition Code Status */ + +#define NOTICE_UPDATE_CC(EXP,INSN) \ +{ \ + if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \ + CC_STATUS_INIT; \ +} + + +/* Describing Relative Costs of Operations */ + +#define SLOW_BYTE_ACCESS 0 + +#define SLOW_UNALIGNED_ACCESS 1 + +#define NO_FUNCTION_CSE 1 + +#define NO_RECURSIVE_FUNCTION_CSE 1 + +#define REGISTER_MOVE_COST(FROM,TO) \ + (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2) + +#define MEMORY_MOVE_COST(M,CLASS,IN) \ + ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2)) + +/* This will allow better space optimization when compiling with -O */ +#define BRANCH_COST (optimize > 1 ? 1 : 0) + +#define RTX_COSTS(X,CODE,OUTER) \ + case MULT: \ + if (GET_CODE (XEXP (X, 1)) == CONST_INT) \ + { \ + int cycles = 0; \ + HOST_WIDE_UINT i = INTVAL (XEXP (X, 1)); \ + while (i) \ + { \ + i >>= 2; \ + cycles++; \ + } \ + return COSTS_N_INSNS (2) + cycles; \ + } \ + return COSTS_N_INSNS (1) + 16; \ + case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \ + case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \ + return COSTS_N_INSNS (1); \ + case SET: \ + return (COSTS_N_INSNS (1) \ + + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \ + + GET_CODE (SET_DEST (X)) == MEM)) + +#define CONST_COSTS(X,CODE,OUTER) \ + case CONST_INT: \ + if ((OUTER) == SET) \ + { \ + if ((HOST_WIDE_UINT) INTVAL (X) < 256) \ + return 0; \ + if (thumb_shiftable_const (INTVAL (X))) \ + return COSTS_N_INSNS (2); \ + return COSTS_N_INSNS (3); \ + } \ + else if (OUTER == PLUS \ + && INTVAL (X) < 256 && INTVAL (X) > -256) \ + return 0; \ + else if (OUTER == COMPARE \ + && (HOST_WIDE_UINT) INTVAL (X) < 256) \ + return 0; \ + else if (OUTER == ASHIFT || OUTER == ASHIFTRT \ + || OUTER == LSHIFTRT) \ + return 0; \ + return COSTS_N_INSNS (2); \ + case CONST: \ + case CONST_DOUBLE: \ + case LABEL_REF: \ + case SYMBOL_REF: \ + return COSTS_N_INSNS(3); + +#define ADDRESS_COST(X) \ + ((GET_CODE (X) == REG \ + || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \ + && GET_CODE (XEXP (X, 1)) == CONST_INT)) \ + ? 
1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+  thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+  if (GET_CODE ((X)) == REG) \
+    fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+  else if (GET_CODE ((X)) == POST_INC) \
+    fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+  else if (GET_CODE ((X)) == PLUS) \
+    { \
+      if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+	fprintf ((STREAM), "[%s, #%d]", \
+		 reg_names[REGNO (XEXP ((X), 0))], \
+		 (int) INTVAL (XEXP ((X), 1))); \
+      else \
+	fprintf ((STREAM), "[%s, %s]", \
+		 reg_names[REGNO (XEXP ((X), 0))], \
+		 reg_names[REGNO (XEXP ((X), 1))]); \
+    } \
+  else \
+    output_addr_const ((STREAM), (X)); \
+}
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@' || ((CODE) == '_'))
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+  asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+  fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+  final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+  {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+   limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+#include <stdio.h>
+
+enum machine_mode;
+
+struct rtx_def;
+typedef struct rtx_def *rtx;
+
+union tree_node;
+typedef union tree_node *tree;
+
+extern int thumb_cmp_operand(rtx, enum machine_mode);
+extern void thumb_reorg(rtx first);
+extern void thumb_expand_movstrqi(rtx *);
+extern void thumb_reload_out_si(rtx);
+extern void final_prescan_insn(rtx);
+extern int far_jump_used_p();
+extern void thumb_function_prologue(FILE *, int);
+extern void thumb_expand_prologue();
+extern void thumb_function_epilogue(FILE *, int);
+extern void thumb_expand_epilogue();
+extern char *thumb_unexpanded_epilogue();
+extern char *thumb_load_double_from_address();
+extern char *output_move_mem_multiple();
+extern void thumb_print_operand(FILE *, rtx, int);
+extern int thumb_return_in_memory(tree);
+extern void thumb_override_options();
+extern int arm_valid_machine_decl_attribute(tree, tree, tree, tree);
+extern int s_register_operand(rtx, enum machine_mode);
diff --git a/gcc/thumb.md b/gcc/thumb.md
new file mode 100755
index 0000000..fe37c9c
--- /dev/null
+++ b/gcc/thumb.md
@@ -0,0 +1,1157 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998, 2002 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+ +;; GNU CC is free software; you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation; either version 2, or (at your option) +;; any later version. + +;; GNU CC is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GNU CC; see the file COPYING. If not, write to +;; the Free Software Foundation, 59 Temple Place - Suite 330, +;; Boston, MA 02111-1307, USA. + +;; LENGTH of an instruction is 2 bytes +(define_attr "length" "" (const_int 2)) + +;; CONDS is set to UNCHANGED when an insn does not affect the condition codes +;; Most insns change the condition codes +(define_attr "conds" "changed,unchanged" (const_string "changed")) + +;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a +;; distant label. +(define_attr "far_jump" "yes,no" (const_string "no")) + +;; Start with move insns + +(define_expand "movsi" + [(set (match_operand:SI 0 "general_operand" "") + (match_operand:SI 1 "general_operand" ""))] + "" + " + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (SImode, operands[1]); + } +") + +(define_insn "*movsi_insn" + [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h") + (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))] + "register_operand (operands[0], SImode) + || register_operand (operands[1], SImode)" + "@ + add\\t%0, %1, #0 + mov\\t%0, %1 + # + # + ldmia\\t%1, {%0} + stmia\\t%0, {%1} + ldr\\t%0, %1 + str\\t%1, %0 + mov\\t%0, %1 + mov\\t%0, %1" +[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")]) + +(define_split + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" ""))] + "thumb_shiftable_const (INTVAL (operands[1]))" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))] + " +{ + HOST_WIDE_UINT val = INTVAL (operands[1]); + HOST_WIDE_UINT mask = 0xff; + int i; + for (i = 0; i < 25; i++) + if ((val & (mask << i)) == val) + break; + + if (i == 0) + FAIL; + + operands[1] = GEN_INT (val >> i); + operands[2] = GEN_INT (i); +}") + +(define_split + [(set (match_operand:SI 0 "register_operand" "") + (match_operand:SI 1 "const_int_operand" ""))] + "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256" + [(set (match_dup 0) (match_dup 1)) + (set (match_dup 0) (neg:SI (match_dup 0)))] + " + operands[1] = GEN_INT (- INTVAL (operands[1])); +") + +(define_expand "movhi" + [(set (match_operand:HI 0 "general_operand" "") + (match_operand:HI 1 "general_operand" ""))] + "" + " +{ + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (HImode, operands[1]); + + /* ??? We shouldn't really get invalid addresses here, but this can + happen if we are passed a SP (never OK for HImode/QImode) or virtual + register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode) + relative address. */ + /* ??? This should perhaps be fixed elsewhere, for instance, in + fixup_stack_1, by checking for other kinds of invalid addresses, + e.g. a bare reference to a virtual register. This may confuse the + alpha though, which must handle this case differently. */ + if (GET_CODE (operands[0]) == MEM + && ! 
memory_address_p (GET_MODE (operands[0]), + XEXP (operands[0], 0))) + { + rtx temp = copy_to_reg (XEXP (operands[0], 0)); + operands[0] = change_address (operands[0], VOIDmode, temp); + } + if (GET_CODE (operands[1]) == MEM + && ! memory_address_p (GET_MODE (operands[1]), + XEXP (operands[1], 0))) + { + rtx temp = copy_to_reg (XEXP (operands[1], 0)); + operands[1] = change_address (operands[1], VOIDmode, temp); + } + } + /* Handle loading a large integer during reload */ + else if (GET_CODE (operands[1]) == CONST_INT + && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I')) + { + /* Writing a constant to memory needs a scratch, which should + be handled with SECONDARY_RELOADs. */ + if (GET_CODE (operands[0]) != REG) + abort (); + + operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0); + emit_insn (gen_movsi (operands[0], operands[1])); + DONE; + } +}") + +(define_insn "*movhi_insn" + [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l") + (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))] + "register_operand (operands[0], HImode) + || register_operand (operands[1], HImode)" + "@ + add\\t%0, %1, #0 + ldrh\\t%0, %1 + strh\\t%1, %0 + mov\\t%0, %1 + mov\\t%0, %1 + mov\\t%0, %1") + +(define_expand "movqi" + [(set (match_operand:QI 0 "general_operand" "") + (match_operand:QI 1 "general_operand" ""))] + "" + " +{ + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (QImode, operands[1]); + + /* ??? We shouldn't really get invalid addresses here, but this can + happen if we are passed a SP (never OK for HImode/QImode) or virtual + register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode) + relative address. */ + /* ??? This should perhaps be fixed elsewhere, for instance, in + fixup_stack_1, by checking for other kinds of invalid addresses, + e.g. a bare reference to a virtual register. This may confuse the + alpha though, which must handle this case differently. */ + if (GET_CODE (operands[0]) == MEM + && ! memory_address_p (GET_MODE (operands[0]), + XEXP (operands[0], 0))) + { + rtx temp = copy_to_reg (XEXP (operands[0], 0)); + operands[0] = change_address (operands[0], VOIDmode, temp); + } + if (GET_CODE (operands[1]) == MEM + && ! memory_address_p (GET_MODE (operands[1]), + XEXP (operands[1], 0))) + { + rtx temp = copy_to_reg (XEXP (operands[1], 0)); + operands[1] = change_address (operands[1], VOIDmode, temp); + } + } + /* Handle loading a large integer during reload */ + else if (GET_CODE (operands[1]) == CONST_INT + && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I')) + { + /* Writing a constant to memory needs a scratch, which should + be handled with SECONDARY_RELOADs. */ + if (GET_CODE (operands[0]) != REG) + abort (); + + operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0); + emit_insn (gen_movsi (operands[0], operands[1])); + DONE; + } +}") + +(define_insn "*movqi_insn" + [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l") + (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))] + "register_operand (operands[0], QImode) + || register_operand (operands[1], QImode)" + "@ + add\\t%0, %1, #0 + ldrb\\t%0, %1 + strb\\t%1, %0 + mov\\t%0, %1 + mov\\t%0, %1 + mov\\t%0, %1") + +(define_expand "movdi" + [(set (match_operand:DI 0 "general_operand" "") + (match_operand:DI 1 "general_operand" ""))] + "" + " + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (DImode, operands[1]); + } +") + +;;; ??? 
This should have alternatives for constants. +;;; ??? This was originally identical to the movdf_insn pattern. +;;; ??? The 'i' constraint looks funny, but it should always be replaced by +;;; thumb_reorg with a memory reference. +(define_insn "*movdi_insn" + [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r") + (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))] + "register_operand (operands[0], DImode) + || register_operand (operands[1], DImode)" + "* +{ + switch (which_alternative) + { + case 0: + if (REGNO (operands[1]) == REGNO (operands[0]) + 1) + return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\"; + return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\"; + case 1: + return \"mov\\t%Q0, %1\;mov\\t%R0, #0\"; + case 2: + operands[1] = GEN_INT (- INTVAL (operands[1])); + return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\"; + case 3: + return \"ldmia\\t%1, {%0, %H0}\"; + case 4: + return \"stmia\\t%0, {%1, %H1}\"; + case 5: + return thumb_load_double_from_address (operands); + case 6: + operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4)); + output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands); + return \"\"; + case 7: + if (REGNO (operands[1]) == REGNO (operands[0]) + 1) + return \"mov\\t%0, %1\;mov\\t%H0, %H1\"; + return \"mov\\t%H0, %H1\;mov\\t%0, %1\"; + } +}"[(set_attr "length" "4,4,6,2,2,6,4,4")]) + +(define_expand "movdf" + [(set (match_operand:DF 0 "general_operand" "") + (match_operand:DF 1 "general_operand" ""))] + "" + " + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (DFmode, operands[1]); + } +") + +;;; ??? This should have alternatives for constants. +;;; ??? This was originally identical to the movdi_insn pattern. +;;; ??? The 'F' constraint looks funny, but it should always be replaced by +;;; thumb_reorg with a memory reference. +(define_insn "*movdf_insn" + [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r") + (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))] + "register_operand (operands[0], DFmode) + || register_operand (operands[1], DFmode)" + "* + switch (which_alternative) + { + case 0: + if (REGNO (operands[1]) == REGNO (operands[0]) + 1) + return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\"; + return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\"; + case 1: + return \"ldmia\\t%1, {%0, %H0}\"; + case 2: + return \"stmia\\t%0, {%1, %H1}\"; + case 3: + return thumb_load_double_from_address (operands); + case 4: + operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4)); + output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands); + return \"\"; + case 5: + if (REGNO (operands[1]) == REGNO (operands[0]) + 1) + return \"mov\\t%0, %1\;mov\\t%H0, %H1\"; + return \"mov\\t%H0, %H1\;mov\\t%0, %1\"; + } +"[(set_attr "length" "4,2,2,6,4,4")]) + +(define_expand "movsf" + [(set (match_operand:SF 0 "general_operand" "") + (match_operand:SF 1 "general_operand" ""))] + "" + " + if (! (reload_in_progress || reload_completed)) + { + if (GET_CODE (operands[0]) != REG) + operands[1] = force_reg (SFmode, operands[1]); + } +") + +;;; ??? This should have alternatives for constants. 
+(define_insn "*movsf_insn" + [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h") + (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))] + "register_operand (operands[0], SFmode) + || register_operand (operands[1], SFmode)" + "@ + add\\t%0, %1, #0 + ldmia\\t%1, {%0} + stmia\\t%0, {%1} + ldr\\t%0, %1 + str\\t%1, %0 + mov\\t%0, %1 + mov\\t%0, %1") + +;; Widening move insns + +(define_expand "zero_extendhisi2" + [(set (match_operand:SI 0 "s_register_operand" "") + (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))] + "" + " + if (GET_CODE (operands[1]) != MEM) + { + rtx temp = gen_reg_rtx (SImode); + + operands[1] = force_reg (HImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16))); + emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16))); + DONE; + } +") + +(define_insn "*zero_extendhisi2_insn" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))] + "" + "ldrh\\t%0, %1") + +(define_expand "zero_extendqisi2" + [(set (match_operand:SI 0 "s_register_operand" "") + (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))] + "" + " + if (GET_CODE (operands[1]) != MEM) + { + rtx temp = gen_reg_rtx (SImode); + + operands[1] = force_reg (QImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24))); + emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24))); + DONE; + } +") + +(define_insn "*zero_extendqisi2_insn" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))] + "" + "ldrb\\t%0, %1") + +(define_expand "extendhisi2" + [(parallel [(set (match_operand:SI 0 "s_register_operand" "") + (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" ""))) + (clobber (match_scratch:SI 2 ""))])] + "" + " + if (GET_CODE (operands[1]) != MEM) + { + rtx temp = gen_reg_rtx (SImode); + + operands[1] = force_reg (HImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16))); + emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16))); + DONE; + } +") + +(define_insn "*extendhisi2_insn" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (sign_extend:SI (match_operand:HI 1 "memory_operand" "m"))) + (clobber (match_scratch:SI 2 "=&l"))] + "" + "* +{ + rtx ops[4]; + /* This code used to try to use 'V', and fix the address only if it was + offsettable, but this fails for e.g. REG+48 because 48 is outside the + range of QImode offsets, and offsettable_address_p does a QImode + address check. 
*/ + + if (GET_CODE (XEXP (operands[1], 0)) == PLUS) + { + ops[1] = XEXP (XEXP (operands[1], 0), 0); + ops[2] = XEXP (XEXP (operands[1], 0), 1); + } + else + { + ops[1] = XEXP (operands[1], 0); + ops[2] = const0_rtx; + } + if (GET_CODE (ops[2]) == REG) + return \"ldrsh\\t%0, %1\"; + + ops[0] = operands[0]; + ops[3] = operands[2]; + output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops); + return \"\"; +}" +[(set_attr "length" "4")]) + +(define_expand "extendqisi2" + [(set (match_operand:SI 0 "s_register_operand" "") + (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))] + "" + " + if (GET_CODE (operands[1]) != MEM) + { + rtx temp = gen_reg_rtx (SImode); + + operands[1] = force_reg (QImode, operands[1]); + operands[1] = gen_lowpart (SImode, operands[1]); + emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24))); + emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24))); + DONE; + } +") + +(define_insn "*extendqisi2_insn" + [(set (match_operand:SI 0 "s_register_operand" "=l,l") + (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))] + "" + "* +{ + rtx ops[3]; + + if (which_alternative == 0) + return \"ldrsb\\t%0, %1\"; + ops[0] = operands[0]; + if (GET_CODE (XEXP (operands[1], 0)) == PLUS) + { + ops[1] = XEXP (XEXP (operands[1], 0), 0); + ops[2] = XEXP (XEXP (operands[1], 0), 1); + + if (GET_CODE (ops[1]) == REG && GET_CODE (ops[2]) == REG) + output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops); + else if (GET_CODE (ops[1]) == REG) + { + if (REGNO (ops[1]) == REGNO (operands[0])) + output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); + else + output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); + } + else + { + if (REGNO (ops[2]) == REGNO (operands[0])) + output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); + else + output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); + } + } + else if (REGNO (operands[0]) == REGNO (XEXP (operands[1], 0))) + { + output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops); + } + else + { + ops[1] = XEXP (operands[1], 0); + ops[2] = const0_rtx; + output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops); + } + return \"\"; +}" +[(set_attr "length" "2,6")]) + +;; We don't really have extzv, but defining this using shifts helps +;; to reduce register pressure later on. 
+ +(define_expand "extzv" + [(set (match_dup 4) + (ashift:SI (match_operand:SI 1 "register_operand" "") + (match_operand:SI 2 "const_int_operand" ""))) + (set (match_operand:SI 0 "register_operand" "") + (lshiftrt:SI (match_dup 4) + (match_operand:SI 3 "const_int_operand" "")))] + "" + " +{ + HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]); + HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]); + operands[3] = GEN_INT (rshift); + if (lshift == 0) + { + emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3])); + DONE; + } + operands[2] = GEN_INT (lshift); + operands[4] = gen_reg_rtx (SImode); +} +") + +;; Block-move insns + +(define_expand "movstrqi" + [(match_operand:BLK 0 "general_operand" "") + (match_operand:BLK 1 "general_operand" "") + (match_operand:SI 2 "" "") + (match_operand:SI 3 "const_int_operand" "")] + "" + " + if (INTVAL (operands[3]) != 4 + || GET_CODE (operands[2]) != CONST_INT + || INTVAL (operands[2]) > 48) + FAIL; + + thumb_expand_movstrqi (operands); + DONE; +") + +(define_insn "movmem12b" + [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l")) + (mem:SI (match_operand:SI 1 "register_operand" "+&l"))) + (set (mem:SI (plus:SI (match_dup 0) (const_int 4))) + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (mem:SI (plus:SI (match_dup 0) (const_int 8))) + (mem:SI (plus:SI (match_dup 1) (const_int 8)))) + (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12))) + (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12))) + (clobber (match_scratch:SI 2 "=&l")) + (clobber (match_scratch:SI 3 "=&l")) + (clobber (match_scratch:SI 4 "=&l"))] + "" + "* return output_move_mem_multiple (3, operands);" +[(set_attr "length" "4")]) + +(define_insn "movmem8b" + [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l")) + (mem:SI (match_operand:SI 1 "register_operand" "+&l"))) + (set (mem:SI (plus:SI (match_dup 0) (const_int 4))) + (mem:SI (plus:SI (match_dup 1) (const_int 4)))) + (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8))) + (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8))) + (clobber (match_scratch:SI 2 "=&l")) + (clobber (match_scratch:SI 3 "=&l"))] + "" + "* return output_move_mem_multiple (2, operands);" +[(set_attr "length" "4")]) + +;; Arithmetic insns + +(define_insn "adddi3" + [(set (match_operand:DI 0 "s_register_operand" "=l") + (plus:DI (match_operand:DI 1 "s_register_operand" "%0") + (match_operand:DI 2 "s_register_operand" "l")))] + "" + "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2" +[(set_attr "conds" "changed") + (set_attr "length" "8")]) + +;; register group 'k' is a single register group containing only the stack +;; register. Trying to reload it will always fail catastrophically, +;; so never allow those alternatives to match if reloading is needed. +(define_insn "addsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l,l,l,*r,*h,l,!k") + (plus:SI (match_operand:SI 1 "s_register_operand" "%0,0,l,*0,*0,!k,!k") + (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))] + "" + "* + static char *asms[] = +{ + \"add\\t%0, %0, %2\", + \"sub\\t%0, %0, #%n2\", + \"add\\t%0, %1, %2\", + \"add\\t%0, %0, %2\", + \"add\\t%0, %0, %2\", + \"add\\t%0, %1, %2\", + \"add\\t%0, %1, %2\" +}; + if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT + && INTVAL (operands[2]) < 0) + return \"sub\\t%0, %1, #%n2\"; + return asms[which_alternative]; +") + +; reloading and elimination of the frame pointer can sometimes cause this +; optimization to be missed. 
+(define_peephole + [(set (match_operand:SI 0 "register_operand" "=l") + (match_operand:SI 1 "const_int_operand" "M")) + (set (match_dup 0) + (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))] + "REGNO (operands[2]) == STACK_POINTER_REGNUM + && (HOST_WIDE_UINT) (INTVAL (operands[1])) < 1024 + && (INTVAL (operands[1]) & 3) == 0" + "add\\t%0, %2, %1") + +(define_insn "subdi3" + [(set (match_operand:DI 0 "s_register_operand" "=l") + (minus:DI (match_operand:DI 1 "s_register_operand" "0") + (match_operand:DI 2 "s_register_operand" "l")))] + "" + "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2" +[(set_attr "conds" "changed") + (set_attr "length" "8")]) + +(define_insn "subsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (minus:SI (match_operand:SI 1 "s_register_operand" "l") + (match_operand:SI 2 "s_register_operand" "l")))] + "" + "sub\\t%0, %1, %2") + +;; We must ensure that one input matches the output, and that the other input +;; does not match the output. Using 0 satisfies the first, and using & +;; satisfies the second. Unfortunately, this fails when operands 1 and 2 +;; are the same, because reload will make operand 0 match operand 1 without +;; realizing that this conflicts with operand 2. We fix this by adding another +;; alternative to match this case, and then `reload' it ourselves. This +;; alternative must come first. +(define_insn "mulsi3" + [(set (match_operand:SI 0 "s_register_operand" "=&l,&l,&l") + (mult:SI (match_operand:SI 1 "s_register_operand" "%l,*h,0") + (match_operand:SI 2 "s_register_operand" "l,l,l")))] + "" + "* +{ + if (which_alternative < 2) + return \"mov\\t%0, %1\;mul\\t%0, %0, %2\"; + else + return \"mul\\t%0, %0, %2\"; +}" + [(set_attr "length" "4,4,2")]) + +(define_insn "negsi2" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (neg:SI (match_operand:SI 1 "s_register_operand" "l")))] + "" + "neg\\t%0, %1") + +;; Logical insns + +(define_expand "andsi3" + [(set (match_operand:SI 0 "s_register_operand" "") + (and:SI (match_operand:SI 1 "s_register_operand" "") + (match_operand:SI 2 "nonmemory_operand" "")))] + "" + " + if (GET_CODE (operands[2]) != CONST_INT) + operands[2] = force_reg (SImode, operands[2]); + else + { + int i; + if (((HOST_WIDE_UINT) ~ INTVAL (operands[2])) < 256) + { + operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2]))); + emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1])); + DONE; + } + + for (i = 9; i <= 31; i++) + if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2])) + { + emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i), + const0_rtx)); + DONE; + } + else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2])) + { + rtx shift = GEN_INT (i); + rtx reg = gen_reg_rtx (SImode); + emit_insn (gen_lshrsi3 (reg, operands[1], shift)); + emit_insn (gen_ashlsi3 (operands[0], reg, shift)); + DONE; + } + + operands[2] = force_reg (SImode, operands[2]); + } +") + +(define_insn "*andsi3_insn" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (and:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "s_register_operand" "l")))] + "" + "and\\t%0, %0, %2") + +(define_insn "bicsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (and:SI (not:SI (match_operand:SI 1 "s_register_operand" "l")) + (match_operand:SI 2 "s_register_operand" "0")))] + "" + "bic\\t%0, %0, %1") + +(define_insn "iorsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (ior:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "s_register_operand" 
"l")))] + "" + "orr\\t%0, %0, %2") + +(define_insn "xorsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (xor:SI (match_operand:SI 1 "s_register_operand" "%0") + (match_operand:SI 2 "s_register_operand" "l")))] + "" + "eor\\t%0, %0, %2") + +(define_insn "one_cmplsi2" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (not:SI (match_operand:SI 1 "s_register_operand" "l")))] + "" + "mvn\\t%0, %1") + +;; Shift and rotation insns + +(define_insn "ashlsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l,l") + (ashift:SI (match_operand:SI 1 "s_register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] + "" + "@ + lsl\\t%0, %1, %2 + lsl\\t%0, %0, %2") + +(define_insn "ashrsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l,l") + (ashiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] + "" + "@ + asr\\t%0, %1, %2 + asr\\t%0, %0, %2") + +(define_insn "lshrsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l,l") + (lshiftrt:SI (match_operand:SI 1 "s_register_operand" "l,0") + (match_operand:SI 2 "nonmemory_operand" "N,l")))] + "" + "@ + lsr\\t%0, %1, %2 + lsr\\t%0, %0, %2") + +(define_insn "rotrsi3" + [(set (match_operand:SI 0 "s_register_operand" "=l") + (rotatert:SI (match_operand:SI 1 "s_register_operand" "0") + (match_operand:SI 2 "s_register_operand" "l")))] + "" + "ror\\t%0, %0, %2") + +;; Comparison insns + +(define_expand "cmpsi" + [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "") + (match_operand:SI 1 "nonmemory_operand" "")))] + "" + " + if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG) + { + if (GET_CODE (operands[1]) != CONST_INT + || (HOST_WIDE_UINT) (INTVAL (operands[1])) >= 256) + { + if (GET_CODE (operands[1]) != CONST_INT + || INTVAL (operands[1]) < -255 + || INTVAL (operands[1]) > 0) + operands[1] = force_reg (SImode, operands[1]); + else + { + operands[1] = force_reg (SImode, + GEN_INT (- INTVAL (operands[1]))); + emit_insn (gen_cmnsi (operands[0], operands[1])); + DONE; + } + } + } +") + +(define_insn "*cmpsi_insn" + [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l,*r,*h") + (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))] + "" + "@ + cmp\\t%0, %1 + cmp\\t%0, %1 + cmp\\t%0, %1") + +(define_insn "tstsi" + [(set (cc0) (match_operand:SI 0 "s_register_operand" "l"))] + "" + "cmp\\t%0, #0") + +(define_insn "cmnsi" + [(set (cc0) (compare (match_operand:SI 0 "s_register_operand" "l") + (neg:SI (match_operand:SI 1 "s_register_operand" "l"))))] + "" + "cmn\\t%0, %1") + +;; Jump insns + +(define_insn "jump" + [(set (pc) (label_ref (match_operand 0 "" "")))] + "" + "* + if (get_attr_length (insn) == 2) + return \"b\\t%l0\"; + return \"bl\\t%l0\\t%@ far jump\"; +"[(set (attr "far_jump") + (if_then_else (eq_attr "length" "4") + (const_string "yes") + (const_string "no"))) + (set (attr "length") + (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048)) + (le (minus (match_dup 0) (pc)) (const_int 2044))) + (const_int 2) + (const_int 4)))]) + + +(define_expand "beq" + [(set (pc) (if_then_else (eq (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bne" + [(set (pc) (if_then_else (ne (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bge" + [(set (pc) (if_then_else (ge (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "ble" + [(set (pc) (if_then_else (le (cc0) (const_int 0)) + 
(label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bgt" + [(set (pc) (if_then_else (gt (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "blt" + [(set (pc) (if_then_else (lt (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bgeu" + [(set (pc) (if_then_else (geu (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bleu" + [(set (pc) (if_then_else (leu (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bgtu" + [(set (pc) (if_then_else (gtu (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_expand "bltu" + [(set (pc) (if_then_else (ltu (cc0) (const_int 0)) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "") + +(define_insn "*cond_branch" + [(set (pc) (if_then_else (match_operator 1 "comparison_operator" + [(cc0) (const_int 0)]) + (label_ref (match_operand 0 "" "")) + (pc)))] + "" + "* + switch (get_attr_length (insn)) + { + case 2: return \"b%d1\\t%l0\\t%@cond_branch\"; + case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\"; + default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\"; + } +"[(set (attr "far_jump") + (if_then_else (eq_attr "length" "6") + (const_string "yes") + (const_string "no"))) + (set (attr "length") + (if_then_else + (and (ge (minus (match_dup 0) (pc)) (const_int -252)) + (le (minus (match_dup 0) (pc)) (const_int 254))) + (const_int 2) + (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044)) + (le (minus (match_dup 0) (pc)) (const_int 2044))) + (const_int 4) + (const_int 6))))]) + +(define_insn "*cond_branch_reversed" + [(set (pc) (if_then_else (match_operator 1 "comparison_operator" + [(cc0) (const_int 0)]) + (pc) + (label_ref (match_operand 0 "" ""))))] + "" + "* + switch (get_attr_length (insn)) + { + case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\"; + case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\"; + default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\"; + } + return \"\"; +"[(set (attr "far_jump") + (if_then_else (eq_attr "length" "6") + (const_string "yes") + (const_string "no"))) + (set (attr "length") + (if_then_else + (and (ge (minus (match_dup 0) (pc)) (const_int -252)) + (le (minus (match_dup 0) (pc)) (const_int 254))) + (const_int 2) + (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044)) + (le (minus (match_dup 0) (pc)) (const_int 2044))) + (const_int 4) + (const_int 6))))]) + +(define_insn "indirect_jump" + [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r"))] + "" + "mov\\tpc, %0") + +(define_insn "tablejump" + [(set (pc) (match_operand:SI 0 "s_register_operand" "l*r")) + (use (label_ref (match_operand 1 "" "")))] + "" + "mov\\tpc, %0") + +;; Call insns + +(define_expand "call" + [(call (match_operand:SI 0 "memory_operand" "") + (match_operand 1 "" ""))] + "" + " +{ + if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[0], 0)) != REG) + XEXP (operands[0], 0) = force_reg (Pmode, XEXP (operands[0], 0)); +}") + +(define_insn "*call_indirect" + [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r")) + (match_operand 1 "" ""))] + "! 
TARGET_CALLER_INTERWORKING" + "bl\\t_call_via_%0" +[(set_attr "length" "4")]) +;; The non THUMB_INTERWORK, non TARGET_CALLER_INTERWORKING version +;; used to be: "mov\\tlr,pc\;bx\\t%0", but the mov does not set +;; the bottom bit of lr so that a function return (using bx) +;; would switch back into ARM mode... + +(define_insn "*call_indirect_interwork" + [(call (mem:SI (match_operand:SI 0 "s_register_operand" "l*r")) + (match_operand 1 "" ""))] + "TARGET_CALLER_INTERWORKING" + "bl\\t_interwork_call_via_%0" +[(set_attr "length" "4")]) + +(define_expand "call_value" + [(set (match_operand 0 "" "") + (call (match_operand 1 "memory_operand" "") + (match_operand 2 "" "")))] + "" + " +{ + if (TARGET_LONG_CALLS && GET_CODE (XEXP (operands[1], 0)) != REG) + XEXP (operands[1], 0) = force_reg (Pmode, XEXP (operands[1], 0)); +}") + +(define_insn "*call_value_indirect" + [(set (match_operand 0 "" "=l") + (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r")) + (match_operand 2 "" "")))] + "! TARGET_CALLER_INTERWORKING" + "bl\\t_call_via_%1" +[(set_attr "length" "4")]) +;; See comment for call_indirect pattern + +(define_insn "*call_value_indirect_interwork" + [(set (match_operand 0 "" "=l") + (call (mem:SI (match_operand:SI 1 "s_register_operand" "l*r")) + (match_operand 2 "" "")))] + "TARGET_CALLER_INTERWORKING" + "bl\\t_interwork_call_via_%1" +[(set_attr "length" "4")]) + + +(define_insn "*call_insn" + [(call (mem:SI (match_operand:SI 0 "" "i")) + (match_operand:SI 1 "" ""))] + "! TARGET_LONG_CALLS && GET_CODE (operands[0]) == SYMBOL_REF" + "bl\\t%a0" +[(set_attr "length" "4")]) + +(define_insn "*call_value_insn" + [(set (match_operand 0 "s_register_operand" "=l") + (call (mem:SI (match_operand 1 "" "i")) + (match_operand 2 "" "")))] + "! TARGET_LONG_CALLS && GET_CODE (operands[1]) == SYMBOL_REF" + "bl\\t%a1" +[(set_attr "length" "4")]) + +;; Untyped call not required, since all funcs return in r0 + +;; Miscellaneous patterns + +(define_insn "nop" + [(clobber (const_int 0))] + "" + "mov\\tr8, r8") + +(define_insn "blockage" + [(unspec_volatile [(const_int 0)] 0)] + "" + "" + [(set_attr "length" "0")]) + +(define_expand "prologue" + [(const_int 0)] + "" + " + thumb_expand_prologue (); + DONE; +") + +(define_expand "epilogue" + [(unspec_volatile [(const_int 0)] 1)] + "" + " + thumb_expand_epilogue (); +") + +(define_insn "*epilogue_insns" + [(unspec_volatile [(const_int 0)] 1)] + "" + "* + return thumb_unexpanded_epilogue (); +" +[(set_attr "length" "42")]) + +;; Special patterns for dealing with the constant pool + +(define_insn "consttable_4" + [(unspec_volatile [(match_operand 0 "" "")] 2)] + "" + "* +{ + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) + { + case MODE_FLOAT: + { + union real_extract u; + memcpy((char *)&u, (char *)&CONST_DOUBLE_LOW(operands[0]), sizeof u); + assemble_real (u.d, GET_MODE (operands[0])); + break; + } + default: + assemble_integer (operands[0], 4, 1); + break; + } + return \"\"; +}" +[(set_attr "length" "4")]) + +(define_insn "consttable_8" + [(unspec_volatile [(match_operand 0 "" "")] 3)] + "" + "* +{ + switch (GET_MODE_CLASS (GET_MODE (operands[0]))) + { + case MODE_FLOAT: + { + union real_extract u; + memcpy((char *)&u, (char *)&CONST_DOUBLE_LOW(operands[0]), sizeof u); + assemble_real (u.d, GET_MODE (operands[0])); + break; + } + default: + assemble_integer (operands[0], 8, 1); + break; + } + return \"\"; +}" +[(set_attr "length" "8")]) + +(define_insn "consttable_end" + [(unspec_volatile [(const_int 0)] 4)] + "" + "* + /* Nothing to do (currently). 
*/ + return \"\"; +") + +(define_insn "align_4" + [(unspec_volatile [(const_int 0)] 5)] + "" + "* + assemble_align (32); + return \"\"; +") diff --git a/libgcc/fp-bit.c b/libgcc/fp-bit.c new file mode 100755 index 0000000..6b8bd70 --- /dev/null +++ b/libgcc/fp-bit.c @@ -0,0 +1,1507 @@ +/* This is a software floating point library which can be used instead of + the floating point routines in libgcc1.c for targets without hardware + floating point. + Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. */ + +/* This implements IEEE 754 format arithmetic, but does not provide a + mechanism for setting the rounding mode, or for generating or handling + exceptions. + + The original code by Steve Chamberlain, hacked by Mark Eichin and Jim + Wilson, all of Cygnus Support. */ + +/* The intended way to use this file is to make two copies, add `#define FLOAT' + to one copy, then compile both copies and add them to libgcc.a. */ + +/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines + from this file are compiled via additional -D options. + + This avoids the need to pull in the entire fp emulation library + when only a small number of functions are needed. + + If FINE_GRAINED_LIBRARIES is not defined, then compile every + suitable routine. 
*/ +#ifndef FINE_GRAINED_LIBRARIES +#define L_pack_df +#define L_unpack_df +#define L_pack_sf +#define L_unpack_sf +#define L_addsub_sf +#define L_addsub_df +#define L_mul_sf +#define L_mul_df +#define L_div_sf +#define L_div_df +#define L_fpcmp_parts_sf +#define L_fpcmp_parts_df +#define L_compare_sf +#define L_compare_df +#define L_eq_sf +#define L_eq_df +#define L_ne_sf +#define L_ne_df +#define L_gt_sf +#define L_gt_df +#define L_ge_sf +#define L_ge_df +#define L_lt_sf +#define L_lt_df +#define L_le_sf +#define L_le_df +#define L_si_to_sf +#define L_si_to_df +#define L_sf_to_si +#define L_df_to_si +#define L_f_to_usi +#define L_df_to_usi +#define L_negate_sf +#define L_negate_df +#define L_make_sf +#define L_make_df +#define L_sf_to_df +#define L_df_to_sf +#endif + +/* The following macros can be defined to change the behaviour of this file: + FLOAT: Implement a `float', aka SFmode, fp library. If this is not + defined, then this file implements a `double', aka DFmode, fp library. + FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e. + don't include float->double conversion which requires the double library. + This is useful only for machines which can't support doubles, e.g. some + 8-bit processors. + CMPtype: Specify the type that floating point compares should return. + This defaults to SItype, aka int. + US_SOFTWARE_GOFAST: This makes all entry points use the same names as the + US Software goFast library. If this is not defined, the entry points use + the same names as libgcc1.c. + _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding + two integers to the FLO_union_type. + NO_NANS: Disable nan and infinity handling + SMALL_MACHINE: Useful when operations on QIs and HIs are faster + than on an SI */ + +/* We don't currently support extended floats (long doubles) on machines + without hardware to deal with them. + + These stubs are just to keep the linker from complaining about unresolved + references which can be pulled in from libio & libstdc++, even if the + user isn't using long doubles. However, they may generate an unresolved + external to abort if abort is not used by the function, and the stubs + are referenced from within libc, since libgcc goes before and after the + system library. 
*/ + +#ifdef EXTENDED_FLOAT_STUBS +__truncxfsf2 (){ abort(); } +__extendsfxf2 (){ abort(); } +__addxf3 (){ abort(); } +__divxf3 (){ abort(); } +__eqxf2 (){ abort(); } +__extenddfxf2 (){ abort(); } +__gtxf2 (){ abort(); } +__lexf2 (){ abort(); } +__ltxf2 (){ abort(); } +__mulxf3 (){ abort(); } +__negxf2 (){ abort(); } +__nexf2 (){ abort(); } +__subxf3 (){ abort(); } +__truncxfdf2 (){ abort(); } + +__trunctfsf2 (){ abort(); } +__extendsftf2 (){ abort(); } +__addtf3 (){ abort(); } +__divtf3 (){ abort(); } +__eqtf2 (){ abort(); } +__extenddftf2 (){ abort(); } +__gttf2 (){ abort(); } +__letf2 (){ abort(); } +__lttf2 (){ abort(); } +__multf3 (){ abort(); } +__negtf2 (){ abort(); } +__netf2 (){ abort(); } +__subtf3 (){ abort(); } +__trunctfdf2 (){ abort(); } +__gexf2 (){ abort(); } +__fixxfsi (){ abort(); } +__floatsixf (){ abort(); } +#else /* !EXTENDED_FLOAT_STUBS, rest of file */ + + +typedef float SFtype __attribute__ ((mode (SF))); +typedef float DFtype __attribute__ ((mode (DF))); + +typedef int HItype __attribute__ ((mode (HI))); +typedef int SItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); + +/* The type of the result of a fp compare */ +#ifndef CMPtype +#define CMPtype SItype +#endif + +typedef unsigned int UHItype __attribute__ ((mode (HI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef unsigned int UDItype __attribute__ ((mode (DI))); + +#define MAX_SI_INT ((SItype) ((unsigned) (~0)>>1)) +#define MAX_USI_INT ((USItype) ~0) + + +#ifdef FLOAT_ONLY +#define NO_DI_MODE +#endif + +#ifdef FLOAT +# define NGARDS 7L +# define GARDROUND 0x3f +# define GARDMASK 0x7f +# define GARDMSB 0x40 +# define EXPBITS 8 +# define EXPBIAS 127 +# define FRACBITS 23 +# define EXPMAX (0xff) +# define QUIET_NAN 0x100000L +# define FRAC_NBITS 32 +# define FRACHIGH 0x80000000L +# define FRACHIGH2 0xc0000000L +# define pack_d __pack_f +# define unpack_d __unpack_f +# define __fpcmp_parts __fpcmp_parts_f + typedef USItype fractype; + typedef UHItype halffractype; + typedef SFtype FLO_type; + typedef SItype intfrac; + +#else +# define PREFIXFPDP dp +# define PREFIXSFDF df +# define NGARDS 8L +# define GARDROUND 0x7f +# define GARDMASK 0xff +# define GARDMSB 0x80 +# define EXPBITS 11 +# define EXPBIAS 1023 +# define FRACBITS 52 +# define EXPMAX (0x7ff) +# define QUIET_NAN 0x8000000000000LL +# define FRAC_NBITS 64 +# define FRACHIGH 0x8000000000000000LL +# define FRACHIGH2 0xc000000000000000LL +# define pack_d __pack_d +# define unpack_d __unpack_d +# define __fpcmp_parts __fpcmp_parts_d + typedef UDItype fractype; + typedef USItype halffractype; + typedef DFtype FLO_type; + typedef DItype intfrac; +#endif + +#ifdef US_SOFTWARE_GOFAST +# ifdef FLOAT +# define add fpadd +# define sub fpsub +# define multiply fpmul +# define divide fpdiv +# define compare fpcmp +# define si_to_float sitofp +# define float_to_si fptosi +# define float_to_usi fptoui +# define negate __negsf2 +# define sf_to_df fptodp +# define dptofp dptofp +#else +# define add dpadd +# define sub dpsub +# define multiply dpmul +# define divide dpdiv +# define compare dpcmp +# define si_to_float litodp +# define float_to_si dptoli +# define float_to_usi dptoul +# define negate __negdf2 +# define df_to_sf dptofp +#endif +#else +# ifdef FLOAT +# define add __addsf3 +# define sub __subsf3 +# define multiply __mulsf3 +# define divide __divsf3 +# define compare __cmpsf2 +# define _eq_f2 __eqsf2 +# define _ne_f2 __nesf2 +# define _gt_f2 __gtsf2 +# define _ge_f2 __gesf2 +# define _lt_f2 __ltsf2 +# define 
_le_f2 __lesf2 +# define si_to_float __floatsisf +# define float_to_si __fixsfsi +# define float_to_usi __fixunssfsi +# define negate __negsf2 +# define sf_to_df __extendsfdf2 +#else +# define add __adddf3 +# define sub __subdf3 +# define multiply __muldf3 +# define divide __divdf3 +# define compare __cmpdf2 +# define _eq_f2 __eqdf2 +# define _ne_f2 __nedf2 +# define _gt_f2 __gtdf2 +# define _ge_f2 __gedf2 +# define _lt_f2 __ltdf2 +# define _le_f2 __ledf2 +# define si_to_float __floatsidf +# define float_to_si __fixdfsi +# define float_to_usi __fixunsdfsi +# define negate __negdf2 +# define df_to_sf __truncdfsf2 +# endif +#endif + + +#ifndef INLINE +#define INLINE __inline__ +#endif + +/* Preserve the sticky-bit when shifting fractions to the right. */ +#define LSHIFT(a) { a = (a & 1) | (a >> 1); } + +/* numeric parameters */ +/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa + of a float and of a double. Assumes there are only two float types. + (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS)) + */ +#define F_D_BITOFF (52+8-(23+7)) + + +#define NORMAL_EXPMIN (-(EXPBIAS)+1) +#define IMPLICIT_1 (1LL<<(FRACBITS+NGARDS)) +#define IMPLICIT_2 (1LL<<(FRACBITS+1+NGARDS)) + +/* common types */ + +typedef enum +{ + CLASS_SNAN, + CLASS_QNAN, + CLASS_ZERO, + CLASS_NUMBER, + CLASS_INFINITY +} fp_class_type; + +typedef struct +{ +#ifdef SMALL_MACHINE + char class; + unsigned char sign; + short normal_exp; +#else + fp_class_type class; + unsigned int sign; + int normal_exp; +#endif + + union + { + fractype ll; + halffractype l[2]; + } fraction; +} fp_number_type; + +typedef union +{ + FLO_type value; + fractype value_raw; + +#ifndef FLOAT + halffractype words[2]; +#endif + +#ifdef FLOAT_BIT_ORDER_MISMATCH + struct + { + fractype fraction:FRACBITS __attribute__ ((packed)); + unsigned int exp:EXPBITS __attribute__ ((packed)); + unsigned int sign:1 __attribute__ ((packed)); + } + bits; +#endif + +#ifdef _DEBUG_BITFLOAT + struct + { + unsigned int sign:1 __attribute__ ((packed)); + unsigned int exp:EXPBITS __attribute__ ((packed)); + fractype fraction:FRACBITS __attribute__ ((packed)); + } + bits_big_endian; + + struct + { + fractype fraction:FRACBITS __attribute__ ((packed)); + unsigned int exp:EXPBITS __attribute__ ((packed)); + unsigned int sign:1 __attribute__ ((packed)); + } + bits_little_endian; +#endif +} +FLO_union_type; + + +/* end of header */ + +/* IEEE "special" number predicates */ + +#ifdef NO_NANS + +#define nan() 0 +#define isnan(x) 0 +#define isinf(x) 0 +#else + +INLINE +static fp_number_type * +nan () +{ + static fp_number_type thenan; + + return &thenan; +} + +INLINE +static int +isnan ( fp_number_type * x) +{ + return x->class == CLASS_SNAN || x->class == CLASS_QNAN; +} + +INLINE +static int +isinf ( fp_number_type * x) +{ + return x->class == CLASS_INFINITY; +} + +#endif + +INLINE +static int +iszero ( fp_number_type * x) +{ + return x->class == CLASS_ZERO; +} + +INLINE +static void +flip_sign ( fp_number_type * x) +{ + x->sign = !x->sign; +} + +extern FLO_type pack_d ( fp_number_type * ); + +#if defined(L_pack_df) || defined(L_pack_sf) +FLO_type +pack_d ( fp_number_type * src) +{ + FLO_union_type dst; + fractype fraction = src->fraction.ll; /* wasn't unsigned before? 
*/ + int sign = src->sign; + int exp = 0; + + if (isnan (src)) + { + exp = EXPMAX; + if (src->class == CLASS_QNAN || 1) + { + fraction |= QUIET_NAN; + } + } + else if (isinf (src)) + { + exp = EXPMAX; + fraction = 0; + } + else if (iszero (src)) + { + exp = 0; + fraction = 0; + } + else if (fraction == 0) + { + exp = 0; + } + else + { + if (src->normal_exp < NORMAL_EXPMIN) + { + /* This number's exponent is too low to fit into the bits + available in the number, so we'll store 0 in the exponent and + shift the fraction to the right to make up for it. */ + + int shift = NORMAL_EXPMIN - src->normal_exp; + + exp = 0; + + if (shift > FRAC_NBITS - NGARDS) + { + /* No point shifting, since it's more that 64 out. */ + fraction = 0; + } + else + { + /* Shift by the value */ + fraction >>= shift; + } + fraction >>= NGARDS; + } + else if (src->normal_exp > EXPBIAS) + { + exp = EXPMAX; + fraction = 0; + } + else + { + exp = src->normal_exp + EXPBIAS; + /* IF the gard bits are the all zero, but the first, then we're + half way between two numbers, choose the one which makes the + lsb of the answer 0. */ + if ((fraction & GARDMASK) == GARDMSB) + { + if (fraction & (1 << NGARDS)) + fraction += GARDROUND + 1; + } + else + { + /* Add a one to the guards to round up */ + fraction += GARDROUND; + } + if (fraction >= IMPLICIT_2) + { + fraction >>= 1; + exp += 1; + } + fraction >>= NGARDS; + } + } + + /* We previously used bitfields to store the number, but this doesn't + handle little/big endian systems conveniently, so use shifts and + masks */ +#ifdef FLOAT_BIT_ORDER_MISMATCH + dst.bits.fraction = fraction; + dst.bits.exp = exp; + dst.bits.sign = sign; +#else + dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1); + dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS; + dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS | EXPBITS); +#endif + +#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT) + { + halffractype tmp = dst.words[0]; + dst.words[0] = dst.words[1]; + dst.words[1] = tmp; + } +#endif + + return dst.value; +} +#endif + +extern void unpack_d (FLO_union_type *, fp_number_type *); + +#if defined(L_unpack_df) || defined(L_unpack_sf) +void +unpack_d (FLO_union_type * src, fp_number_type * dst) +{ + /* We previously used bitfields to store the number, but this doesn't + handle little/big endian systems conveniently, so use shifts and + masks */ + fractype fraction; + int exp; + int sign; + +#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT) + FLO_union_type swapped; + + swapped.words[0] = src->words[1]; + swapped.words[1] = src->words[0]; + src = &swapped; +#endif + +#ifdef FLOAT_BIT_ORDER_MISMATCH + fraction = src->bits.fraction; + exp = src->bits.exp; + sign = src->bits.sign; +#else + fraction = src->value_raw & ((((fractype)1) << FRACBITS) - (fractype)1); + exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1); + sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1; +#endif + + dst->sign = sign; + if (exp == 0) + { + /* Hmm. Looks like 0 */ + if (fraction == 0) + { + /* tastes like zero */ + dst->class = CLASS_ZERO; + } + else + { + /* Zero exponent with non zero fraction - it's denormalized, + so there isn't a leading implicit one - we'll shift it so + it gets one. 
*/ + dst->normal_exp = exp - EXPBIAS + 1; + fraction <<= NGARDS; + + dst->class = CLASS_NUMBER; +#if 1 + while (fraction < IMPLICIT_1) + { + fraction <<= 1; + dst->normal_exp--; + } +#endif + dst->fraction.ll = fraction; + } + } + else if (exp == EXPMAX) + { + /* Huge exponent*/ + if (fraction == 0) + { + /* Attached to a zero fraction - means infinity */ + dst->class = CLASS_INFINITY; + } + else + { + /* Non zero fraction, means nan */ + if (fraction & QUIET_NAN) + { + dst->class = CLASS_QNAN; + } + else + { + dst->class = CLASS_SNAN; + } + /* Keep the fraction part as the nan number */ + dst->fraction.ll = fraction; + } + } + else + { + /* Nothing strange about this number */ + dst->normal_exp = exp - EXPBIAS; + dst->class = CLASS_NUMBER; + dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1; + } +} +#endif + +#if defined(L_addsub_sf) || defined(L_addsub_df) +static fp_number_type * +_fpadd_parts (fp_number_type * a, + fp_number_type * b, + fp_number_type * tmp) +{ + intfrac tfraction; + + /* Put commonly used fields in local variables. */ + int a_normal_exp; + int b_normal_exp; + fractype a_fraction; + fractype b_fraction; + + if (isnan (a)) + { + return a; + } + if (isnan (b)) + { + return b; + } + if (isinf (a)) + { + /* Adding infinities with opposite signs yields a NaN. */ + if (isinf (b) && a->sign != b->sign) + return nan (); + return a; + } + if (isinf (b)) + { + return b; + } + if (iszero (b)) + { + if (iszero (a)) + { + *tmp = *a; + tmp->sign = a->sign & b->sign; + return tmp; + } + return a; + } + if (iszero (a)) + { + return b; + } + + /* Got two numbers. shift the smaller and increment the exponent till + they're the same */ + { + int diff; + + a_normal_exp = a->normal_exp; + b_normal_exp = b->normal_exp; + a_fraction = a->fraction.ll; + b_fraction = b->fraction.ll; + + diff = a_normal_exp - b_normal_exp; + + if (diff < 0) + diff = -diff; + if (diff < FRAC_NBITS) + { + /* ??? This does shifts one bit at a time. Optimize. */ + while (a_normal_exp > b_normal_exp) + { + b_normal_exp++; + LSHIFT (b_fraction); + } + while (b_normal_exp > a_normal_exp) + { + a_normal_exp++; + LSHIFT (a_fraction); + } + } + else + { + /* Somethings's up.. 
choose the biggest */ + if (a_normal_exp > b_normal_exp) + { + b_normal_exp = a_normal_exp; + b_fraction = 0; + } + else + { + a_normal_exp = b_normal_exp; + a_fraction = 0; + } + } + } + + if (a->sign != b->sign) + { + if (a->sign) + { + tfraction = -a_fraction + b_fraction; + } + else + { + tfraction = a_fraction - b_fraction; + } + if (tfraction >= 0) + { + tmp->sign = 0; + tmp->normal_exp = a_normal_exp; + tmp->fraction.ll = tfraction; + } + else + { + tmp->sign = 1; + tmp->normal_exp = a_normal_exp; + tmp->fraction.ll = -tfraction; + } + /* and renormalize it */ + + while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll) + { + tmp->fraction.ll <<= 1; + tmp->normal_exp--; + } + } + else + { + tmp->sign = a->sign; + tmp->normal_exp = a_normal_exp; + tmp->fraction.ll = a_fraction + b_fraction; + } + tmp->class = CLASS_NUMBER; + /* Now the fraction is added, we have to shift down to renormalize the + number */ + + if (tmp->fraction.ll >= IMPLICIT_2) + { + LSHIFT (tmp->fraction.ll); + tmp->normal_exp++; + } + return tmp; + +} + +FLO_type +add (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + fp_number_type tmp; + fp_number_type *res; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + res = _fpadd_parts (&a, &b, &tmp); + + return pack_d (res); +} + +FLO_type +sub (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + fp_number_type tmp; + fp_number_type *res; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + b.sign ^= 1; + + res = _fpadd_parts (&a, &b, &tmp); + + return pack_d (res); +} +#endif + +#if defined(L_mul_sf) || defined(L_mul_df) +static INLINE fp_number_type * +_fpmul_parts ( fp_number_type * a, + fp_number_type * b, + fp_number_type * tmp) +{ + fractype low = 0; + fractype high = 0; + + if (isnan (a)) + { + a->sign = a->sign != b->sign; + return a; + } + if (isnan (b)) + { + b->sign = a->sign != b->sign; + return b; + } + if (isinf (a)) + { + if (iszero (b)) + return nan (); + a->sign = a->sign != b->sign; + return a; + } + if (isinf (b)) + { + if (iszero (a)) + { + return nan (); + } + b->sign = a->sign != b->sign; + return b; + } + if (iszero (a)) + { + a->sign = a->sign != b->sign; + return a; + } + if (iszero (b)) + { + b->sign = a->sign != b->sign; + return b; + } + + /* Calculate the mantissa by multiplying both 64bit numbers to get a + 128 bit number */ + { +#if defined(NO_DI_MODE) + { + fractype x = a->fraction.ll; + fractype ylow = b->fraction.ll; + fractype yhigh = 0; + int bit; + + /* ??? This does multiplies one bit at a time. Optimize. 
*/ + for (bit = 0; bit < FRAC_NBITS; bit++) + { + int carry; + + if (x & 1) + { + carry = (low += ylow) < ylow; + high += yhigh + carry; + } + yhigh <<= 1; + if (ylow & FRACHIGH) + { + yhigh |= 1; + } + ylow <<= 1; + x >>= 1; + } + } +#elif defined(FLOAT) + { + /* Multiplying two 32 bit numbers to get a 64 bit number on + a machine with DI, so we're safe */ + + DItype answer = (DItype)(a->fraction.ll) * (DItype)(b->fraction.ll); + + high = answer >> 32; + low = answer; + } +#else + /* Doing a 64*64 to 128 */ + { + UDItype nl = a->fraction.ll & 0xffffffff; + UDItype nh = a->fraction.ll >> 32; + UDItype ml = b->fraction.ll & 0xffffffff; + UDItype mh = b->fraction.ll >>32; + UDItype pp_ll = ml * nl; + UDItype pp_hl = mh * nl; + UDItype pp_lh = ml * nh; + UDItype pp_hh = mh * nh; + UDItype res2 = 0; + UDItype res0 = 0; + UDItype ps_hh__ = pp_hl + pp_lh; + if (ps_hh__ < pp_hl) + res2 += 0x100000000LL; + pp_hl = (ps_hh__ << 32) & 0xffffffff00000000LL; + res0 = pp_ll + pp_hl; + if (res0 < pp_ll) + res2++; + res2 += ((ps_hh__ >> 32) & 0xffffffffL) + pp_hh; + high = res2; + low = res0; + } +#endif + } + + tmp->normal_exp = a->normal_exp + b->normal_exp; + tmp->sign = a->sign != b->sign; +#ifdef FLOAT + tmp->normal_exp += 2; /* ??????????????? */ +#else + tmp->normal_exp += 4; /* ??????????????? */ +#endif + while (high >= IMPLICIT_2) + { + tmp->normal_exp++; + if (high & 1) + { + low >>= 1; + low |= FRACHIGH; + } + high >>= 1; + } + while (high < IMPLICIT_1) + { + tmp->normal_exp--; + + high <<= 1; + if (low & FRACHIGH) + high |= 1; + low <<= 1; + } + /* rounding is tricky. if we only round if it won't make us round later. */ +#if 0 + if (low & FRACHIGH2) + { + if (((high & GARDMASK) != GARDMSB) + && (((high + 1) & GARDMASK) == GARDMSB)) + { + /* don't round, it gets done again later. */ + } + else + { + high++; + } + } +#endif + if ((high & GARDMASK) == GARDMSB) + { + if (high & (1 << NGARDS)) + { + /* half way, so round to even */ + high += GARDROUND + 1; + } + else if (low) + { + /* but we really weren't half way */ + high += GARDROUND + 1; + } + } + tmp->fraction.ll = high; + tmp->class = CLASS_NUMBER; + return tmp; +} + +FLO_type +multiply (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + fp_number_type tmp; + fp_number_type *res; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + res = _fpmul_parts (&a, &b, &tmp); + + return pack_d (res); +} +#endif + +#if defined(L_div_sf) || defined(L_div_df) +static INLINE fp_number_type * +_fpdiv_parts (fp_number_type * a, + fp_number_type * b) +{ + fractype bit; + fractype numerator; + fractype denominator; + fractype quotient; + + if (isnan (a)) + { + return a; + } + if (isnan (b)) + { + return b; + } + + a->sign = a->sign ^ b->sign; + + if (isinf (a) || iszero (a)) + { + if (a->class == b->class) + return nan (); + return a; + } + + if (isinf (b)) + { + a->fraction.ll = 0; + a->normal_exp = 0; + return a; + } + if (iszero (b)) + { + a->class = CLASS_INFINITY; + return a; + } + + /* Calculate the mantissa by multiplying both 64bit numbers to get a + 128 bit number */ + { + /* quotient = + ( numerator / denominator) * 2^(numerator exponent - denominator exponent) + */ + + a->normal_exp = a->normal_exp - b->normal_exp; + numerator = a->fraction.ll; + denominator = b->fraction.ll; + + if (numerator < denominator) + { + /* Fraction will be less than 1.0 */ + numerator *= 2; + a->normal_exp--; + } + bit = IMPLICIT_1; + quotient = 0; + /* ??? Does divide one bit at a time. Optimize. 
*/ + while (bit) + { + if (numerator >= denominator) + { + quotient |= bit; + numerator -= denominator; + } + bit >>= 1; + numerator *= 2; + } + + if ((quotient & GARDMASK) == GARDMSB) + { + if (quotient & (1 << NGARDS)) + { + /* half way, so round to even */ + quotient += GARDROUND + 1; + } + else if (numerator) + { + /* but we really weren't half way, more bits exist */ + quotient += GARDROUND + 1; + } + } + + a->fraction.ll = quotient; + return (a); + } +} + +FLO_type +divide (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + fp_number_type *res; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + res = _fpdiv_parts (&a, &b); + + return pack_d (res); +} +#endif + +int __fpcmp_parts (fp_number_type * a, fp_number_type *b); + +#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df) +/* according to the demo, fpcmp returns a comparison with 0... thus + a -1 + a==b -> 0 + a>b -> +1 + */ + +int +__fpcmp_parts (fp_number_type * a, fp_number_type * b) +{ +#if 0 + /* either nan -> unordered. Must be checked outside of this routine. */ + if (isnan (a) && isnan (b)) + { + return 1; /* still unordered! */ + } +#endif + + if (isnan (a) || isnan (b)) + { + return 1; /* how to indicate unordered compare? */ + } + if (isinf (a) && isinf (b)) + { + /* +inf > -inf, but +inf != +inf */ + /* b \a| +inf(0)| -inf(1) + ______\+--------+-------- + +inf(0)| a==b(0)| ab(1) | a==b(0) + -------+--------+-------- + So since unordered must be non zero, just line up the columns... + */ + return b->sign - a->sign; + } + /* but not both... */ + if (isinf (a)) + { + return a->sign ? -1 : 1; + } + if (isinf (b)) + { + return b->sign ? 1 : -1; + } + if (iszero (a) && iszero (b)) + { + return 0; + } + if (iszero (a)) + { + return b->sign ? 1 : -1; + } + if (iszero (b)) + { + return a->sign ? -1 : 1; + } + /* now both are "normal". */ + if (a->sign != b->sign) + { + /* opposite signs */ + return a->sign ? -1 : 1; + } + /* same sign; exponents? */ + if (a->normal_exp > b->normal_exp) + { + return a->sign ? -1 : 1; + } + if (a->normal_exp < b->normal_exp) + { + return a->sign ? 1 : -1; + } + /* same exponents; check size. */ + if (a->fraction.ll > b->fraction.ll) + { + return a->sign ? -1 : 1; + } + if (a->fraction.ll < b->fraction.ll) + { + return a->sign ? 1 : -1; + } + /* after all that, they're equal. */ + return 0; +} +#endif + +#if defined(L_compare_sf) || defined(L_compare_df) +CMPtype +compare (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + return __fpcmp_parts (&a, &b); +} +#endif + +#ifndef US_SOFTWARE_GOFAST + +/* These should be optimized for their specific tasks someday. 
*/ + +#if defined(L_eq_sf) || defined(L_eq_df) +CMPtype +_eq_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return 1; /* false, truth == 0 */ + + return __fpcmp_parts (&a, &b) ; +} +#endif + +#if defined(L_ne_sf) || defined(L_ne_df) +CMPtype +_ne_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return 1; /* true, truth != 0 */ + + return __fpcmp_parts (&a, &b) ; +} +#endif + +#if defined(L_gt_sf) || defined(L_gt_df) +CMPtype +_gt_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return -1; /* false, truth > 0 */ + + return __fpcmp_parts (&a, &b); +} +#endif + +#if defined(L_ge_sf) || defined(L_ge_df) +CMPtype +_ge_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return -1; /* false, truth >= 0 */ + return __fpcmp_parts (&a, &b) ; +} +#endif + +#if defined(L_lt_sf) || defined(L_lt_df) +CMPtype +_lt_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return 1; /* false, truth < 0 */ + + return __fpcmp_parts (&a, &b); +} +#endif + +#if defined(L_le_sf) || defined(L_le_df) +CMPtype +_le_f2 (FLO_type arg_a, FLO_type arg_b) +{ + fp_number_type a; + fp_number_type b; + + unpack_d ((FLO_union_type *) & arg_a, &a); + unpack_d ((FLO_union_type *) & arg_b, &b); + + if (isnan (&a) || isnan (&b)) + return 1; /* false, truth <= 0 */ + + return __fpcmp_parts (&a, &b) ; +} +#endif + +#endif /* ! US_SOFTWARE_GOFAST */ + +#if defined(L_si_to_sf) || defined(L_si_to_df) +FLO_type +si_to_float (SItype arg_a) +{ + fp_number_type in; + + in.class = CLASS_NUMBER; + in.sign = arg_a < 0; + if (!arg_a) + { + in.class = CLASS_ZERO; + } + else + { + in.normal_exp = FRACBITS + NGARDS; + if (in.sign) + { + /* Special case for minint, since there is no +ve integer + representation for it */ + if (arg_a == (SItype) 0x80000000) + { + return -2147483648.0; + } + in.fraction.ll = (-arg_a); + } + else + in.fraction.ll = arg_a; + + while (in.fraction.ll < (1LL << (FRACBITS + NGARDS))) + { + in.fraction.ll <<= 1; + in.normal_exp -= 1; + } + } + return pack_d (&in); +} +#endif + +#if defined(L_sf_to_si) || defined(L_df_to_si) +SItype +float_to_si (FLO_type arg_a) +{ + fp_number_type a; + SItype tmp; + + unpack_d ((FLO_union_type *) & arg_a, &a); + if (iszero (&a)) + return 0; + if (isnan (&a)) + return 0; + /* get reasonable MAX_SI_INT... */ + if (isinf (&a)) + return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT; + /* it is a number, but a small one */ + if (a.normal_exp < 0) + return 0; + if (a.normal_exp > 30) + return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT; + tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp); + return a.sign ? 
(-tmp) : (tmp); +} +#endif + +#if defined(L_sf_to_usi) || defined(L_df_to_usi) +#ifdef US_SOFTWARE_GOFAST +/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines, + we also define them for GOFAST because the ones in libgcc2.c have the + wrong names and I'd rather define these here and keep GOFAST CYG-LOC's + out of libgcc2.c. We can't define these here if not GOFAST because then + there'd be duplicate copies. */ + +USItype +float_to_usi (FLO_type arg_a) +{ + fp_number_type a; + + unpack_d ((FLO_union_type *) & arg_a, &a); + if (iszero (&a)) + return 0; + if (isnan (&a)) + return 0; + /* it is a negative number */ + if (a.sign) + return 0; + /* get reasonable MAX_USI_INT... */ + if (isinf (&a)) + return MAX_USI_INT; + /* it is a number, but a small one */ + if (a.normal_exp < 0) + return 0; + if (a.normal_exp > 31) + return MAX_USI_INT; + else if (a.normal_exp > (FRACBITS + NGARDS)) + return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS)); + else + return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp); +} +#endif +#endif + +#if defined(L_negate_sf) || defined(L_negate_df) +FLO_type +negate (FLO_type arg_a) +{ + fp_number_type a; + + unpack_d ((FLO_union_type *) & arg_a, &a); + flip_sign (&a); + return pack_d (&a); +} +#endif + +#ifdef FLOAT + +#if defined(L_make_sf) +SFtype +__make_fp(fp_class_type class, + unsigned int sign, + int exp, + USItype frac) +{ + fp_number_type in; + + in.class = class; + in.sign = sign; + in.normal_exp = exp; + in.fraction.ll = frac; + return pack_d (&in); +} +#endif + +#ifndef FLOAT_ONLY + +/* This enables one to build an fp library that supports float but not double. + Otherwise, we would get an undefined reference to __make_dp. + This is needed for some 8-bit ports that can't handle well values that + are 8-bytes in size, so we just don't support double for them at all. */ + +extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype frac); + +#if defined(L_sf_to_df) +DFtype +sf_to_df (SFtype arg_a) +{ + fp_number_type in; + + unpack_d ((FLO_union_type *) & arg_a, &in); + return __make_dp (in.class, in.sign, in.normal_exp, + ((UDItype) in.fraction.ll) << F_D_BITOFF); +} +#endif + +#endif +#endif + +#ifndef FLOAT + +extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype); + +#if defined(L_make_df) +DFtype +__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac) +{ + fp_number_type in; + + in.class = class; + in.sign = sign; + in.normal_exp = exp; + in.fraction.ll = frac; + return pack_d (&in); +} +#endif + +#if defined(L_df_to_sf) +SFtype +df_to_sf (DFtype arg_a) +{ + fp_number_type in; + USItype sffrac; + + unpack_d ((FLO_union_type *) & arg_a, &in); + + sffrac = in.fraction.ll >> F_D_BITOFF; + + /* We set the lowest guard bit in SFFRAC if we discarded any non + zero bits. */ + if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0) + sffrac |= 1; + + return __make_fp (in.class, in.sign, in.normal_exp, sffrac); +} +#endif + +#endif +#endif /* !EXTENDED_FLOAT_STUBS */ diff --git a/libgcc/lib1thumb.asm b/libgcc/lib1thumb.asm new file mode 100755 index 0000000..e0ff746 --- /dev/null +++ b/libgcc/lib1thumb.asm @@ -0,0 +1,736 @@ +@ libgcc1 routines for ARM cpu. +@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) + +/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc. 
+ +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. */ + + .code 16 + +#ifdef __elf__ +#define __PLT__ (PLT) +#define TYPE(x) .type SYM(x),function +#define SIZE(x) .size SYM(x), . - SYM(x) +#else +#define __PLT__ +#define TYPE(x) +#define SIZE(x) +#endif + +#define RET mov pc, lr + +#define SYM(x) x + +work .req r4 @ XXXX is this safe ? + +#ifdef L_udivsi3 + +dividend .req r0 +divisor .req r1 +result .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + + .text + .globl SYM (__udivsi3) + TYPE (__udivsi3) + .align 0 + .thumb_func +SYM (__udivsi3): + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + mov result, #0 + + push { work } + cmp dividend, divisor + bcc Lgot_result + + @ Load the constant 0x10000000 into our work register + mov work, #1 + lsl work, #28 +Loop1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, work + bcs Lbignum + cmp divisor, dividend + bcs Lbignum + lsl divisor, #4 + lsl curbit, #4 + b Loop1 + +Lbignum: + @ Set work to 0x80000000 + lsl work, #3 +Loop2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, work + bcs Loop3 + cmp divisor, dividend + bcs Loop3 + lsl divisor, #1 + lsl curbit, #1 + b Loop2 + +Loop3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. 
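+	@ Worked example (editor's illustration, not in the original source):
+	@ for dividend = 100 and divisor = 7 the setup loops above leave
+	@ divisor = 112 and curbit = 16; the first pass below subtracts
+	@ 56, 28 and 14 and ORs bits 8, 4 and 2 into result, and after one
+	@ more pass with divisor = 7 the quotient returned in r0 is 14.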
+ cmp dividend, divisor + bcc Over1 + sub dividend, dividend, divisor + orr result, result, curbit +Over1: + lsr work, divisor, #1 + cmp dividend, work + bcc Over2 + sub dividend, dividend, work + lsr work, curbit, #1 + orr result, work +Over2: + lsr work, divisor, #2 + cmp dividend, work + bcc Over3 + sub dividend, dividend, work + lsr work, curbit, #2 + orr result, work +Over3: + lsr work, divisor, #3 + cmp dividend, work + bcc Over4 + sub dividend, dividend, work + lsr work, curbit, #3 + orr result, work +Over4: + cmp dividend, #0 @ Early termination? + beq Lgot_result + lsr curbit, #4 @ No, any more bits to do? + beq Lgot_result + lsr divisor, #4 + b Loop3 +Lgot_result: + mov r0, result + pop { work } + RET + +Ldiv0: + push { lr } + bl SYM (__div0) __PLT__ + mov r0, #0 @ about as wrong as it could be + pop { pc } + + SIZE (__udivsi3) + +#endif /* L_udivsi3 */ + +#ifdef L_umodsi3 + +dividend .req r0 +divisor .req r1 +overdone .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + + .text + .globl SYM (__umodsi3) + TYPE (__umodsi3) + .align 0 + .thumb_func +SYM (__umodsi3): + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + cmp dividend, divisor + bcs Over1 + RET + +Over1: + @ Load the constant 0x10000000 into our work register + push { work } + mov work, #1 + lsl work, #28 +Loop1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, work + bcs Lbignum + cmp divisor, dividend + bcs Lbignum + lsl divisor, #4 + lsl curbit, #4 + b Loop1 + +Lbignum: + @ Set work to 0x80000000 + lsl work, #3 +Loop2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, work + bcs Loop3 + cmp divisor, dividend + bcs Loop3 + lsl divisor, #1 + lsl curbit, #1 + b Loop2 + +Loop3: + @ Test for possible subtractions. On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + bcc Over2 + sub dividend, dividend, divisor +Over2: + lsr work, divisor, #1 + cmp dividend, work + bcc Over3 + sub dividend, dividend, work + mov ip, curbit + mov work, #1 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over3: + lsr work, divisor, #2 + cmp dividend, work + bcc Over4 + sub dividend, dividend, work + mov ip, curbit + mov work, #2 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over4: + lsr work, divisor, #3 + cmp dividend, work + bcc Over5 + sub dividend, dividend, work + mov ip, curbit + mov work, #3 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over5: + mov ip, curbit + cmp dividend, #0 @ Early termination? + beq Over6 + lsr curbit, #4 @ No, any more bits to do? + beq Over6 + lsr divisor, #4 + b Loop3 + +Over6: + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. 
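+	@ Note (editor's illustration, not in the original source): an unwanted
+	@ subtraction of divisor>>N can only happen on the final pass, when the
+	@ matching quotient bit curbit>>N would fall below bit 0.  Recording it
+	@ as curbit rotated right by N parks it in one of the top three bits of
+	@ overdone, so the 0xe0000000 mask below isolates exactly those cases
+	@ and the corresponding divisor>>N amounts are added back to the dividend.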
+ + mov work, #0xe + lsl work, #28 + and overdone, work + bne Over7 + pop { work } + RET @ No fixups needed +Over7: + mov curbit, ip + mov work, #3 + ror curbit, work + tst overdone, curbit + beq Over8 + lsr work, divisor, #3 + add dividend, dividend, work +Over8: + mov curbit, ip + mov work, #2 + ror curbit, work + tst overdone, curbit + beq Over9 + lsr work, divisor, #2 + add dividend, dividend, work +Over9: + mov curbit, ip + mov work, #1 + ror curbit, work + tst overdone, curbit + beq Over10 + lsr work, divisor, #1 + add dividend, dividend, work +Over10: + pop { work } + RET + +Ldiv0: + push { lr } + bl SYM (__div0) __PLT__ + mov r0, #0 @ about as wrong as it could be + pop { pc } + + SIZE (__umodsi3) + +#endif /* L_umodsi3 */ + +#ifdef L_divsi3 + +dividend .req r0 +divisor .req r1 +result .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + + .text + .globl SYM (__divsi3) + TYPE (__divsi3) + .align 0 + .thumb_func +SYM (__divsi3): + cmp divisor, #0 + beq Ldiv0 + + push { work } + mov work, dividend + eor work, divisor @ Save the sign of the result. + mov ip, work + mov curbit, #1 + mov result, #0 + cmp divisor, #0 + bpl Over1 + neg divisor, divisor @ Loops below use unsigned. +Over1: + cmp dividend, #0 + bpl Over2 + neg dividend, dividend +Over2: + cmp dividend, divisor + bcc Lgot_result + + mov work, #1 + lsl work, #28 +Loop1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, work + Bcs Lbignum + cmp divisor, dividend + Bcs Lbignum + lsl divisor, #4 + lsl curbit, #4 + b Loop1 + +Lbignum: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + lsl work, #3 +Loop2: + cmp divisor, work + Bcs Loop3 + cmp divisor, dividend + Bcs Loop3 + lsl divisor, #1 + lsl curbit, #1 + b Loop2 + +Loop3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + Bcc Over3 + sub dividend, dividend, divisor + orr result, result, curbit +Over3: + lsr work, divisor, #1 + cmp dividend, work + Bcc Over4 + sub dividend, dividend, work + lsr work, curbit, #1 + orr result, work +Over4: + lsr work, divisor, #2 + cmp dividend, work + Bcc Over5 + sub dividend, dividend, work + lsr work, curbit, #2 + orr result, result, work +Over5: + lsr work, divisor, #3 + cmp dividend, work + Bcc Over6 + sub dividend, dividend, work + lsr work, curbit, #3 + orr result, result, work +Over6: + cmp dividend, #0 @ Early termination? + Beq Lgot_result + lsr curbit, #4 @ No, any more bits to do? + Beq Lgot_result + lsr divisor, #4 + b Loop3 + +Lgot_result: + mov r0, result + mov work, ip + cmp work, #0 + Bpl Over7 + neg r0, r0 +Over7: + pop { work } + RET + +Ldiv0: + push { lr } + bl SYM (__div0) __PLT__ + mov r0, #0 @ about as wrong as it could be + pop { pc } + + SIZE (__divsi3) + +#endif /* L_divsi3 */ + +#ifdef L_modsi3 + +dividend .req r0 +divisor .req r1 +overdone .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + + .text + .globl SYM (__modsi3) + TYPE (__modsi3) + .align 0 + .thumb_func +SYM (__modsi3): + mov curbit, #1 + cmp divisor, #0 + beq Ldiv0 + Bpl Over1 + neg divisor, divisor @ Loops below use unsigned. 
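+	@ Sketch (editor's illustration, not in the original source): both
+	@ operands are made positive for the unsigned loops, and the result
+	@ then takes the sign of the original dividend, which is saved on the
+	@ stack just below.  E.g. -7 % 3: the loops compute 7 % 3 = 1 and the
+	@ saved negative dividend turns the answer into -1, matching C semantics.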
+Over1: + push { work } + @ Need to save the sign of the dividend, unfortunately, we need + @ ip later on. Must do this after saving the original value of + @ the work register, because we will pop this value off first. + push { dividend } + cmp dividend, #0 + Bpl Over2 + neg dividend, dividend +Over2: + cmp dividend, divisor + bcc Lgot_result + mov work, #1 + lsl work, #28 +Loop1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, work + bcs Lbignum + cmp divisor, dividend + bcs Lbignum + lsl divisor, #4 + lsl curbit, #4 + b Loop1 + +Lbignum: + @ Set work to 0x80000000 + lsl work, #3 +Loop2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, work + bcs Loop3 + cmp divisor, dividend + bcs Loop3 + lsl divisor, #1 + lsl curbit, #1 + b Loop2 + +Loop3: + @ Test for possible subtractions. On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + bcc Over3 + sub dividend, dividend, divisor +Over3: + lsr work, divisor, #1 + cmp dividend, work + bcc Over4 + sub dividend, dividend, work + mov ip, curbit + mov work, #1 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over4: + lsr work, divisor, #2 + cmp dividend, work + bcc Over5 + sub dividend, dividend, work + mov ip, curbit + mov work, #2 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over5: + lsr work, divisor, #3 + cmp dividend, work + bcc Over6 + sub dividend, dividend, work + mov ip, curbit + mov work, #3 + ror curbit, work + orr overdone, curbit + mov curbit, ip +Over6: + mov ip, curbit + cmp dividend, #0 @ Early termination? + beq Over7 + lsr curbit, #4 @ No, any more bits to do? + beq Over7 + lsr divisor, #4 + b Loop3 + +Over7: + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. + mov work, #0xe + lsl work, #28 + and overdone, work + beq Lgot_result + + mov curbit, ip + mov work, #3 + ror curbit, work + tst overdone, curbit + beq Over8 + lsr work, divisor, #3 + add dividend, dividend, work +Over8: + mov curbit, ip + mov work, #2 + ror curbit, work + tst overdone, curbit + beq Over9 + lsr work, divisor, #2 + add dividend, dividend, work +Over9: + mov curbit, ip + mov work, #1 + ror curbit, work + tst overdone, curbit + beq Lgot_result + lsr work, divisor, #1 + add dividend, dividend, work +Lgot_result: + pop { work } + cmp work, #0 + bpl Over10 + neg dividend, dividend +Over10: + pop { work } + RET + +Ldiv0: + push { lr } + bl SYM (__div0) __PLT__ + mov r0, #0 @ about as wrong as it could be + pop { pc } + + SIZE (__modsi3) + +#endif /* L_modsi3 */ + +#ifdef L_dvmd_tls + + .globl SYM (__div0) + TYPE (__div0) + .align 0 + .thumb_func +SYM (__div0): + RET + + SIZE (__div0) + +#endif /* L_divmodsi_tools */ + + +#ifdef L_call_via_rX + +/* These labels & instructions are used by the Arm/Thumb interworking code. + The address of function to be called is loaded into a register and then + one of these labels is called via a BL instruction. 
This puts the + return address into the link register with the bottom bit set, and the + code here switches to the correct mode before executing the function. */ + + .text + .align 0 + +.macro call_via register + .globl SYM (_call_via_\register) + TYPE (_call_via_\register) + .thumb_func +SYM (_call_via_\register): + bx \register + nop + + SIZE (_call_via_\register) +.endm + + call_via r0 + call_via r1 + call_via r2 + call_via r3 + call_via r4 + call_via r5 + call_via r6 + call_via r7 + call_via r8 + call_via r9 + call_via sl + call_via fp + call_via ip + call_via sp + call_via lr + +#endif /* L_call_via_rX */ + +#ifdef L_interwork_call_via_rX + +/* These labels & instructions are used by the Arm/Thumb interworking code, + when the target address is in an unknown instruction set. The address + of function to be called is loaded into a register and then one of these + labels is called via a BL instruction. This puts the return address + into the link register with the bottom bit set, and the code here + switches to the correct mode before executing the function. Unfortunately + the target code cannot be relied upon to return via a BX instruction, so + instead we have to store the resturn address on the stack and allow the + called function to return here instead. Upon return we recover the real + return address and use a BX to get back to Thumb mode. */ + + .text + .align 0 + + .code 32 + .globl _arm_return +_arm_return: + ldmia r13!, {r12} + bx r12 + +.macro interwork register + .code 16 + + .globl SYM (_interwork_call_via_\register) + TYPE (_interwork_call_via_\register) + .thumb_func +SYM (_interwork_call_via_\register): + bx pc + nop + + .code 32 + .globl .Lchange_\register +.Lchange_\register: + tst \register, #1 + stmeqdb r13!, {lr} + adreq lr, _arm_return + bx \register + + SIZE (_interwork_call_via_\register) +.endm + + interwork r0 + interwork r1 + interwork r2 + interwork r3 + interwork r4 + interwork r5 + interwork r6 + interwork r7 + interwork r8 + interwork r9 + interwork sl + interwork fp + interwork ip + interwork sp + + /* The lr case has to be handled a little differently...*/ + .code 16 + .globl SYM (_interwork_call_via_lr) + TYPE (_interwork_call_via_lr) + .thumb_func +SYM (_interwork_call_via_lr): + bx pc + nop + + .code 32 + .globl .Lchange_lr +.Lchange_lr: + tst lr, #1 + stmeqdb r13!, {lr} + mov ip, lr + adreq lr, _arm_return + bx ip + + SIZE (_interwork_call_via_lr) + +#endif /* L_interwork_call_via_rX */ diff --git a/libgcc/libgcc1-test.c b/libgcc/libgcc1-test.c new file mode 100755 index 0000000..0f59cbe --- /dev/null +++ b/libgcc/libgcc1-test.c @@ -0,0 +1,117 @@ +/* This small function uses all the arithmetic operators that + libgcc1.c can handle. If you can link it, then + you have provided replacements for all the libgcc1.c functions that + your target machine needs. */ + +int foo (); +double dfoo (); + +/* We don't want __main here because that can drag in atexit (among other + things) which won't necessarily exist yet. 
*/ + +main_without__main () +{ + int a = foo (), b = foo (); + unsigned int au = foo (), bu = foo (); + float af = dfoo (), bf = dfoo (); + double ad = dfoo (), bd = dfoo (); + + discard (a * b); + discard (a / b); + discard (a % b); + + discard (au / bu); + discard (au % bu); + + discard (a >> b); + discard (a << b); + + discard (au >> bu); + discard (au << bu); + + ddiscard (ad + bd); + ddiscard (ad - bd); + ddiscard (ad * bd); + ddiscard (ad / bd); + ddiscard (-ad); + + ddiscard (af + bf); + ddiscard (af - bf); + ddiscard (af * bf); + ddiscard (af / bf); + ddiscard (-af); + + discard ((int) ad); + discard ((int) af); + + ddiscard ((double) a); + ddiscard ((float) a); + ddiscard ((float) ad); + + discard (ad == bd); + discard (ad < bd); + discard (ad > bd); + discard (ad != bd); + discard (ad <= bd); + discard (ad >= bd); + + discard (af == bf); + discard (af < bf); + discard (af > bf); + discard (af != bf); + discard (af <= bf); + discard (af >= bf); + + return 0; +} + +discard (x) + int x; +{} + +ddiscard (x) + double x; +{} + +foo () +{ + static int table[] = {20, 69, 4, 12}; + static int idx; + + return table[idx++]; +} + +double +dfoo () +{ + static double table[] = {20.4, 69.96, 4.4, 202.202}; + static int idx; + + return table[idx++]; +} + +/* Provide functions that some versions of the linker use to default + the start address if -e symbol is not used, to avoid the warning + message saying the start address is defaulted. */ +extern void start() __asm__("start"); +extern void _start() __asm__("_start"); +extern void __start() __asm__("__start"); + +/* Provide functions that might be needed by soft-float emulation routines. */ +void memcpy() {} + +void start() {} +void _start() {} +void __start() {} +void mainCRTStartup() {} + +/* CYGNUS LOCAL - duplicate definition of memcpy() removed. */ + +/* CYGNUS LOCAL v850 */ +#if defined __v850e__ || defined __v850ea__ +/* We need to use the symbol __ctbp in order to force the linker to define it. */ +extern int _ctbp; + +void _func() { _ctbp = 1; } +#endif +/* END CYGNUS LOCAL */ diff --git a/libgcc/libgcc1.c b/libgcc/libgcc1.c new file mode 100755 index 0000000..bece500 --- /dev/null +++ b/libgcc/libgcc1.c @@ -0,0 +1,596 @@ +/* Subroutines needed by GCC output code on some machines. */ +/* Compile this file with the Unix C compiler! */ +/* Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. 
If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. */ + +#include "config.h" + +/* Don't use `fancy_abort' here even if config.h says to use it. */ +#ifdef abort +#undef abort +#endif + +/* On some machines, cc is really GCC. For these machines, we can't + expect these functions to be properly compiled unless GCC open codes + the operation (which is precisely when the function won't be used). + So allow tm.h to specify ways of accomplishing the operations + by defining the macros perform_*. + + On a machine where cc is some other compiler, there is usually no + reason to define perform_*. The other compiler normally has other ways + of implementing all of these operations. + + In some cases a certain machine may come with GCC installed as cc + or may have some other compiler. Then it may make sense for tm.h + to define perform_* only if __GNUC__ is defined. */ + +#ifndef perform_mulsi3 +#define perform_mulsi3(a, b) return a * b +#endif + +#ifndef perform_divsi3 +#define perform_divsi3(a, b) return a / b +#endif + +#ifndef perform_udivsi3 +#define perform_udivsi3(a, b) return a / b +#endif + +#ifndef perform_modsi3 +#define perform_modsi3(a, b) return a % b +#endif + +#ifndef perform_umodsi3 +#define perform_umodsi3(a, b) return a % b +#endif + +#ifndef perform_lshrsi3 +#define perform_lshrsi3(a, b) return a >> b +#endif + +#ifndef perform_ashrsi3 +#define perform_ashrsi3(a, b) return a >> b +#endif + +#ifndef perform_ashlsi3 +#define perform_ashlsi3(a, b) return a << b +#endif + +#ifndef perform_adddf3 +#define perform_adddf3(a, b) return a + b +#endif + +#ifndef perform_subdf3 +#define perform_subdf3(a, b) return a - b +#endif + +#ifndef perform_muldf3 +#define perform_muldf3(a, b) return a * b +#endif + +#ifndef perform_divdf3 +#define perform_divdf3(a, b) return a / b +#endif + +#ifndef perform_addsf3 +#define perform_addsf3(a, b) return INTIFY (a + b) +#endif + +#ifndef perform_subsf3 +#define perform_subsf3(a, b) return INTIFY (a - b) +#endif + +#ifndef perform_mulsf3 +#define perform_mulsf3(a, b) return INTIFY (a * b) +#endif + +#ifndef perform_divsf3 +#define perform_divsf3(a, b) return INTIFY (a / b) +#endif + +#ifndef perform_negdf2 +#define perform_negdf2(a) return -a +#endif + +#ifndef perform_negsf2 +#define perform_negsf2(a) return INTIFY (-a) +#endif + +#ifndef perform_fixdfsi +#define perform_fixdfsi(a) return (nongcc_SI_type) a; +#endif + +#ifndef perform_fixsfsi +#define perform_fixsfsi(a) return (nongcc_SI_type) a +#endif + +#ifndef perform_floatsidf +#define perform_floatsidf(a) return (double) a +#endif + +#ifndef perform_floatsisf +#define perform_floatsisf(a) return INTIFY ((float) a) +#endif + +#ifndef perform_extendsfdf2 +#define perform_extendsfdf2(a) return a +#endif + +#ifndef perform_truncdfsf2 +#define perform_truncdfsf2(a) return INTIFY (a) +#endif + +/* Note that eqdf2 returns a value for "true" that is == 0, + nedf2 returns a value for "true" that is != 0, + gtdf2 returns a value for "true" that is > 0, + and so on. 
*/ + +#ifndef perform_eqdf2 +#define perform_eqdf2(a, b) return !(a == b) +#endif + +#ifndef perform_nedf2 +#define perform_nedf2(a, b) return a != b +#endif + +#ifndef perform_gtdf2 +#define perform_gtdf2(a, b) return a > b +#endif + +#ifndef perform_gedf2 +#define perform_gedf2(a, b) return (a >= b) - 1 +#endif + +#ifndef perform_ltdf2 +#define perform_ltdf2(a, b) return -(a < b) +#endif + +#ifndef perform_ledf2 +#define perform_ledf2(a, b) return 1 - (a <= b) +#endif + +#ifndef perform_eqsf2 +#define perform_eqsf2(a, b) return !(a == b) +#endif + +#ifndef perform_nesf2 +#define perform_nesf2(a, b) return a != b +#endif + +#ifndef perform_gtsf2 +#define perform_gtsf2(a, b) return a > b +#endif + +#ifndef perform_gesf2 +#define perform_gesf2(a, b) return (a >= b) - 1 +#endif + +#ifndef perform_ltsf2 +#define perform_ltsf2(a, b) return -(a < b) +#endif + +#ifndef perform_lesf2 +#define perform_lesf2(a, b) return 1 - (a <= b); +#endif + +/* Define the C data type to use for an SImode value. */ + +#ifndef nongcc_SI_type +#define nongcc_SI_type long int +#endif + +/* Define the C data type to use for a value of word size */ +#ifndef nongcc_word_type +#define nongcc_word_type nongcc_SI_type +#endif + +/* Define the type to be used for returning an SF mode value + and the method for turning a float into that type. + These definitions work for machines where an SF value is + returned in the same register as an int. */ + +#ifndef FLOAT_VALUE_TYPE +#define FLOAT_VALUE_TYPE int +#endif + +#ifndef INTIFY +#define INTIFY(FLOATVAL) (intify.f = (FLOATVAL), intify.i) +#endif + +#ifndef FLOATIFY +#define FLOATIFY(INTVAL) ((INTVAL).f) +#endif + +#ifndef FLOAT_ARG_TYPE +#define FLOAT_ARG_TYPE union flt_or_int +#endif + +union flt_or_value { FLOAT_VALUE_TYPE i; float f; }; + +union flt_or_int { int i; float f; }; + + +#ifdef L_mulsi3 +nongcc_SI_type +__mulsi3 (a, b) + nongcc_SI_type a, b; +{ + perform_mulsi3 (a, b); +} +#endif + +#ifdef L_udivsi3 +nongcc_SI_type +__udivsi3 (a, b) + unsigned nongcc_SI_type a, b; +{ + perform_udivsi3 (a, b); +} +#endif + +#ifdef L_divsi3 +nongcc_SI_type +__divsi3 (a, b) + nongcc_SI_type a, b; +{ + perform_divsi3 (a, b); +} +#endif + +#ifdef L_umodsi3 +nongcc_SI_type +__umodsi3 (a, b) + unsigned nongcc_SI_type a, b; +{ + perform_umodsi3 (a, b); +} +#endif + +#ifdef L_modsi3 +nongcc_SI_type +__modsi3 (a, b) + nongcc_SI_type a, b; +{ + perform_modsi3 (a, b); +} +#endif + +#ifdef L_lshrsi3 +nongcc_SI_type +__lshrsi3 (a, b) + unsigned nongcc_SI_type a, b; +{ + perform_lshrsi3 (a, b); +} +#endif + +#ifdef L_ashrsi3 +nongcc_SI_type +__ashrsi3 (a, b) + nongcc_SI_type a, b; +{ + perform_ashrsi3 (a, b); +} +#endif + +#ifdef L_ashlsi3 +nongcc_SI_type +__ashlsi3 (a, b) + nongcc_SI_type a, b; +{ + perform_ashlsi3 (a, b); +} +#endif + +#ifdef L_divdf3 +double +__divdf3 (a, b) + double a, b; +{ + perform_divdf3 (a, b); +} +#endif + +#ifdef L_muldf3 +double +__muldf3 (a, b) + double a, b; +{ + perform_muldf3 (a, b); +} +#endif + +#ifdef L_negdf2 +double +__negdf2 (a) + double a; +{ + perform_negdf2 (a); +} +#endif + +#ifdef L_adddf3 +double +__adddf3 (a, b) + double a, b; +{ + perform_adddf3 (a, b); +} +#endif + +#ifdef L_subdf3 +double +__subdf3 (a, b) + double a, b; +{ + perform_subdf3 (a, b); +} +#endif + +/* Note that eqdf2 returns a value for "true" that is == 0, + nedf2 returns a value for "true" that is != 0, + gtdf2 returns a value for "true" that is > 0, + and so on. */ + +#ifdef L_eqdf2 +nongcc_word_type +__eqdf2 (a, b) + double a, b; +{ + /* Value == 0 iff a == b. 
*/ + perform_eqdf2 (a, b); +} +#endif + +#ifdef L_nedf2 +nongcc_word_type +__nedf2 (a, b) + double a, b; +{ + /* Value != 0 iff a != b. */ + perform_nedf2 (a, b); +} +#endif + +#ifdef L_gtdf2 +nongcc_word_type +__gtdf2 (a, b) + double a, b; +{ + /* Value > 0 iff a > b. */ + perform_gtdf2 (a, b); +} +#endif + +#ifdef L_gedf2 +nongcc_word_type +__gedf2 (a, b) + double a, b; +{ + /* Value >= 0 iff a >= b. */ + perform_gedf2 (a, b); +} +#endif + +#ifdef L_ltdf2 +nongcc_word_type +__ltdf2 (a, b) + double a, b; +{ + /* Value < 0 iff a < b. */ + perform_ltdf2 (a, b); +} +#endif + +#ifdef L_ledf2 +nongcc_word_type +__ledf2 (a, b) + double a, b; +{ + /* Value <= 0 iff a <= b. */ + perform_ledf2 (a, b); +} +#endif + +#ifdef L_fixdfsi +nongcc_SI_type +__fixdfsi (a) + double a; +{ + perform_fixdfsi (a); +} +#endif + +#ifdef L_fixsfsi +nongcc_SI_type +__fixsfsi (a) + FLOAT_ARG_TYPE a; +{ + union flt_or_value intify; + perform_fixsfsi (FLOATIFY (a)); +} +#endif + +#ifdef L_floatsidf +double +__floatsidf (a) + nongcc_SI_type a; +{ + perform_floatsidf (a); +} +#endif + +#ifdef L_floatsisf +FLOAT_VALUE_TYPE +__floatsisf (a) + nongcc_SI_type a; +{ + union flt_or_value intify; + perform_floatsisf (a); +} +#endif + +#ifdef L_addsf3 +FLOAT_VALUE_TYPE +__addsf3 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_value intify; + perform_addsf3 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_negsf2 +FLOAT_VALUE_TYPE +__negsf2 (a) + FLOAT_ARG_TYPE a; +{ + union flt_or_value intify; + perform_negsf2 (FLOATIFY (a)); +} +#endif + +#ifdef L_subsf3 +FLOAT_VALUE_TYPE +__subsf3 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_value intify; + perform_subsf3 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_eqsf2 +nongcc_word_type +__eqsf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value == 0 iff a == b. */ + perform_eqsf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_nesf2 +nongcc_word_type +__nesf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value != 0 iff a != b. */ + perform_nesf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_gtsf2 +nongcc_word_type +__gtsf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value > 0 iff a > b. */ + perform_gtsf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_gesf2 +nongcc_word_type +__gesf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value >= 0 iff a >= b. */ + perform_gesf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_ltsf2 +nongcc_word_type +__ltsf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value < 0 iff a < b. */ + perform_ltsf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_lesf2 +nongcc_word_type +__lesf2 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_int intify; + /* Value <= 0 iff a <= b. 
*/ + perform_lesf2 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_mulsf3 +FLOAT_VALUE_TYPE +__mulsf3 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_value intify; + perform_mulsf3 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_divsf3 +FLOAT_VALUE_TYPE +__divsf3 (a, b) + FLOAT_ARG_TYPE a, b; +{ + union flt_or_value intify; + perform_divsf3 (FLOATIFY (a), FLOATIFY (b)); +} +#endif + +#ifdef L_truncdfsf2 +FLOAT_VALUE_TYPE +__truncdfsf2 (a) + double a; +{ + union flt_or_value intify; + perform_truncdfsf2 (a); +} +#endif + +#ifdef L_extendsfdf2 +double +__extendsfdf2 (a) + FLOAT_ARG_TYPE a; +{ + union flt_or_value intify; + perform_extendsfdf2 (FLOATIFY (a)); +} +#endif diff --git a/libgcc/libgcc2.c b/libgcc/libgcc2.c new file mode 100755 index 0000000..cf7231f --- /dev/null +++ b/libgcc/libgcc2.c @@ -0,0 +1,946 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. */ + +#include + +/* Don't use `fancy_abort' here even if config.h says to use it. */ +#ifdef abort +#undef abort +#endif + +/* In the first part of this file, we are interfacing to calls generated + by the compiler itself. These calls pass values into these routines + which have very specific modes (rather than very specific types), and + these compiler-generated calls also expect any return values to have + very specific modes (rather than very specific types). Thus, we need + to avoid using regular C language type names in this part of the file + because the sizes for those types can be configured to be anything. + Instead we use the following special type names. */ + +typedef unsigned int UQItype __attribute__ ((mode (QI))); +typedef int SItype __attribute__ ((mode (SI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); +typedef unsigned int UDItype __attribute__ ((mode (DI))); + +typedef float SFtype __attribute__ ((mode (SF))); +typedef float DFtype __attribute__ ((mode (DF))); + +typedef int word_type __attribute__ ((mode (__word__))); + +/* Make sure that we don't accidentally use any normal C language built-in + type names in the first part of this file. Instead we want to use *only* + the type names defined above. 
The following macro definitions insure + that if we *do* accidentally use some normal C language built-in type name, + we will get a syntax error. */ + +#define char bogus_type +#define short bogus_type +#define int bogus_type +#define long bogus_type +#define unsigned bogus_type +#define float bogus_type +#define double bogus_type + +#define SI_TYPE_SIZE (sizeof (SItype) * 8) + +struct DIstruct {SItype low, high;}; + +/* We need this union to unpack/pack DImode values, since we don't have + any arithmetic yet. Incoming DImode parameters are stored into the + `ll' field, and the unpacked result is read from the struct `s'. */ + +typedef union +{ + struct DIstruct s; + DItype ll; +} DIunion; + +#if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\ + || defined (L_divdi3) || defined (L_udivdi3) \ + || defined (L_moddi3) || defined (L_umoddi3)) + +#include "longlong.h" + +#endif /* udiv or mul */ + +extern DItype __fixunssfdi (SFtype a); +extern DItype __fixunsdfdi (DFtype a); + +#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3) +#if defined (L_divdi3) || defined (L_moddi3) +static inline +#endif +DItype +__negdi2 (DItype u) +{ + DIunion w; + DIunion uu; + + uu.ll = u; + + w.s.low = -uu.s.low; + w.s.high = -uu.s.high - ((USItype) w.s.low > 0); + + return w.ll; +} +#endif + +/* Unless shift functions are defined whith full ANSI prototypes, + parameter b will be promoted to int if word_type is smaller than an int. */ +#ifdef L_lshrdi3 +DItype +__lshrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * 8) - b; + if (bm <= 0) + { + w.s.high = 0; + w.s.low = (USItype)uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = (USItype)uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} +#endif + +#ifdef L_ashldi3 +DItype +__ashldi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * 8) - b; + if (bm <= 0) + { + w.s.low = 0; + w.s.high = (USItype)uu.s.low << -bm; + } + else + { + USItype carries = (USItype)uu.s.low >> bm; + w.s.low = (USItype)uu.s.low << b; + w.s.high = ((USItype)uu.s.high << b) | carries; + } + + return w.ll; +} +#endif + +#ifdef L_ashrdi3 +DItype +__ashrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * 8) - b; + if (bm <= 0) + { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = uu.s.high >> (sizeof (SItype) * 8 - 1); + w.s.low = uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} +#endif + +#ifdef L_ffsdi2 +DItype +__ffsdi2 (DItype u) +{ + DIunion uu, w; + uu.ll = u; + w.s.high = 0; + w.s.low = ffs (uu.s.low); + if (w.s.low != 0) + return w.ll; + w.s.low = ffs (uu.s.high); + if (w.s.low != 0) + { + w.s.low += 8 * sizeof (SItype); + return w.ll; + } + return w.ll; +} +#endif + +#ifdef L_muldi3 +DItype +__muldi3 (DItype u, DItype v) +{ + DIunion w; + DIunion uu, vv; + + uu.ll = u, + vv.ll = v; + + w.ll = __umulsidi3 (uu.s.low, vv.s.low); + w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high + + (USItype) uu.s.high * (USItype) vv.s.low); + + return w.ll; +} +#endif + +#ifdef L_udiv_w_sdiv +#if defined (sdiv_qrnnd) +USItype +__udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype 
d) +{ + USItype q, r; + USItype c0, c1, b1; + + if ((SItype) d >= 0) + { + if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1))) + { + /* dividend, divisor, and quotient are nonnegative */ + sdiv_qrnnd (q, r, a1, a0, d); + } + else + { + /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */ + sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1)); + /* Divide (c1*2^32 + c0) by d */ + sdiv_qrnnd (q, r, c1, c0, d); + /* Add 2^31 to quotient */ + q += (USItype) 1 << (SI_TYPE_SIZE - 1); + } + } + else + { + b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */ + c1 = a1 >> 1; /* A/2 */ + c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1); + + if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */ + { + sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ + + r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */ + if ((d & 1) != 0) + { + if (r >= q) + r = r - q; + else if (q - r <= d) + { + r = r - q + d; + q--; + } + else + { + r = r - q + 2*d; + q -= 2; + } + } + } + else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */ + { + c1 = (b1 - 1) - c1; + c0 = ~c0; /* logical NOT */ + + sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */ + + q = ~q; /* (A/2)/b1 */ + r = (b1 - 1) - r; + + r = 2*r + (a0 & 1); /* A/(2*b1) */ + + if ((d & 1) != 0) + { + if (r >= q) + r = r - q; + else if (q - r <= d) + { + r = r - q + d; + q--; + } + else + { + r = r - q + 2*d; + q -= 2; + } + } + } + else /* Implies c1 = b1 */ + { /* Hence a1 = d - 1 = 2*b1 - 1 */ + if (a0 >= -d) + { + q = -1; + r = a0 + d; + } + else + { + q = -2; + r = a0 + 2*d; + } + } + } + + *rp = r; + return q; +} +#else +/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */ +USItype +__udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)), + USItype a1 __attribute__ ((__unused__)), + USItype a0 __attribute__ ((__unused__)), + USItype d __attribute__ ((__unused__))) +{ + return 0; +} +#endif +#endif + +#if (defined (L_udivdi3) || defined (L_divdi3) || \ + defined (L_umoddi3) || defined (L_moddi3)) +#define L_udivmoddi4 +#endif + +#ifdef L_udivmoddi4 +static const UQItype __clz_tab[] = +{ + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +}; + +#if (defined (L_udivdi3) || defined (L_divdi3) || \ + defined (L_umoddi3) || defined (L_moddi3)) +static inline +#endif +UDItype +__udivmoddi4 (UDItype n, UDItype d, UDItype *rp) +{ + DIunion ww; + DIunion nn, dd; + DIunion rr; + USItype d0, d1, n0, n1, n2; + USItype q0, q1; + USItype b, bm; + + nn.ll = n; + dd.ll = d; + + d0 = dd.s.low; + d1 = dd.s.high; + n0 = nn.s.low; + n1 = nn.s.high; + +#if !UDIV_NEEDS_NORMALIZATION + if (d1 == 0) + { + if (d0 > n1) + { + /* 0q = nn / 0D */ + + udiv_qrnnd (q0, n0, n1, n0, d0); + q1 = 0; + + /* Remainder in n0. */ + } + else + { + /* qq = NN / 0d */ + + if (d0 == 0) + d0 = 1 / d0; /* Divide intentionally by zero. */ + + udiv_qrnnd (q1, n1, 0, n1, d0); + udiv_qrnnd (q0, n0, n1, n0, d0); + + /* Remainder in n0. 
*/ + } + + if (rp != 0) + { + rr.s.low = n0; + rr.s.high = 0; + *rp = rr.ll; + } + } + +#else /* UDIV_NEEDS_NORMALIZATION */ + + if (d1 == 0) + { + if (d0 > n1) + { + /* 0q = nn / 0D */ + + count_leading_zeros (bm, d0); + + if (bm != 0) + { + /* Normalize, i.e. make the most significant bit of the + denominator set. */ + + d0 = d0 << bm; + n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm)); + n0 = n0 << bm; + } + + udiv_qrnnd (q0, n0, n1, n0, d0); + q1 = 0; + + /* Remainder in n0 >> bm. */ + } + else + { + /* qq = NN / 0d */ + + if (d0 == 0) + d0 = 1 / d0; /* Divide intentionally by zero. */ + + count_leading_zeros (bm, d0); + + if (bm == 0) + { + /* From (n1 >= d0) /\ (the most significant bit of d0 is set), + conclude (the most significant bit of n1 is set) /\ (the + leading quotient digit q1 = 1). + + This special case is necessary, not an optimization. + (Shifts counts of SI_TYPE_SIZE are undefined.) */ + + n1 -= d0; + q1 = 1; + } + else + { + /* Normalize. */ + + b = SI_TYPE_SIZE - bm; + + d0 = d0 << bm; + n2 = n1 >> b; + n1 = (n1 << bm) | (n0 >> b); + n0 = n0 << bm; + + udiv_qrnnd (q1, n1, n2, n1, d0); + } + + /* n1 != d0... */ + + udiv_qrnnd (q0, n0, n1, n0, d0); + + /* Remainder in n0 >> bm. */ + } + + if (rp != 0) + { + rr.s.low = n0 >> bm; + rr.s.high = 0; + *rp = rr.ll; + } + } +#endif /* UDIV_NEEDS_NORMALIZATION */ + + else + { + if (d1 > n1) + { + /* 00 = nn / DD */ + + q0 = 0; + q1 = 0; + + /* Remainder in n1n0. */ + if (rp != 0) + { + rr.s.low = n0; + rr.s.high = n1; + *rp = rr.ll; + } + } + else + { + /* 0q = NN / dd */ + + count_leading_zeros (bm, d1); + if (bm == 0) + { + /* From (n1 >= d1) /\ (the most significant bit of d1 is set), + conclude (the most significant bit of n1 is set) /\ (the + quotient digit q0 = 0 or 1). + + This special case is necessary, not an optimization. */ + + /* The condition on the next line takes advantage of that + n1 >= d1 (true due to program flow). */ + if (n1 > d1 || n0 >= d0) + { + q0 = 1; + sub_ddmmss (n1, n0, n1, n0, d1, d0); + } + else + q0 = 0; + + q1 = 0; + + if (rp != 0) + { + rr.s.low = n0; + rr.s.high = n1; + *rp = rr.ll; + } + } + else + { + USItype m1, m0; + /* Normalize. */ + + b = SI_TYPE_SIZE - bm; + + d1 = (d1 << bm) | (d0 >> b); + d0 = d0 << bm; + n2 = n1 >> b; + n1 = (n1 << bm) | (n0 >> b); + n0 = n0 << bm; + + udiv_qrnnd (q0, n1, n2, n1, d1); + umul_ppmm (m1, m0, q0, d0); + + if (m1 > n1 || (m1 == n1 && m0 > n0)) + { + q0--; + sub_ddmmss (m1, m0, m1, m0, d1, d0); + } + + q1 = 0; + + /* Remainder in (n1n0 - m1m0) >> bm. 
*/ + if (rp != 0) + { + sub_ddmmss (n1, n0, n1, n0, m1, m0); + rr.s.low = (n1 << b) | (n0 >> bm); + rr.s.high = n1 >> bm; + *rp = rr.ll; + } + } + } + } + + ww.s.low = q0; + ww.s.high = q1; + return ww.ll; +} +#endif + +#ifdef L_divdi3 +UDItype __udivmoddi4 (); + +DItype +__divdi3 (DItype u, DItype v) +{ + word_type c = 0; + DIunion uu, vv; + DItype w; + + uu.ll = u; + vv.ll = v; + + if (uu.s.high < 0) + c = ~c, + uu.ll = __negdi2 (uu.ll); + if (vv.s.high < 0) + c = ~c, + vv.ll = __negdi2 (vv.ll); + + w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0); + if (c) + w = __negdi2 (w); + + return w; +} +#endif + +#ifdef L_moddi3 +UDItype __udivmoddi4 (); +DItype +__moddi3 (DItype u, DItype v) +{ + word_type c = 0; + DIunion uu, vv; + DItype w; + + uu.ll = u; + vv.ll = v; + + if (uu.s.high < 0) + c = ~c, + uu.ll = __negdi2 (uu.ll); + if (vv.s.high < 0) + vv.ll = __negdi2 (vv.ll); + + (void) __udivmoddi4 (uu.ll, vv.ll, &w); + if (c) + w = __negdi2 (w); + + return w; +} +#endif + +#ifdef L_umoddi3 +UDItype __udivmoddi4 (); +UDItype +__umoddi3 (UDItype u, UDItype v) +{ + UDItype w; + + (void) __udivmoddi4 (u, v, &w); + + return w; +} +#endif + +#ifdef L_udivdi3 +UDItype __udivmoddi4 (); +UDItype +__udivdi3 (UDItype n, UDItype d) +{ + return __udivmoddi4 (n, d, (UDItype *) 0); +} +#endif + +#ifdef L_cmpdi2 +word_type +__cmpdi2 (DItype a, DItype b) +{ + DIunion au, bu; + + au.ll = a, bu.ll = b; + + if (au.s.high < bu.s.high) + return 0; + else if (au.s.high > bu.s.high) + return 2; + if ((USItype) au.s.low < (USItype) bu.s.low) + return 0; + else if ((USItype) au.s.low > (USItype) bu.s.low) + return 2; + return 1; +} +#endif + +#ifdef L_ucmpdi2 +word_type +__ucmpdi2 (DItype a, DItype b) +{ + DIunion au, bu; + + au.ll = a, bu.ll = b; + + if ((USItype) au.s.high < (USItype) bu.s.high) + return 0; + else if ((USItype) au.s.high > (USItype) bu.s.high) + return 2; + if ((USItype) au.s.low < (USItype) bu.s.low) + return 0; + else if ((USItype) au.s.low > (USItype) bu.s.low) + return 2; + return 1; +} +#endif + +#ifdef L_fixunsdfdi +#define WORD_SIZE (sizeof (SItype) * 8) +#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) + +DItype +__fixunsdfdi (DFtype a) +{ + DFtype b; + UDItype v; + + if (a < 0) + return 0; + + /* Compute high word of result, as a flonum. */ + b = (a / HIGH_WORD_COEFF); + /* Convert that to fixed (but not to DItype!), + and shift it into the high word. */ + v = (USItype) b; + v <<= WORD_SIZE; + /* Remove high part from the DFtype, leaving the low part as flonum. */ + a -= (DFtype)v; + /* Convert that to fixed (but not to DItype!) and add it in. + Sometimes A comes out negative. This is significant, since + A has more bits than a long int does. */ + if (a < 0) + v -= (USItype) (- a); + else + v += (USItype) a; + return v; +} +#endif + +#ifdef L_fixdfdi +DItype +__fixdfdi (DFtype a) +{ + if (a < 0) + return - __fixunsdfdi (-a); + return __fixunsdfdi (a); +} +#endif + +#ifdef L_fixunssfdi +#define WORD_SIZE (sizeof (SItype) * 8) +#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE) + +DItype +__fixunssfdi (SFtype original_a) +{ + /* Convert the SFtype to a DFtype, because that is surely not going + to lose any bits. Some day someone else can write a faster version + that avoids converting to DFtype, and verify it really works right. */ + DFtype a = original_a; + DFtype b; + UDItype v; + + if (a < 0) + return 0; + + /* Compute high word of result, as a flonum. */ + b = (a / HIGH_WORD_COEFF); + /* Convert that to fixed (but not to DItype!), + and shift it into the high word. 
  */
+  v = (USItype) b;
+  v <<= WORD_SIZE;
+  /* Remove high part from the DFtype, leaving the low part as flonum.  */
+  a -= (DFtype)v;
+  /* Convert that to fixed (but not to DItype!) and add it in.
+     Sometimes A comes out negative.  This is significant, since
+     A has more bits than a long int does.  */
+  if (a < 0)
+    v -= (USItype) (- a);
+  else
+    v += (USItype) a;
+  return v;
+}
+#endif
+
+#ifdef L_fixsfdi
+DItype
+__fixsfdi (SFtype a)
+{
+  if (a < 0)
+    return - __fixunssfdi (-a);
+  return __fixunssfdi (a);
+}
+#endif
+
+#ifdef L_floatdidf
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DFtype
+__floatdidf (DItype u)
+{
+  DFtype d;
+
+  d = (SItype) (u >> WORD_SIZE);
+  d *= HIGH_HALFWORD_COEFF;
+  d *= HIGH_HALFWORD_COEFF;
+  d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+  return d;
+}
+#endif
+
+#ifdef L_floatdisf
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+#define DI_SIZE (sizeof (DItype) * 8)
+
+/* Define codes for all the float formats that we know of.  Note
+   that this is copied from real.h.  */
+
+#define UNKNOWN_FLOAT_FORMAT 0
+#define IEEE_FLOAT_FORMAT 1
+#define VAX_FLOAT_FORMAT 2
+#define IBM_FLOAT_FORMAT 3
+
+/* Default to IEEE float if not specified.  Nearly all machines use it.  */
+#ifndef HOST_FLOAT_FORMAT
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define DF_SIZE 53
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+SFtype
+__floatdisf (DItype u)
+{
+  /* Do the calculation in DFmode
+     so that we don't lose any of the precision of the high word
+     while multiplying it.  */
+  DFtype f;
+
+  /* Protect against double-rounding error.
+     Represent any low-order bits, that might be truncated in DFmode,
+     by a bit that won't be lost.  The bit can go in anywhere below the
+     rounding position of the SFmode.  A fixed mask and bit position
+     handles all usual configurations.  It doesn't handle the case
+     of 128-bit DImode, however.  */
+  if (DF_SIZE < DI_SIZE
+      && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
+    {
+#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
+      if (! (- ((DItype) 1 << DF_SIZE) < u
+             && u < ((DItype) 1 << DF_SIZE)))
+        {
+          if ((USItype) u & (REP_BIT - 1))
+            u |= REP_BIT;
+        }
+    }
+  f = (SItype) (u >> WORD_SIZE);
+  f *= HIGH_HALFWORD_COEFF;
+  f *= HIGH_HALFWORD_COEFF;
+  f += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+  return (SFtype) f;
+}
+#endif
+
+#ifdef L_fixunsdfsi
+/* Reenable the normal types, in case limits.h needs them.  */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunsdfsi (DFtype a)
+{
+  if (a >= - (DFtype) LONG_MIN)
+    return (SItype) (a + LONG_MIN) - LONG_MIN;
+  return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunssfsi
+/* Reenable the normal types, in case limits.h needs them.  */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunssfsi (SFtype a)
+{
+  if (a >= - (SFtype) LONG_MIN)
+    return (SItype) (a + LONG_MIN) - LONG_MIN;
+  return (SItype) a;
+}
+#endif
+
+/* From here on down, the routines use normal data types.  */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
--
cgit v1.2.3
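
For reference, a minimal host-compilable sketch of the two-word shift decomposition that the __ashldi3 routine added above implements.  It is not part of the commit: it substitutes <stdint.h> names and a fixed 32-bit word size for the mode-attribute typedefs, purely so the carry handling can be checked with an ordinary compiler.

/* Illustrative sketch only -- restates the carry-based decomposition of
   __ashldi3 from libgcc2.c with standard fixed-width types.  */
#include <assert.h>
#include <stdint.h>

/* Shift a 64-bit value left by b bits (b < 64) using only 32-bit word
   operations, the way __ashldi3 does with SItype/USItype words.  */
static uint64_t
shift_left_64 (uint64_t u, unsigned int b)
{
  uint32_t low = (uint32_t) u;
  uint32_t high = (uint32_t) (u >> 32);
  uint32_t w_low, w_high;

  if (b == 0)
    return u;

  if (b >= 32)
    {
      /* The whole low word moves into the high word (shifted by the
         remainder) and the new low word is zero.  */
      w_low = 0;
      w_high = low << (b - 32);
    }
  else
    {
      /* Bits shifted out of the low word carry into the high word.  */
      uint32_t carries = low >> (32 - b);
      w_low = low << b;
      w_high = (high << b) | carries;
    }

  return ((uint64_t) w_high << 32) | w_low;
}

int
main (void)
{
  assert (shift_left_64 (0x0000000180000001ULL, 1) == 0x0000000300000002ULL);
  assert (shift_left_64 (0xFFULL, 36) == 0xFFULL << 36);
  assert (shift_left_64 (0x123456789ULL, 0) == 0x123456789ULL);
  return 0;
}

The mirrored form of the same carry handling is what __lshrdi3 and __ashrdi3 above use for right shifts.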