summaryrefslogtreecommitdiff
path: root/gcc/config/m32r
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/m32r')
-rwxr-xr-xgcc/config/m32r/initfini.c169
-rwxr-xr-xgcc/config/m32r/m32r.c3323
-rwxr-xr-xgcc/config/m32r/m32r.h2408
-rwxr-xr-xgcc/config/m32r/m32r.md2649
-rwxr-xr-xgcc/config/m32r/t-m32r70
-rwxr-xr-xgcc/config/m32r/xm-m32r.h47
6 files changed, 8666 insertions, 0 deletions
diff --git a/gcc/config/m32r/initfini.c b/gcc/config/m32r/initfini.c
new file mode 100755
index 0000000..34ef5da
--- /dev/null
+++ b/gcc/config/m32r/initfini.c
@@ -0,0 +1,169 @@
+/* .init/.fini section handling + C++ global constructor/destructor handling.
+ This file is based on crtstuff.c, sol2-crti.asm, sol2-crtn.asm.
+
+Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this file with files
+ compiled with GCC to produce an executable, this does not cause
+ the resulting executable to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* Declare a pointer to void function type. */
+typedef void (*func_ptr) (void);
+
+#ifdef CRT_INIT
+
+/* NOTE: In order to be able to support SVR4 shared libraries, we arrange
+ to have one set of symbols { __CTOR_LIST__, __DTOR_LIST__, __CTOR_END__,
+ __DTOR_END__ } per root executable and also one set of these symbols
+ per shared library. So in any given whole process image, we may have
+ multiple definitions of each of these symbols. In order to prevent
+ these definitions from conflicting with one another, and in order to
+ ensure that the proper lists are used for the initialization/finalization
+ of each individual shared library (respectively), we give these symbols
+ only internal (i.e. `static') linkage, and we also make it a point to
+ refer to only the __CTOR_END__ symbol in crtfini.o and the __DTOR_LIST__
+ symbol in crtinit.o, where they are defined. */
+
+static func_ptr __CTOR_LIST__[1]
+ __attribute__ ((section (".ctors")))
+ = { (func_ptr) (-1) };
+
+static func_ptr __DTOR_LIST__[1]
+ __attribute__ ((section (".dtors")))
+ = { (func_ptr) (-1) };
+
+/* Run all the global destructors on exit from the program. */
+
+/* Some systems place the number of pointers in the first word of the
+ table. On SVR4 however, that word is -1. In all cases, the table is
+ null-terminated. On SVR4, we start from the beginning of the list and
+ invoke each per-compilation-unit destructor routine in order
+ until we find that null.
+
+ Note that this function MUST be static. There will be one of these
+ functions in each root executable and one in each shared library, but
+ although they all have the same code, each one is unique in that it
+ refers to one particular associated `__DTOR_LIST__' which belongs to the
+ same particular root executable or shared library file. */
+
+static void __do_global_dtors ()
+asm ("__do_global_dtors") __attribute__ ((section (".text")));
+
+static void
+__do_global_dtors ()
+{
+ func_ptr *p;
+
+ for (p = __DTOR_LIST__ + 1; *p; p++)
+ (*p) ();
+}
+
+/* .init section start.
+ This must appear at the start of the .init section. */
+
+asm ("
+ .section .init,\"ax\",@progbits
+ .balign 4
+ .global __init
+__init:
+ push fp
+ push lr
+ mv fp,sp
+ ld24 r0,#__fini
+ bl atexit
+ .fillinsn
+");
+
+/* .fini section start.
+ This must appear at the start of the .fini section. */
+
+asm ("
+ .section .fini,\"ax\",@progbits
+ .balign 4
+ .global __fini
+__fini:
+ push fp
+ push lr
+ mv fp,sp
+ bl __do_global_dtors
+ .fillinsn
+");
+
+#endif /* CRT_INIT */
+
+#ifdef CRT_FINI
+
+/* Put a word containing zero at the end of each of our two lists of function
+ addresses. Note that the words defined here go into the .ctors and .dtors
+ sections of the crtend.o file, and since that file is always linked in
+ last, these words naturally end up at the very ends of the two lists
+ contained in these two sections. */
+
+static func_ptr __CTOR_END__[1]
+ __attribute__ ((section (".ctors")))
+ = { (func_ptr) 0 };
+
+static func_ptr __DTOR_END__[1]
+ __attribute__ ((section (".dtors")))
+ = { (func_ptr) 0 };
+
+/* Run all global constructors for the program.
+ Note that they are run in reverse order. */
+
+static void __do_global_ctors ()
+asm ("__do_global_ctors") __attribute__ ((section (".text")));
+
+static void
+__do_global_ctors ()
+{
+ func_ptr *p;
+
+ for (p = __CTOR_END__ - 1; *p != (func_ptr) -1; p--)
+ (*p) ();
+}
+
+/* .init section end.
+ This must live at the end of the .init section. */
+
+asm ("
+ .section .init,\"ax\",@progbits
+ bl __do_global_ctors
+ mv sp,fp
+ pop lr
+ pop fp
+ jmp lr
+ .fillinsn
+");
+
+/* .fini section end.
+ This must live at the end of the .fini section. */
+
+asm ("
+ .section .fini,\"ax\",@progbits
+ mv sp,fp
+ pop lr
+ pop fp
+ jmp lr
+ .fillinsn
+");
+
+#endif /* CRT_FINI */
diff --git a/gcc/config/m32r/m32r.c b/gcc/config/m32r/m32r.c
new file mode 100755
index 0000000..c7717b1
--- /dev/null
+++ b/gcc/config/m32r/m32r.c
@@ -0,0 +1,3323 @@
+/* CYGNUS LOCAL -- meissner/m32r work */
+/* Subroutines used for code generation on the Mitsubishi M32R cpu.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "recog.h"
+
+/* Save the operands last given to a compare for use when we
+ generate a scc or bcc insn. */
+rtx m32r_compare_op0, m32r_compare_op1;
+
+/* Array of valid operand punctuation characters. */
+char m32r_punct_chars[256];
+
+/* Selected code model. */
+char *m32r_model_string = M32R_MODEL_DEFAULT;
+enum m32r_model m32r_model;
+
+/* Selected SDA support. */
+char *m32r_sdata_string = M32R_SDATA_DEFAULT;
+enum m32r_sdata m32r_sdata;
+
+/* Scheduler support */
+int m32r_sched_odd_word_p;
+
+/* Values of the -mcond-exec=n string. */
+int m32rx_cond_exec = 4;
+char * m32rx_cond_exec_string = NULL;
+
+/* Forward declaration. */
+static void init_reg_tables PROTO((void));
+static void emit_S_clause PROTO((rtx, rtx, char *));
+static int generate_comparison PROTO((rtx, char *));
+
+static int internal_reg_or_eq_int16_operand PROTO ((rtx, enum machine_mode));
+static int internal_reg_or_cmp_int16_operand PROTO ((rtx, enum machine_mode));
+static int internal_reg_or_uint16_operand PROTO ((rtx, enum machine_mode));
+static int internal_reg_or_zero_operand PROTO ((rtx, enum machine_mode));
+
+/* Called by OVERRIDE_OPTIONS to initialize various things. */
+
+void
+m32r_init ()
+{
+ init_reg_tables ();
+
+ /* Initialize array for PRINT_OPERAND_PUNCT_VALID_P. */
+ memset (m32r_punct_chars, 0, sizeof (m32r_punct_chars));
+ m32r_punct_chars['#'] = 1;
+ m32r_punct_chars['@'] = 1; /* ??? no longer used */
+
+ /* Provide default value if not specified. */
+ if (!g_switch_set)
+ g_switch_value = SDATA_DEFAULT_SIZE;
+
+ if (strcmp (m32r_model_string, "small") == 0)
+ m32r_model = M32R_MODEL_SMALL;
+ else if (strcmp (m32r_model_string, "medium") == 0)
+ m32r_model = M32R_MODEL_MEDIUM;
+ else if (strcmp (m32r_model_string, "large") == 0)
+ m32r_model = M32R_MODEL_LARGE;
+ else
+ error ("bad value (%s) for -mmodel switch", m32r_model_string);
+
+ if (strcmp (m32r_sdata_string, "none") == 0)
+ m32r_sdata = M32R_SDATA_NONE;
+ else if (strcmp (m32r_sdata_string, "sdata") == 0)
+ m32r_sdata = M32R_SDATA_SDATA;
+ else if (strcmp (m32r_sdata_string, "use") == 0)
+ m32r_sdata = M32R_SDATA_USE;
+ else
+ error ("bad value (%s) for -msdata switch", m32r_sdata_string);
+
+ /* Set up max # instructions to use with conditional execution */
+ if (m32rx_cond_exec_string)
+ m32rx_cond_exec = atoi (m32rx_cond_exec_string);
+}
+
+/* Vectors to keep interesting information about registers where it can easily
+ be got. We used to use the actual mode value as the bit number, but there
+ is (or may be) more than 32 modes now. Instead we use two tables: one
+ indexed by hard register number, and one indexed by mode. */
+
+/* The purpose of m32r_mode_class is to shrink the range of modes so that
+ they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
+ mapped into one m32r_mode_class mode. */
+
+enum m32r_mode_class
+{
+ C_MODE,
+ S_MODE, D_MODE, T_MODE, O_MODE,
+ SF_MODE, DF_MODE, TF_MODE, OF_MODE
+ , A_MODE
+};
+
+/* Modes for condition codes. */
+#define C_MODES (1 << (int) C_MODE)
+
+/* Modes for single-word and smaller quantities. */
+#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
+
+/* Modes for double-word and smaller quantities. */
+#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))
+
+/* Modes for quad-word and smaller quantities. */
+#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
+
+/* Modes for accumulators. */
+#define A_MODES (1 << (int) A_MODE)
+
+/* Value is 1 if register/mode pair is acceptable on the M32R. */
+
+unsigned int m32r_hard_regno_mode_ok[FIRST_PSEUDO_REGISTER] =
+{
+ T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, T_MODES,
+ T_MODES, T_MODES, T_MODES, T_MODES, T_MODES, S_MODES, S_MODES, S_MODES,
+ S_MODES, C_MODES
+ , A_MODES, A_MODES
+};
+
+unsigned int m32r_mode_class [NUM_MACHINE_MODES];
+
+enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
+
+static void
+init_reg_tables ()
+{
+ int i;
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ if (GET_MODE_SIZE (i) <= 4)
+ m32r_mode_class[i] = 1 << (int) S_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ m32r_mode_class[i] = 1 << (int) D_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ m32r_mode_class[i] = 1 << (int) T_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ m32r_mode_class[i] = 1 << (int) O_MODE;
+ else
+ m32r_mode_class[i] = 0;
+ break;
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:
+ if (GET_MODE_SIZE (i) <= 4)
+ m32r_mode_class[i] = 1 << (int) SF_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ m32r_mode_class[i] = 1 << (int) DF_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ m32r_mode_class[i] = 1 << (int) TF_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ m32r_mode_class[i] = 1 << (int) OF_MODE;
+ else
+ m32r_mode_class[i] = 0;
+ break;
+ case MODE_CC:
+ default:
+ /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
+ we must explicitly check for them here. */
+ if (i == (int) CCmode)
+ m32r_mode_class[i] = 1 << (int) C_MODE;
+ else
+ m32r_mode_class[i] = 0;
+ break;
+ }
+ }
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (GPR_P (i))
+ m32r_regno_reg_class[i] = GENERAL_REGS;
+ else if (i == ARG_POINTER_REGNUM)
+ m32r_regno_reg_class[i] = GENERAL_REGS;
+ else
+ m32r_regno_reg_class[i] = NO_REGS;
+ }
+}
+
+static tree interrupt_ident;
+static tree model_ident;
+static tree small_ident;
+static tree medium_ident;
+static tree large_ident;
+
+static void
+init_idents ()
+{
+ if (interrupt_ident == 0)
+ {
+ interrupt_ident = get_identifier ("interrupt");
+ model_ident = get_identifier ("model");
+ small_ident = get_identifier ("small");
+ medium_ident = get_identifier ("medium");
+ large_ident = get_identifier ("large");
+ }
+}
+
+/* M32R specific attribute support.
+
+ interrupt - for interrupt functions
+
+ model - select code model used to access object
+
+ small: addresses use 24 bits, use bl to make calls
+ medium: addresses use 32 bits, use bl to make calls
+ large: addresses use 32 bits, use seth/add3/jl to make calls
+
+ Grep for MODEL in m32r.h for more info.
+*/
+
+/* Return nonzero if IDENTIFIER is a valid decl attribute. */
+
+int
+m32r_valid_machine_decl_attribute (type, attributes, identifier, args)
+ tree type;
+ tree attributes;
+ tree identifier;
+ tree args;
+ {
+ init_idents ();
+
+ if (identifier == interrupt_ident
+ && list_length (args) == 0)
+ return 1;
+
+ if (identifier == model_ident
+ && list_length (args) == 1
+ && (TREE_VALUE (args) == small_ident
+ || TREE_VALUE (args) == medium_ident
+ || TREE_VALUE (args) == large_ident))
+ return 1;
+
+ return 0;
+}
+
+/* Return zero if TYPE1 and TYPE are incompatible, one if they are compatible,
+ and two if they are nearly compatible (which causes a warning to be
+ generated). */
+
+int
+m32r_comp_type_attributes (type1, type2)
+ tree type1, type2;
+{
+ return 1;
+}
+
+/* Set the default attributes for TYPE. */
+
+void
+m32r_set_default_type_attributes (type)
+ tree type;
+{
+}
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+
+void
+m32r_select_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ if (TREE_CODE (decl) == STRING_CST)
+ {
+ if (! flag_writable_strings)
+ const_section ();
+ else
+ data_section ();
+ }
+ else if (TREE_CODE (decl) == VAR_DECL)
+ {
+ if (SDATA_NAME_P (XSTR (XEXP (DECL_RTL (decl), 0), 0)))
+ sdata_section ();
+ else if ((flag_pic && reloc)
+ || !TREE_READONLY (decl)
+ || TREE_SIDE_EFFECTS (decl)
+ || !DECL_INITIAL (decl)
+ || (DECL_INITIAL (decl) != error_mark_node
+ && !TREE_CONSTANT (DECL_INITIAL (decl))))
+ data_section ();
+ else
+ const_section ();
+ }
+ else
+ const_section ();
+}
+
+/* Encode section information of DECL, which is either a VAR_DECL,
+ FUNCTION_DECL, STRING_CST, CONSTRUCTOR, or ???.
+
+ For the M32R we want to record:
+
+ - whether the object lives in .sdata/.sbss.
+ objects living in .sdata/.sbss are prefixed with SDATA_FLAG_CHAR
+
+ - what code model should be used to access the object
+ small: recorded with no flag - for space efficiency since they'll
+ be the most common
+ medium: prefixed with MEDIUM_FLAG_CHAR
+ large: prefixed with LARGE_FLAG_CHAR
+*/
+
+void
+m32r_encode_section_info (decl)
+ tree decl;
+{
+ char prefix = 0;
+ tree model = 0;
+
+ switch (TREE_CODE (decl))
+ {
+ case VAR_DECL :
+ case FUNCTION_DECL :
+ model = lookup_attribute ("model", DECL_MACHINE_ATTRIBUTES (decl));
+ break;
+ case STRING_CST :
+ case CONSTRUCTOR :
+ /* ??? document all others that can appear here */
+ default :
+ return;
+ }
+
+ /* Only mark the object as being small data area addressable if
+ it hasn't been explicitly marked with a code model.
+
+ The user can explicitly put an object in the small data area with the
+ section attribute. If the object is in sdata/sbss and marked with a
+ code model do both [put the object in .sdata and mark it as being
+ addressed with a specific code model - don't mark it as being addressed
+ with an SDA reloc though]. This is ok and might be useful at times. If
+ the object doesn't fit the linker will give an error. */
+
+ if (! model)
+ {
+ if (TREE_CODE_CLASS (TREE_CODE (decl)) == 'd'
+ && DECL_SECTION_NAME (decl) != NULL_TREE)
+ {
+ char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (! strcmp (name, ".sdata") || ! strcmp (name, ".sbss"))
+ {
+#if 0 /* ??? There's no reason to disallow this, is there? */
+ if (TREE_READONLY (decl))
+ error_with_decl (decl, "const objects cannot go in .sdata/.sbss");
+#endif
+ prefix = SDATA_FLAG_CHAR;
+ }
+ }
+ else
+ {
+ if (TREE_CODE (decl) == VAR_DECL
+ && ! TREE_READONLY (decl)
+ && ! TARGET_SDATA_NONE)
+ {
+ int size = int_size_in_bytes (TREE_TYPE (decl));
+
+ if (size > 0 && size <= g_switch_value)
+ prefix = SDATA_FLAG_CHAR;
+ }
+ }
+ }
+
+ /* If data area not decided yet, check for a code model. */
+ if (prefix == 0)
+ {
+ if (model)
+ {
+ init_idents ();
+
+ if (TREE_VALUE (TREE_VALUE (model)) == small_ident)
+ ; /* don't mark the symbol specially */
+ else if (TREE_VALUE (TREE_VALUE (model)) == medium_ident)
+ prefix = MEDIUM_FLAG_CHAR;
+ else if (TREE_VALUE (TREE_VALUE (model)) == large_ident)
+ prefix = LARGE_FLAG_CHAR;
+ else
+ abort (); /* shouldn't happen */
+ }
+ else
+ {
+ if (TARGET_MODEL_SMALL)
+ ; /* don't mark the symbol specially */
+ else if (TARGET_MODEL_MEDIUM)
+ prefix = MEDIUM_FLAG_CHAR;
+ else if (TARGET_MODEL_LARGE)
+ prefix = LARGE_FLAG_CHAR;
+ else
+ abort (); /* shouldn't happen */
+ }
+ }
+
+ if (prefix != 0)
+ {
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl));
+ char *str = XSTR (XEXP (rtl, 0), 0);
+ int len = strlen (str);
+ char *newstr = savealloc (len + 2);
+ strcpy (newstr + 1, str);
+ *newstr = prefix;
+ XSTR (XEXP (rtl, 0), 0) = newstr;
+ }
+}
+
+/* Do anything needed before RTL is emitted for each function. */
+
+void
+m32r_init_expanders ()
+{
+ /* ??? At one point there was code here. The function is left in
+ to make it easy to experiment. */
+}
+
+/* Acceptable arguments to the call insn. */
+
+int
+call_address_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ /* Constants and values in registers are not OK, because
+ the m32r BL instruction can only support PC relative branching. */
+ return symbolic_operand (op, int_mode);
+}
+
+/* The following xxx_operand functions all take an integer for the machine_mode
+ argument. This is to allow them to be prototyped in m32r.h which is
+ included before rtl.h is included. Not every function includes rtl.h, so we
+ can't assume it will be included. */
+
+int
+call_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ return call_address_operand (op, mode);
+}
+
+/* Returns 1 if OP is a symbol reference. */
+
+int
+symbolic_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case CONST :
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if OP is a reference to an object in .sdata/.sbss. */
+
+int
+small_data_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (! TARGET_SDATA_USE)
+ return 0;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SDATA_NAME_P (XSTR (op, 0));
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
+ && INT16_P (INTVAL (XEXP (XEXP (op, 0), 1))))
+ return SDATA_NAME_P (XSTR (XEXP (XEXP (op, 0), 0), 0));
+
+ return 0;
+}
+
+/* Return 1 if OP is a symbol that can use 24 bit addressing. */
+
+int
+addr24_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+  if (GET_CODE (op) == LABEL_REF)
+    return TARGET_ADDR24;
+
+  if (GET_CODE (op) == SYMBOL_REF)
+    return (SMALL_NAME_P (XSTR (op, 0))
+            || (TARGET_ADDR24
+                && (CONSTANT_POOL_ADDRESS_P (op)
+                    || LIT_NAME_P (XSTR (op, 0)))));
+
+  if (GET_CODE (op) == CONST
+      && GET_CODE (XEXP (op, 0)) == PLUS
+      && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+      && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
+      && UINT24_P (INTVAL (XEXP (XEXP (op, 0), 1))))
+    {
+      rtx sym = XEXP (XEXP (op, 0), 0);
+      /* Test the SYMBOL_REF itself, not the enclosing CONST:
+         the constant-pool flag and the encoded name prefix live on
+         the SYMBOL_REF node, so applying these tests to OP would
+         always fail (and XSTR on a CONST reads the wrong field).  */
+      return (SMALL_NAME_P (XSTR (sym, 0))
+              || (TARGET_ADDR24
+                  && (CONSTANT_POOL_ADDRESS_P (sym)
+                      || LIT_NAME_P (XSTR (sym, 0)))));
+    }
+
+  return 0;
+}
+
+/* Return 1 if OP is a symbol that needs 32 bit addressing. */
+
+int
+addr32_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) == LABEL_REF)
+ return TARGET_ADDR32;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return (! addr24_operand (op, int_mode)
+ && ! small_data_operand (op, int_mode));
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
+ {
+ return (! addr24_operand (op, int_mode)
+ && ! small_data_operand (op, int_mode));
+ }
+
+ return 0;
+}
+
+/* Return 1 if OP is a function that can be called with the `bl' insn. */
+
+int
+call26_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) == SYMBOL_REF)
+ return ! LARGE_NAME_P (XSTR (op, 0));
+
+ return TARGET_CALL26;
+}
+
+/* Returns 1 if OP is an acceptable operand for seth/add3. */
+
+int
+seth_add3_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) == SYMBOL_REF
+ || GET_CODE (op) == LABEL_REF)
+ return 1;
+
+ if (GET_CODE (op) == CONST
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
+ && INT16_P (INTVAL (XEXP (XEXP (op, 0), 1))))
+ return 1;
+
+ return 0;
+}
+
+/* Return true if OP is a signed 8 bit immediate value. */
+
+int
+int8_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return INT8_P (INTVAL (op));
+}
+
+/* Return true if OP is a signed 16 bit immediate value
+ useful in comparisons. */
+
+int
+cmp_int16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return CMP_INT16_P (INTVAL (op));
+}
+
+/* Return true if OP is an unsigned 16 bit immediate value. */
+int
+uint16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return UINT16_P (INTVAL (op));
+}
+
+/* Return true if OP is a register or signed 16 bit value. */
+int
+reg_or_int16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return INT16_P (INTVAL (op));
+}
+
+/* Return true if OP is a register or an unsigned 16 bit value. */
+int
+reg_or_uint16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return UINT16_P (INTVAL (op));
+}
+
+/* Return true if OP is a register or an integer value that can be
+ used in SEQ/SNE. We can use either XOR of the value or ADD of
+ the negative of the value for the constant. Don't allow 0,
+ because that is special cased. */
+int
+reg_or_eq_int16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ HOST_WIDE_INT value;
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+
+ value = INTVAL (op);
+ return (value != 0) && (UINT16_P (value) || CMP_INT16_P (-value));
+}
+
+/* Same as reg_or_eq_int16_operand, except the mode argument is an enum.  */
+static int
+internal_reg_or_eq_int16_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+  HOST_WIDE_INT value;
+
+  if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+    return register_operand (op, mode);
+
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  /* Zero is excluded because it is special cased by the caller; XOR
+     covers unsigned 16 bit values, ADD of the negation covers the rest.  */
+  value = INTVAL (op);
+  return (value != 0) && (UINT16_P (value) || CMP_INT16_P (-value));
+}
+
+/* Same as reg_or_uint16_operand, except the mode argument is an enum. */
+static int
+internal_reg_or_uint16_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return UINT16_P (INTVAL (op));
+}
+
+/* Return true if OP is a register or signed 16 bit value for compares. */
+int
+reg_or_cmp_int16_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return CMP_INT16_P (INTVAL (op));
+}
+
+/* Same as reg_or_cmp_int16_operand, but uses machine_mode as an
+ operand. */
+static int
+internal_reg_or_cmp_int16_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ return CMP_INT16_P (INTVAL (op));
+}
+
+/* Return true if OP is a register or the constant 0. */
+int
+reg_or_zero_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+
+ return INTVAL (op) == 0;
+}
+
+/* Like reg_or_zero_operand, except mode argument is an enum. */
+static int
+internal_reg_or_zero_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG || GET_CODE (op) == SUBREG)
+ return register_operand (op, mode);
+
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+
+ return INTVAL (op) == 0;
+}
+
+/* Return true if OP is a const_int requiring two instructions to load. */
+
+int
+two_insn_const_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ if (INT16_P (INTVAL (op))
+ || UINT24_P (INTVAL (op))
+ || UPPER16_P (INTVAL (op)))
+ return 0;
+ return 1;
+}
+
+/* Return true if OP is an acceptable argument for a single word
+ move source. */
+
+int
+move_src_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF :
+ case CONST :
+ return addr24_operand (op, int_mode);
+ case CONST_INT :
+ /* ??? We allow more cse opportunities if we only allow constants
+ loadable with one insn, and split the rest into two. The instances
+ where this would help should be rare and the current way is
+ simpler. */
+ return INT32_P (INTVAL (op));
+ case LABEL_REF :
+ return TARGET_ADDR24;
+ case CONST_DOUBLE :
+ if (mode == SFmode)
+ return 1;
+ else if (mode == SImode)
+ {
+ /* Large unsigned constants are represented as const_double's. */
+ unsigned HOST_WIDE_INT low, high;
+
+ low = CONST_DOUBLE_LOW (op);
+ high = CONST_DOUBLE_HIGH (op);
+ return high == 0 && low <= 0xffffffff;
+ }
+ else
+ return 0;
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (GET_CODE (SUBREG_REG (op)) == MEM)
+ return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ if (GET_CODE (XEXP (op, 0)) == PRE_INC
+ || GET_CODE (XEXP (op, 0)) == PRE_DEC)
+ return 0; /* loads can't do pre-{inc,dec} */
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return true if OP is an acceptable argument for a double word
+ move source. */
+
+int
+move_double_src_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ switch (GET_CODE (op))
+ {
+ case CONST_INT :
+ case CONST_DOUBLE :
+ return 1;
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (GET_CODE (SUBREG_REG (op)) == MEM)
+ return move_double_src_operand (SUBREG_REG (op), int_mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ /* Disallow auto inc/dec for now. */
+ if (GET_CODE (XEXP (op, 0)) == PRE_DEC
+ || GET_CODE (XEXP (op, 0)) == PRE_INC)
+ return 0;
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return true if OP is an acceptable argument for a move destination. */
+
+int
+move_dest_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ switch (GET_CODE (op))
+ {
+ case REG :
+ return register_operand (op, mode);
+ case SUBREG :
+ /* (subreg (mem ...) ...) can occur here if the inner part was once a
+ pseudo-reg and is now a stack slot. */
+ if (GET_CODE (SUBREG_REG (op)) == MEM)
+ return address_operand (XEXP (SUBREG_REG (op), 0), mode);
+ else
+ return register_operand (op, mode);
+ case MEM :
+ if (GET_CODE (XEXP (op, 0)) == POST_INC)
+ return 0; /* stores can't do post inc */
+ return address_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return 1 if OP is a DImode const we want to handle inline.
+ This must match the code in the movdi pattern.
+ It is used by the 'G' CONST_DOUBLE_OK_FOR_LETTER. */
+
+int
+easy_di_const (op)
+ rtx op;
+{
+ rtx high_rtx, low_rtx;
+ HOST_WIDE_INT high, low;
+
+ split_double (op, &high_rtx, &low_rtx);
+ high = INTVAL (high_rtx);
+ low = INTVAL (low_rtx);
+ /* Pick constants loadable with 2 16 bit `ldi' insns. */
+ if (high >= -128 && high <= 127
+ && low >= -128 && low <= 127)
+ return 1;
+ return 0;
+}
+
+/* Return 1 if OP is a DFmode const we want to handle inline.
+ This must match the code in the movdf pattern.
+ It is used by the 'H' CONST_DOUBLE_OK_FOR_LETTER. */
+
+int
+easy_df_const (op)
+ rtx op;
+{
+ REAL_VALUE_TYPE r;
+ long l[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (r, l);
+ if (l[0] == 0 && l[1] == 0)
+ return 1;
+ if ((l[0] & 0xffff) == 0 && l[1] == 0)
+ return 1;
+ return 0;
+}
+
+/* Return 1 if OP is an EQ or NE comparison operator. */
+
+int
+eqne_comparison_operator (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+ return (code == EQ || code == NE);
+}
+
+/* Return 1 if OP is a signed comparison operator. */
+
+int
+signed_comparison_operator (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+ return (code == EQ || code == NE
+ || code == LT || code == LE || code == GT || code == GE);
+}
+
+/* Return 1 if OP is (mem (reg ...)).
+ This is used in insn length calcs. */
+
+int
+memreg_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ return GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == REG;
+}
+
+/* Return true if OP is an acceptable input argument for a zero/sign extend
+ operation. */
+
+int
+extend_operand (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ rtx addr;
+
+ switch (GET_CODE (op))
+ {
+ case REG :
+ case SUBREG :
+ return register_operand (op, mode);
+
+ case MEM :
+ addr = XEXP (op, 0);
+ if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
+ return 0; /* loads can't do pre inc/pre dec */
+
+ return address_operand (addr, mode);
+
+ default :
+ return 0;
+ }
+}
+
+/* Return non-zero if the operand is an insn that is a small insn.
+ Allow const_int 0 as well, which is a placeholder for NOP slots. */
+
+int
+small_insn_p (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
+ return 1;
+
+ if (GET_RTX_CLASS (GET_CODE (op)) != 'i')
+ return 0;
+
+ return get_attr_length (op) == 2;
+}
+
+/* Return non-zero if the operand is an insn that is a large insn. */
+
+int
+large_insn_p (op, int_mode)
+ rtx op;
+ int int_mode;
+{
+ if (GET_RTX_CLASS (GET_CODE (op)) != 'i')
+ return 0;
+
+ return get_attr_length (op) != 2;
+}
+
+
+/* Comparisons. */
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+
+int
+m32r_select_cc_mode (op, x, y)
+ int op;
+ rtx x, y;
+{
+ return (int)CCmode;
+}
+
/* X and Y are two things to compare using CODE.  Emit insns that branch to
   LABEL when the condition holds, and return the emitted insns as a
   SEQUENCE rtx.
   NOTE(review): an earlier comment mentioned a `need_compare' argument,
   but this function takes none -- where possible the comparison is
   subsumed into the branch itself; otherwise a compare insn sets the
   condition bit and a conditional branch on that bit follows.  */

rtx
gen_compare (int_code, label, x, y)
     int int_code;
     rtx label;
     rtx x;
     rtx y;
{
  enum rtx_code code = (enum rtx_code)int_code;
  enum rtx_code branch_code;
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx (REG, mode, CARRY_REGNUM);
  int must_swap = FALSE;
  int int_p = (GET_CODE (y) == CONST_INT);
  HOST_WIDE_INT value = (int_p) ? INTVAL (y) : 0;
  int signed_p = 0;
  rtx arg1;
  rtx arg2;
  rtx ret;
  /* Generator for the compare insn, and the predicate constraining its
     second operand.  Both default to abort so an unhandled CODE traps.  */
  rtx (*gen_func) PROTO ((rtx, rtx))
    = (rtx (*) PROTO ((rtx, rtx))) abort;
  int (*constrain_func) PROTO ((rtx, enum machine_mode))
    = (int (*) PROTO ((rtx, enum machine_mode))) abort;

  start_sequence ();
  if (GET_CODE (x) != REG && GET_CODE (x) != SUBREG)
    x = force_reg (GET_MODE (x), x);

  if (GET_CODE (y) != REG && GET_CODE (y) != SUBREG && !int_p)
    y = force_reg (GET_MODE (x), y);

  /* If we have a LE, GT, etc. of a constant, see if we can transform it to
     the appropriate LT, GE, etc. to use the cmpi or cmpui instruction.
     (a <= c  <=>  a < c+1, and likewise for the unsigned forms; the range
     check ensures the incremented constant still fits the instruction.)  */
  if (int_p)
    {
      if ((code == LE || code == GT) && (value >= -32768 && value < 32767))
	{
	  ++value;
	  y = GEN_INT (value);
	  code = (code == LE) ? LT : GE;
	}

      else if ((code == LEU || code == GTU) && (value >= 0 && value < 32767))
	{
	  ++value;
	  y = GEN_INT (value);
	  code = (code == LEU) ? LTU : GEU;
	}
    }

  /* Pick the compare generator, the operand predicate, the sense of the
     following branch, and whether the operands must be swapped (LE/GT and
     LEU/GTU are done as the reversed LT/LTU compare).  */
  switch (code)
    {
    default:
      abort ();

    case EQ:
      gen_func = gen_cmp_eqsi_insn;
      constrain_func = internal_reg_or_uint16_operand;
      branch_code = NE;
      signed_p = TRUE;
      if (TARGET_M32RX)
	{
	  gen_func = gen_cmp_eqsi_zero_insn;
	  constrain_func = internal_reg_or_zero_operand;
	}
      break;

    case NE:
      gen_func = gen_cmp_eqsi_insn;
      constrain_func = internal_reg_or_uint16_operand;
      branch_code = EQ;
      signed_p = TRUE;
      if (TARGET_M32RX)
	{
	  gen_func = gen_cmp_eqsi_zero_insn;
	  constrain_func = internal_reg_or_zero_operand;
	}
      break;

    case LT:
      gen_func = gen_cmp_ltsi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = NE;
      signed_p = TRUE;
      break;

    case LE:
      gen_func = gen_cmp_ltsi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = EQ;
      signed_p = TRUE;
      must_swap = TRUE;
      break;

    case GT:
      gen_func = gen_cmp_ltsi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = NE;
      signed_p = TRUE;
      must_swap = TRUE;
      break;

    case GE:
      gen_func = gen_cmp_ltsi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = EQ;
      signed_p = TRUE;
      break;

    case LTU:
      gen_func = gen_cmp_ltusi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = NE;
      signed_p = FALSE;
      break;

    case LEU:
      gen_func = gen_cmp_ltusi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = EQ;
      signed_p = FALSE;
      must_swap = TRUE;
      break;

    case GTU:
      gen_func = gen_cmp_ltusi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = NE;
      signed_p = FALSE;
      must_swap = TRUE;
      break;

    case GEU:
      gen_func = gen_cmp_ltusi_insn;
      constrain_func = internal_reg_or_cmp_int16_operand;
      branch_code = EQ;
      signed_p = FALSE;
      break;
    }

  /* Is this a branch comparison against 0?  Those can be done directly
     by the conditional branch insns; no separate compare is emitted.  */
  if (int_p && value == 0 && signed_p)
    {
      emit_jump_insn (gen_rtx_SET (VOIDmode,
				   pc_rtx,
				   gen_rtx_IF_THEN_ELSE (VOIDmode,
							 gen_rtx (code,
								  VOIDmode,
								  x,
								  CONST0_RTX (mode)),
							 gen_rtx_LABEL_REF (VOIDmode,
									    label),
							 pc_rtx)));
    }

  /* Is this a branch comparison comparing two registers for == or !=?
     Also handled directly by the branch insns (beq/bne).  */
  else if (code == EQ || code == NE)
    {
      if (int_p)
	y = force_reg (GET_MODE (x), y);

      emit_jump_insn (gen_rtx_SET (VOIDmode,
				   pc_rtx,
				   gen_rtx_IF_THEN_ELSE (VOIDmode,
							 gen_rtx (code,
								  VOIDmode,
								  x, y),
							 gen_rtx_LABEL_REF (VOIDmode,
									    label),
							 pc_rtx)));
    }

  /* If not, set c bit and then do jump */
  else
    {
      /* Force the constant into a register if it must be the first
	 (swapped) operand or doesn't satisfy the compare insn's
	 predicate.  */
      if (int_p && (must_swap || ! (*constrain_func) (y, GET_MODE (y))))
	y = force_reg (GET_MODE (x), y);

      if (must_swap)
	{
	  arg1 = y;
	  arg2 = x;
	}
      else
	{
	  arg1 = x;
	  arg2 = y;
	}

      emit_insn ((*gen_func) (arg1, arg2));
      emit_jump_insn (gen_rtx_SET (VOIDmode,
				   pc_rtx,
				   gen_rtx_IF_THEN_ELSE (VOIDmode,
							 gen_rtx (branch_code,
								  VOIDmode,
								  cc_reg,
								  CONST0_RTX (mode)),
							 gen_rtx_LABEL_REF (VOIDmode,
									    label),
							 pc_rtx)));
    }

  ret = gen_sequence ();
  end_sequence ();
  return ret;
}
+
/* Split a 2 word move (DI or DF) into component parts.
   OPERANDS[0] is the destination, OPERANDS[1] the source; the two
   single-word moves are emitted in whichever order avoids clobbering
   a register that the second move still needs.  Returns the emitted
   insns as a SEQUENCE rtx.  */

rtx
gen_split_move_double (operands)
     rtx operands[];
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx dest = operands[0];
  rtx src  = operands[1];
  rtx val;

  /* We might have (SUBREG (MEM)) here, so just get rid of the
     subregs to make this code simpler.  It is safe to call
     alter_subreg any time after reload.  */
  if (GET_CODE (dest) == SUBREG)
    dest = alter_subreg (dest);
  if (GET_CODE (src) == SUBREG)
    src = alter_subreg (src);

  start_sequence ();
  if (GET_CODE (dest) == REG)
    {
      int dregno = REGNO (dest);

      /* reg = reg */
      if (GET_CODE (src) == REG)
	{
	  int sregno = REGNO (src);

	  int reverse = (dregno == sregno + 1);

	  /* We normally copy the low-numbered register first.  However, if
	     the first register operand 0 is the same as the second register of
	     operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src,  reverse, TRUE, mode)));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, !reverse, TRUE, mode),
				  operand_subword (src,  !reverse, TRUE, mode)));
	}

      /* reg = constant */
      else if (GET_CODE (src) == CONST_INT || GET_CODE (src) == CONST_DOUBLE)
	{
	  rtx words[2];
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 0, TRUE, mode),
				  words[0]));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}

      /* reg = mem */
      else if (GET_CODE (src) == MEM)
	{
	  /* If the high-address word is used in the address, we must load it
	     last.  Otherwise, load it first.  */
	  rtx addr = XEXP (src, 0);
	  int reverse = (refers_to_regno_p (dregno, dregno+1, addr, 0) != 0);

	  /* We used to optimize loads from single registers as

		ld r1,r3+; ld r2,r3

	     if r3 were not used subsequently.  However, the REG_NOTES aren't
	     propagated correctly by the reload phase, and it can cause bad
	     code to be generated.  We could still try:

		ld r1,r3+; ld r2,r3; addi r3,-4

	     which saves 2 bytes and doesn't force longword alignment.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  change_address (src, SImode,
						  plus_constant (addr,
								 reverse * UNITS_PER_WORD))));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, !reverse, TRUE, mode),
				  change_address (src, SImode,
						  plus_constant (addr,
								 (!reverse) * UNITS_PER_WORD))));
	}

      else
	abort ();
    }

  /* mem = reg */
  /* We used to optimize loads from single registers as

	st r1,r3; st r2,+r3

     if r3 were not used subsequently.  However, the REG_NOTES aren't
     propagated correctly by the reload phase, and it can cause bad
     code to be generated.  We could still try:

	st r1,r3; st r2,+r3; addi r3,-4

     which saves 2 bytes and doesn't force longword alignment.  */
  else if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
    {
      rtx addr = XEXP (dest, 0);

      emit_insn (gen_rtx_SET (VOIDmode,
			      change_address (dest, SImode, addr),
			      operand_subword (src, 0, TRUE, mode)));

      emit_insn (gen_rtx_SET (VOIDmode,
			      change_address (dest, SImode,
					      plus_constant (addr, UNITS_PER_WORD)),
			      operand_subword (src, 1, TRUE, mode)));
    }

  else
    abort ();

  val = gen_sequence ();
  end_sequence ();
  return val;
}
+
+
+/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro. */
+
+int
+function_arg_partial_nregs (cum, int_mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ int int_mode;
+ tree type;
+ int named;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ int ret;
+ int size = (((mode == BLKmode && type)
+ ? int_size_in_bytes (type)
+ : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if (*cum >= M32R_MAX_PARM_REGS)
+ ret = 0;
+ else if (*cum + size > M32R_MAX_PARM_REGS)
+ ret = (*cum + size) - M32R_MAX_PARM_REGS;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/* Do any needed setup for a variadic function. For the M32R, we must
+ create a register parameter block, and then copy any anonymous arguments
+ in registers to memory.
+
+ CUM has not been updated for the last named argument which has type TYPE
+ and mode MODE, and we rely on this fact. */
+
+void
+m32r_setup_incoming_varargs (cum, int_mode, type, pretend_size, no_rtl)
+ CUMULATIVE_ARGS *cum;
+ int int_mode;
+ tree type;
+ int *pretend_size;
+ int no_rtl;
+{
+ enum machine_mode mode = (enum machine_mode)int_mode;
+ int first_anon_arg;
+
+ if (no_rtl)
+ return;
+
+ /* All BLKmode values are passed by reference. */
+ if (mode == BLKmode)
+ abort ();
+
+ /* We must treat `__builtin_va_alist' as an anonymous arg. */
+ if (current_function_varargs)
+ first_anon_arg = *cum;
+ else
+ first_anon_arg = (ROUND_ADVANCE_CUM (*cum, mode, type)
+ + ROUND_ADVANCE_ARG (mode, type));
+
+ if (first_anon_arg < M32R_MAX_PARM_REGS)
+ {
+ /* Note that first_reg_offset < M32R_MAX_PARM_REGS. */
+ int first_reg_offset = first_anon_arg;
+ /* Size in words to "pretend" allocate. */
+ int size = M32R_MAX_PARM_REGS - first_reg_offset;
+ rtx regblock;
+
+ regblock = gen_rtx (MEM, BLKmode,
+ plus_constant (arg_pointer_rtx,
+ FIRST_PARM_OFFSET (0)));
+ move_block_from_reg (first_reg_offset, regblock,
+ size, size * UNITS_PER_WORD);
+
+ *pretend_size = (size * UNITS_PER_WORD);
+ }
+}
+
+/* Cost functions. */
+
+/* Provide the costs of an addressing mode that contains ADDR.
+ If ADDR is not a valid address, its cost is irrelevant.
+
+ This function is trivial at the moment. This code doesn't live
+ in m32r.h so it's easy to experiment. */
+
+int
+m32r_address_cost (addr)
+ rtx addr;
+{
+ return 1;
+}
+
+
+/* A C statement (sans semicolon) to update the integer variable COST based on
+ the relationship between INSN that is dependent on DEP_INSN through the
+ dependence LINK. The default is to make no adjustment to COST. This can be
+ used for example to specify to the scheduler that an output- or
+ anti-dependence does not incur the same cost as a data-dependence. */
+
+int
+m32r_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ return cost;
+}
+
+
+/* A C statement (sans semicolon) to update the integer scheduling
+ priority `INSN_PRIORITY(INSN)'. Reduce the priority to execute
+ the INSN earlier, increase the priority to execute INSN later.
+ Do not define this macro if you do not need to adjust the
+ scheduling priorities of insns.
+
+ On the m32r, increase the priority of long instructions so that
+ the short instructions are scheduled ahead of the long ones. */
+
+int
+m32r_adjust_priority (insn, priority)
+ rtx insn;
+ int priority;
+{
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
+ {
+ enum rtx_code code = GET_CODE (PATTERN (insn));
+ if (code != USE && code != CLOBBER && code != ADDR_VEC
+ && get_attr_insn_size (insn) != INSN_SIZE_SHORT)
+ priority <<= 3;
+ }
+
+ return priority;
+}
+
+
+/* Initialize for scheduling a group of instructions. */
+
+void
+m32r_sched_init (stream, verbose)
+ FILE *stream;
+ int verbose;
+{
+ m32r_sched_odd_word_p = FALSE;
+}
+
+
/* Reorder the scheduler's priority list if needed.
   Partition the READY list (which is ordered backwards) into long and
   short insns so that short insns can be paired into words, taking the
   current half-word boundary (m32r_sched_odd_word_p) into account.  */

void
m32r_sched_reorder (stream, verbose, ready, n_ready)
     FILE *stream;
     int verbose;
     rtx *ready;
     int n_ready;
{
  if (TARGET_DEBUG)
    return;

  /* Only chatter at high verbosity levels.  */
  if (verbose <= 7)
    stream = (FILE *)0;

  if (stream)
    fprintf (stream,
	     ";;\t\t::: Looking at %d insn(s) on ready list, boundary is %s word\n",
	     n_ready,
	     (m32r_sched_odd_word_p) ? "odd" : "even");

  if (n_ready > 1)
    {
      /* Three scratch buffers: long insns, short insns, and the rebuilt
	 ready list (filled from its tail since READY is backwards).  */
      rtx *long_head = (rtx *) alloca (sizeof (rtx) * n_ready);
      rtx *long_tail = long_head;
      rtx *short_head = (rtx *) alloca (sizeof (rtx) * n_ready);
      rtx *short_tail = short_head;
      rtx *new_head = (rtx *) alloca (sizeof (rtx) * n_ready);
      rtx *new_tail = new_head + (n_ready - 1);
      int i;

      /* Loop through the instructions, classifying them as short/long.  Try
	 to keep 2 short together and/or 1 long.  Note, the ready list is
	 actually ordered backwards, so keep it in that manner.  */
      for (i = n_ready-1; i >= 0; i--)
	{
	  rtx insn = ready[i];
	  enum rtx_code code;

	  if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
	      || (code = GET_CODE (PATTERN (insn))) == USE
	      || code == CLOBBER || code == ADDR_VEC)
	    {
	      /* Dump all current short/long insns just in case */
	      while (long_head != long_tail)
		*new_tail-- = *long_head++;

	      while (short_head != short_tail)
		*new_tail-- = *short_head++;

	      *new_tail-- = insn;
	      if (stream)
		fprintf (stream,
			 ";;\t\t::: Skipping non instruction %d\n",
			 INSN_UID (insn));

	    }

	  else
	    {
	      if (get_attr_insn_size (insn) != INSN_SIZE_SHORT)
		*long_tail++ = insn;

	      else
		*short_tail++ = insn;
	    }
	}

      /* If we are on an odd word, emit a single short instruction if
	 we can, to get back onto a word boundary.  */
      if (m32r_sched_odd_word_p && short_head != short_tail)
	*new_tail-- = *short_head++;

      /* Now dump out all of the long instructions */
      while (long_head != long_tail)
	*new_tail-- = *long_head++;

      /* Now dump out all of the short instructions */
      while (short_head != short_tail)
	*new_tail-- = *short_head++;

      /* Every insn must have been placed exactly once.  */
      if (new_tail+1 != new_head)
	abort ();

      bcopy ((char *) new_head, (char *) ready, sizeof (rtx) * n_ready);
      if (stream)
	{
#ifdef HAIFA
	  fprintf (stream, ";;\t\t::: New ready list: ");
	  debug_ready_list (ready, n_ready);
#else
	  int i;
	  for (i = 0; i < n_ready; i++)
	    {
	      rtx insn = ready[i];
	      enum rtx_code code;

	      fprintf (stream, " %d", INSN_UID (ready[i]));
	      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i'
		  || (code = GET_CODE (PATTERN (insn))) == USE
		  || code == CLOBBER || code == ADDR_VEC)
		fputs ("(?)", stream);

	      else if (get_attr_insn_size (insn) != INSN_SIZE_SHORT)
		fputs ("(l)", stream);

	      else
		fputs ("(s)", stream);
	    }

	  fprintf (stream, "\n");
#endif
	}
    }
}
+
+
/* If we have a machine that can issue a variable # of instructions
   per cycle, indicate how many more instructions can be issued
   after the current one.  Also tracks m32r_sched_odd_word_p, the
   current half-word alignment, as insns are issued.  */
int
m32r_sched_variable_issue (stream, verbose, insn, how_many)
     FILE *stream;
     int verbose;
     rtx insn;
     int how_many;
{
  int orig_odd_word_p = m32r_sched_odd_word_p;
  int short_p = FALSE;

  how_many--;
  if (how_many > 0 && !TARGET_DEBUG)
    {
      /* Non-instructions (notes etc.) don't consume an issue slot.  */
      if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
	how_many++;

      /* Neither do USE/CLOBBER/ADDR_VEC patterns.  */
      else if (GET_CODE (PATTERN (insn)) == USE
	       || GET_CODE (PATTERN (insn)) == CLOBBER
	       || GET_CODE (PATTERN (insn)) == ADDR_VEC)
	how_many++;

      /* A long insn fills the word; nothing more can issue this cycle.  */
      else if (get_attr_insn_size (insn) != INSN_SIZE_SHORT)
	{
	  how_many = 0;
	  m32r_sched_odd_word_p = 0;
	}

      /* A short insn flips the half-word boundary.  */
      else
	{
	  m32r_sched_odd_word_p = !m32r_sched_odd_word_p;
	  short_p = TRUE;
	}
    }

  if (verbose > 7 && stream)
    fprintf (stream,
	     ";;\t\t::: %s insn %d starts on an %s word, can emit %d more instruction(s)\n",
	     short_p ? "short" : "long",
	     INSN_UID (insn),
	     orig_odd_word_p ? "odd" : "even",
	     how_many);

  return how_many;
}
+
+
+/* Type of function DECL.
+
+ The result is cached. To reset the cache at the end of a function,
+ call with DECL = NULL_TREE. */
+
+enum m32r_function_type
+m32r_compute_function_type (decl)
+ tree decl;
+{
+ /* Cached value. */
+ static enum m32r_function_type fn_type = M32R_FUNCTION_UNKNOWN;
+ /* Last function we were called for. */
+ static tree last_fn = NULL_TREE;
+
+ /* Resetting the cached value? */
+ if (decl == NULL_TREE)
+ {
+ fn_type = M32R_FUNCTION_UNKNOWN;
+ last_fn = NULL_TREE;
+ return fn_type;
+ }
+
+ if (decl == last_fn && fn_type != M32R_FUNCTION_UNKNOWN)
+ return fn_type;
+
+ /* Compute function type. */
+ fn_type = (lookup_attribute ("interrupt", DECL_MACHINE_ATTRIBUTES (current_function_decl)) != NULL_TREE
+ ? M32R_FUNCTION_INTERRUPT
+ : M32R_FUNCTION_NORMAL);
+
+ last_fn = decl;
+ return fn_type;
+}
+ /* Function prologue/epilogue handlers. */
+
+/* M32R stack frames look like:
+
+ Before call After call
+ +-----------------------+ +-----------------------+
+ | | | |
+ high | local variables, | | local variables, |
+ mem | reg save area, etc. | | reg save area, etc. |
+ | | | |
+ +-----------------------+ +-----------------------+
+ | | | |
+ | arguments on stack. | | arguments on stack. |
+ | | | |
+ SP+0->+-----------------------+ +-----------------------+
+ | reg parm save area, |
+ | only created for |
+ | variable argument |
+ | functions |
+ +-----------------------+
+ | previous frame ptr |
+ +-----------------------+
+ | |
+ | register save area |
+ | |
+ +-----------------------+
+ | return address |
+ +-----------------------+
+ | |
+ | local variables |
+ | |
+ +-----------------------+
+ | |
+ | alloca allocations |
+ | |
+ +-----------------------+
+ | |
+ low | arguments on stack |
+ memory | |
+ SP+0->+-----------------------+
+
+Notes:
+1) The "reg parm save area" does not exist for non variable argument fns.
+2) The "reg parm save area" can be eliminated completely if we saved regs
+ containing anonymous args separately but that complicates things too
+ much (so it's not done).
+3) The return address is saved after the register save area so as to have as
+ many insns as possible between the restoration of `lr' and the `jmp lr'.
+*/
+
/* Structure to be filled in by m32r_compute_frame_size with register
   save masks, and offsets for the current function.
   All sizes are in bytes; see the frame layout diagram above.  */
struct m32r_frame_info
{
  unsigned int total_size;	/* # bytes that the entire frame takes up */
  unsigned int extra_size;	/* # bytes of extra stuff */
  unsigned int pretend_size;	/* # bytes we push and pretend caller did */
  unsigned int args_size;	/* # bytes that outgoing arguments take up */
  unsigned int reg_size;	/* # bytes needed to store regs */
  unsigned int var_size;	/* # bytes that variables take up */
  unsigned int gmask;		/* mask of saved gp registers (bit N = reg N) */
  unsigned int save_fp;		/* nonzero if fp must be saved */
  unsigned int save_lr;		/* nonzero if lr (return addr) must be saved */
  int initialized;		/* nonzero if frame size already calculated */
};
+
/* Current frame information calculated by m32r_compute_frame_size.  */
static struct m32r_frame_info current_frame_info;

/* Zero structure to initialize current_frame_info.  */
static struct m32r_frame_info zero_frame_info;

/* Bit masks for the frame pointer and link register within gmask-style
   register masks.  */
#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))

/* Tell prologue and epilogue if register REGNO should be saved / restored.
   Call-used registers count too when INTERRUPT_P (interrupt handlers must
   preserve everything they touch).
   The return address and frame pointer are treated separately.
   Don't consider them here.  */
#define MUST_SAVE_REGISTER(regno, interrupt_p) \
((regno) != RETURN_ADDR_REGNUM && (regno) != FRAME_POINTER_REGNUM \
 && (regs_ever_live[regno] && (!call_used_regs[regno] || interrupt_p)))

#define MUST_SAVE_FRAME_POINTER (regs_ever_live[FRAME_POINTER_REGNUM])
#define MUST_SAVE_RETURN_ADDR (regs_ever_live[RETURN_ADDR_REGNUM] || profile_flag)

/* Instruction sizes in bytes.  */
#define SHORT_INSN_SIZE 2	/* size of small instructions */
#define LONG_INSN_SIZE 4	/* size of long instructions */
+
/* Return the bytes needed to compute the frame pointer from the current
   stack pointer.  Also fills in current_frame_info as a side effect.

   SIZE is the size needed for local variables.  */

unsigned int
m32r_compute_frame_size (size)
     int size;			/* # of var. bytes allocated.  */
{
  int regno;
  unsigned int total_size, var_size, args_size, pretend_size, extra_size;
  unsigned int reg_size, frame_size;
  unsigned int gmask;
  enum m32r_function_type fn_type;
  int interrupt_p;

  var_size	= M32R_STACK_ALIGN (size);
  args_size	= M32R_STACK_ALIGN (current_function_outgoing_args_size);
  pretend_size	= current_function_pretend_args_size;
  extra_size	= FIRST_PARM_OFFSET (0);
  total_size	= extra_size + pretend_size + args_size + var_size;
  reg_size	= 0;
  gmask		= 0;

  /* See if this is an interrupt handler.  Call used registers must be saved
     for them too.  */
  fn_type = m32r_compute_function_type (current_function_decl);
  interrupt_p = M32R_INTERRUPT_P (fn_type);

  /* Calculate space needed for registers.  */

  for (regno = 0; regno < M32R_MAX_INT_REGS; regno++)
    {
      if (MUST_SAVE_REGISTER (regno, interrupt_p))
	{
	  reg_size += UNITS_PER_WORD;
	  gmask |= 1 << regno;
	}
    }

  /* The frame pointer and return address are handled separately from
     gmask; see MUST_SAVE_REGISTER.  */
  current_frame_info.save_fp = MUST_SAVE_FRAME_POINTER;
  current_frame_info.save_lr = MUST_SAVE_RETURN_ADDR;

  reg_size += ((current_frame_info.save_fp + current_frame_info.save_lr)
	       * UNITS_PER_WORD);
  total_size += reg_size;

  /* ??? Not sure this is necessary, and I don't think the epilogue
     handler will do the right thing if this changes total_size.  */
  total_size = M32R_STACK_ALIGN (total_size);

  frame_size = total_size - (pretend_size + reg_size);

  /* Save computed information.  */
  current_frame_info.total_size   = total_size;
  current_frame_info.extra_size   = extra_size;
  current_frame_info.pretend_size = pretend_size;
  current_frame_info.var_size     = var_size;
  current_frame_info.args_size    = args_size;
  current_frame_info.reg_size     = reg_size;
  current_frame_info.gmask        = gmask;
  current_frame_info.initialized  = reload_completed;

  /* Ok, we're done.  */
  return total_size;
}
+
/* When the `length' insn attribute is used, this macro specifies the
   value to be assigned to the address of the first insn in a
   function.  If not specified, 0 is used.

   Computing the frame size here ensures the layout is known before any
   insn length calculations that may depend on it.  */

int
m32r_first_insn_address ()
{
  if (! current_frame_info.initialized)
    m32r_compute_frame_size (get_frame_size ());

  return 0;
}
+
+/* Expand the m32r prologue as a series of insns. */
+
+void
+m32r_expand_prologue ()
+{
+ int regno;
+ int frame_size;
+ unsigned int gmask;
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (get_frame_size ());
+
+ gmask = current_frame_info.gmask;
+
+ /* These cases shouldn't happen. Catch them now. */
+ if (current_frame_info.total_size == 0 && gmask)
+ abort ();
+
+ /* Allocate space for register arguments if this is a variadic function. */
+ if (current_frame_info.pretend_size != 0)
+ {
+ /* Use a HOST_WIDE_INT temporary, since negating an unsigned int gives
+ the wrong result on a 64-bit host. */
+ HOST_WIDE_INT pretend_size = current_frame_info.pretend_size;
+ emit_insn (gen_addsi3 (stack_pointer_rtx,
+ stack_pointer_rtx,
+ GEN_INT (-pretend_size)));
+ }
+
+ /* Save any registers we need to and set up fp. */
+
+ if (current_frame_info.save_fp)
+ emit_insn (gen_movsi_push (stack_pointer_rtx, frame_pointer_rtx));
+
+ gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);
+
+ /* Save any needed call-saved regs (and call-used if this is an
+ interrupt handler). */
+ for (regno = 0; regno <= M32R_MAX_INT_REGS; ++regno)
+ {
+ if ((gmask & (1 << regno)) != 0)
+ emit_insn (gen_movsi_push (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, regno)));
+ }
+
+ if (current_frame_info.save_lr)
+ emit_insn (gen_movsi_push (stack_pointer_rtx,
+ gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM)));
+
+ /* Allocate the stack frame. */
+ frame_size = (current_frame_info.total_size
+ - (current_frame_info.pretend_size
+ + current_frame_info.reg_size));
+
+ if (frame_size == 0)
+ ; /* nothing to do */
+ else if (frame_size <= 32768)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-frame_size)));
+ else
+ {
+ rtx tmp = gen_rtx_REG (Pmode, PROLOGUE_TMP_REGNUM);
+ emit_insn (gen_movsi (tmp, GEN_INT (frame_size)));
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
+ }
+
+ if (frame_pointer_needed)
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+
+ if (profile_flag || profile_block_flag)
+ emit_insn (gen_blockage ());
+}
+
+
+/* Set up the stack and frame pointer (if desired) for the function.
+ Note, if this is changed, you need to mirror the changes in
+ m32r_compute_frame_size which calculates the prolog size. */
+
+void
+m32r_output_function_prologue (file, size)
+ FILE * file;
+ int size;
+{
+ enum m32r_function_type fn_type = m32r_compute_function_type (current_function_decl);
+
+ /* If this is an interrupt handler, mark it as such. */
+ if (M32R_INTERRUPT_P (fn_type))
+ {
+ fprintf (file, "\t%s interrupt handler\n",
+ ASM_COMMENT_START);
+ }
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (size);
+
+ /* This is only for the human reader. */
+ fprintf (file,
+ "\t%s PROLOGUE, vars= %d, regs= %d, args= %d, extra= %d\n",
+ ASM_COMMENT_START,
+ current_frame_info.var_size,
+ current_frame_info.reg_size / 4,
+ current_frame_info.args_size,
+ current_frame_info.extra_size);
+}
+
/* Do any necessary cleanup after a function to restore stack, frame,
   and regs.  Emits assembler text directly (unlike the prologue, which
   is expanded as RTL): deallocate locals/args, pop lr, saved regs and fp,
   remove the varargs area, then return via `jmp lr' (or `rte' for
   interrupt handlers).  */

void
m32r_output_function_epilogue (file, size)
     FILE * file;
     int size;
{
  int regno;
  int noepilogue = FALSE;
  int total_size;
  enum m32r_function_type fn_type = m32r_compute_function_type (current_function_decl);

  /* This is only for the human reader.  */
  fprintf (file, "\t%s EPILOGUE\n", ASM_COMMENT_START);

  if (!current_frame_info.initialized)
    abort ();
  total_size = current_frame_info.total_size;

  if (total_size == 0)
    {
      rtx insn = get_last_insn ();

      /* If the last insn was a BARRIER, we don't have to write any code
	 because a jump (aka return) was put there.  */
      if (GET_CODE (insn) == NOTE)
	insn = prev_nonnote_insn (insn);
      if (insn && GET_CODE (insn) == BARRIER)
	noepilogue = TRUE;
    }

  if (!noepilogue)
    {
      unsigned int pretend_size = current_frame_info.pretend_size;
      unsigned int frame_size = total_size - pretend_size;
      unsigned int var_size = current_frame_info.var_size;
      unsigned int args_size = current_frame_info.args_size;
      unsigned int gmask = current_frame_info.gmask;
      /* alloca may have moved sp below the frame, so go via fp then.  */
      int can_trust_sp_p = !current_function_calls_alloca;
      char * sp_str = reg_names[STACK_POINTER_REGNUM];
      char * fp_str = reg_names[FRAME_POINTER_REGNUM];

      /* The first thing to do is point the sp at the bottom of the register
	 save area.  */
      if (can_trust_sp_p)
	{
	  unsigned int reg_offset = var_size + args_size;
	  /* Pick addi (8-bit imm), add3 (16-bit imm) or ld24+add
	     depending on the offset's magnitude.  */
	  if (reg_offset == 0)
	    ; /* nothing to do */
	  else if (reg_offset < 128)
	    fprintf (file, "\taddi %s,%s%d\n",
		     sp_str, IMMEDIATE_PREFIX, reg_offset);
	  else if (reg_offset < 32768)
	    fprintf (file, "\tadd3 %s,%s,%s%d\n",
		     sp_str, sp_str, IMMEDIATE_PREFIX, reg_offset);
	  else
	    fprintf (file, "\tld24 %s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	}
      else if (frame_pointer_needed)
	{
	  unsigned int reg_offset = var_size + args_size;
	  if (reg_offset == 0)
	    fprintf (file, "\tmv %s,%s\n", sp_str, fp_str);
	  else if (reg_offset < 32768)
	    fprintf (file, "\tadd3 %s,%s,%s%d\n",
		     sp_str, fp_str, IMMEDIATE_PREFIX, reg_offset);
	  else
	    fprintf (file, "\tld24 %s,%s%d\n\tadd %s,%s\n",
		     reg_names[PROLOGUE_TMP_REGNUM],
		     IMMEDIATE_PREFIX, reg_offset,
		     sp_str, reg_names[PROLOGUE_TMP_REGNUM]);
	}
      else
	abort ();

      if (current_frame_info.save_lr)
	fprintf (file, "\tpop %s\n", reg_names[RETURN_ADDR_REGNUM]);

      /* Restore any saved registers, in reverse order of course.  */
      gmask &= ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK);
      for (regno = M32R_MAX_INT_REGS - 1; regno >= 0; --regno)
	{
	  if ((gmask & (1L << regno)) != 0)
	    fprintf (file, "\tpop %s\n", reg_names[regno]);
	}

      if (current_frame_info.save_fp)
	fprintf (file, "\tpop %s\n", fp_str);

      /* Remove varargs area if present.  */
      if (current_frame_info.pretend_size != 0)
	fprintf (file, "\taddi %s,%s%d\n",
		 sp_str, IMMEDIATE_PREFIX, current_frame_info.pretend_size);

      /* Emit the return instruction.  */
      if (M32R_INTERRUPT_P (fn_type))
	fprintf (file, "\trte\n");
      else
	fprintf (file, "\tjmp %s\n", reg_names[RETURN_ADDR_REGNUM]);
    }

#if 0 /* no longer needed */
  /* Ensure the function cleanly ends on a 32 bit boundary.  */
  fprintf (file, "\t.fillinsn\n");
#endif

  /* Reset state info for each function.  */
  current_frame_info = zero_frame_info;
  m32r_compute_function_type (NULL_TREE);
}
+
+/* Return non-zero if this function is known to have a null or 1 instruction
+ epilogue. */
+
+int
+direct_return ()
+{
+ if (!reload_completed)
+ return FALSE;
+
+ if (! current_frame_info.initialized)
+ m32r_compute_frame_size (get_frame_size ());
+
+ return current_frame_info.total_size == 0;
+}
+
+
/* PIC */

/* Emit special PIC prologues and epilogues.
   The m32r port currently requires no special PIC setup.  */

void
m32r_finalize_pic ()
{
  /* nothing to do */
}
+
/* Nested function support. */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   NOTE(review): currently a stub -- trampolines are not initialized
   on this port.  */

void
m32r_initialize_trampoline (tramp, fnaddr, cxt)
     rtx tramp, fnaddr, cxt;
{
}
+
+/* Set the cpu type and print out other fancy things,
+ at the top of the file. */
+
+void
+m32r_asm_file_start (file)
+ FILE * file;
+{
+ if (flag_verbose_asm)
+ fprintf (file, "%s M32R/D special options: -G %d\n",
+ ASM_COMMENT_START, g_switch_value);
+}
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+   For `%' followed by punctuation, CODE is the punctuation and X is null.
+
+   This implements the PRINT_OPERAND macro for the m32r port.  A CODE of
+   0 falls through to the generic printer at the bottom; unknown codes
+   are reported via output_operand_lossage.  */
+
+void
+m32r_print_operand (file, x, code)
+     FILE * file;
+     rtx x;
+     int code;
+{
+  rtx addr;
+
+  switch (code)
+    {
+      /* The 's' and 'p' codes are used by output_block_move() to
+	 indicate post-increment 's'tores and 'p're-increment loads.  */
+    case 's':
+      if (GET_CODE (x) == REG)
+	fprintf (file, "@+%s", reg_names [REGNO (x)]);
+      else
+	output_operand_lossage ("invalid operand to %s code");
+      return;
+
+    case 'p':
+      if (GET_CODE (x) == REG)
+	fprintf (file, "@%s+", reg_names [REGNO (x)]);
+      else
+	output_operand_lossage ("invalid operand to %p code");
+      return;
+
+    case 'R' :
+      /* Write second word of DImode or DFmode reference,
+	 register or memory.  */
+      if (GET_CODE (x) == REG)
+	fputs (reg_names[REGNO (x)+1], file);
+      else if (GET_CODE (x) == MEM)
+	{
+	  fprintf (file, "@(");
+	  /* Handle possible auto-increment.  Since it is pre-increment and
+	     we have already done it, we can just use an offset of four.  */
+	  /* ??? This is taken from rs6000.c I think.  I don't think it is
+	     currently necessary, but keep it around.  */
+	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
+	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 4));
+	  else
+	    output_address (plus_constant (XEXP (x, 0), 4));
+	  fputc (')', file);
+	}
+      else
+	output_operand_lossage ("invalid operand to %R code");
+      return;
+
+    case 'H' : /* High word */
+    case 'L' : /* Low word */
+      if (GET_CODE (x) == REG)
+	{
+	  /* L = least significant word, H = most significant word */
+	  if ((WORDS_BIG_ENDIAN != 0) ^ (code == 'L'))
+	    fputs (reg_names[REGNO (x)], file);
+	  else
+	    fputs (reg_names[REGNO (x)+1], file);
+	}
+      else if (GET_CODE (x) == CONST_INT
+	       || GET_CODE (x) == CONST_DOUBLE)
+	{
+	  rtx first, second;
+
+	  /* split_double puts the two word-sized halves of the constant
+	     in memory order (first = lower address).  */
+	  split_double (x, &first, &second);
+	  fprintf (file, "0x%08lx",
+		   code == 'L' ? INTVAL (first) : INTVAL (second));
+	}
+      else
+	output_operand_lossage ("invalid operand to %H/%L code");
+      return;
+
+    case 'A' :
+      {
+	/* Print a floating point constant in decimal (for comments).  */
+	REAL_VALUE_TYPE d;
+	char str[30];
+
+	if (GET_CODE (x) != CONST_DOUBLE
+	    || GET_MODE_CLASS (GET_MODE (x)) != MODE_FLOAT)
+	  fatal_insn ("Bad insn for 'A'", x);
+	REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+	REAL_VALUE_TO_DECIMAL (d, "%.20e", str);
+	fprintf (file, "%s", str);
+	return;
+      }
+
+    case 'B' : /* Bottom half */
+    case 'T' : /* Top half */
+      /* Output the argument to a `seth' insn (sets the Top half-word).
+	 For constants output arguments to a seth/or3 pair to set Top and
+	 Bottom halves.  For symbols output arguments to a seth/add3 pair to
+	 set Top and Bottom halves.  The difference exists because for
+	 constants seth/or3 is more readable but for symbols we need to use
+	 the same scheme as `ld' and `st' insns (16 bit addend is signed).  */
+      switch (GET_CODE (x))
+	{
+	case CONST_INT :
+	case CONST_DOUBLE :
+	  {
+	    rtx first, second;
+
+	    split_double (x, &first, &second);
+	    x = WORDS_BIG_ENDIAN ? second : first;
+	    fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+		     "0x%x",
+#else
+		     "0x%lx",
+#endif
+		     (code == 'B'
+		      ? INTVAL (x) & 0xffff
+		      : (INTVAL (x) >> 16) & 0xffff));
+	  }
+	  return;
+	case CONST :
+	case SYMBOL_REF :
+	  /* Small data symbols are addressed off the SDA base register,
+	     so their bottom half uses the sda() relocation instead.  */
+	  if (code == 'B'
+	      && small_data_operand (x, VOIDmode))
+	    {
+	      fputs ("sda(", file);
+	      output_addr_const (file, x);
+	      fputc (')', file);
+	      return;
+	    }
+	  /* fall through */
+	case LABEL_REF :
+	  fputs (code == 'T' ? "shigh(" : "low(", file);
+	  output_addr_const (file, x);
+	  fputc (')', file);
+	  return;
+	default :
+	  output_operand_lossage ("invalid operand to %T/%B code");
+	  return;
+	}
+      break;
+
+    case 'U' :
+      /* ??? wip */
+      /* Output a load/store with update indicator if appropriate.  */
+      if (GET_CODE (x) == MEM)
+	{
+	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
+	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+	    fputs (".a", file);
+	}
+      else
+	output_operand_lossage ("invalid operand to %U code");
+      return;
+
+    case 'N' :
+      /* Print a constant value negated.  */
+      if (GET_CODE (x) == CONST_INT)
+	output_addr_const (file, GEN_INT (- INTVAL (x)));
+      else
+	output_operand_lossage ("invalid operand to %N code");
+      return;
+
+    case 'X' :
+      /* Print a const_int in hex.  Used in comments.
+	 NOTE(review): unlike the other codes, a non-CONST_INT here is
+	 silently ignored rather than reported — presumably harmless
+	 since the output only appears in comments.  */
+      if (GET_CODE (x) == CONST_INT)
+	fprintf (file,
+#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_INT
+		 "0x%x",
+#else
+		 "0x%lx",
+#endif
+		 INTVAL (x));
+      return;
+
+    case '#' :
+      /* Emit the prefix that introduces an immediate operand.  */
+      fputs (IMMEDIATE_PREFIX, file);
+      return;
+
+#if 0 /* ??? no longer used */
+    case '@' :
+      fputs (reg_names[SDA_REGNUM], file);
+      return;
+#endif
+
+    case 0 :
+      /* Do nothing special.  */
+      break;
+
+    default :
+      /* Unknown flag.  */
+      output_operand_lossage ("invalid operand output code");
+    }
+
+  /* Generic printing: no (or empty) operand code.  */
+  switch (GET_CODE (x))
+    {
+    case REG :
+      fputs (reg_names[REGNO (x)], file);
+      break;
+
+    case MEM :
+      addr = XEXP (x, 0);
+      if (GET_CODE (addr) == PRE_INC)
+	{
+	  if (GET_CODE (XEXP (addr, 0)) != REG)
+	    fatal_insn ("Pre-increment address is not a register", x);
+
+	  fprintf (file, "@+%s", reg_names[REGNO (XEXP (addr, 0))]);
+	}
+      else if (GET_CODE (addr) == PRE_DEC)
+	{
+	  if (GET_CODE (XEXP (addr, 0)) != REG)
+	    fatal_insn ("Pre-decrement address is not a register", x);
+
+	  fprintf (file, "@-%s", reg_names[REGNO (XEXP (addr, 0))]);
+	}
+      else if (GET_CODE (addr) == POST_INC)
+	{
+	  if (GET_CODE (XEXP (addr, 0)) != REG)
+	    fatal_insn ("Post-increment address is not a register", x);
+
+	  fprintf (file, "@%s+", reg_names[REGNO (XEXP (addr, 0))]);
+	}
+      else
+	{
+	  fputs ("@(", file);
+	  output_address (XEXP (x, 0));
+	  fputc (')', file);
+	}
+      break;
+
+    case CONST_DOUBLE :
+      /* We handle SFmode constants here as output_addr_const doesn't.  */
+      if (GET_MODE (x) == SFmode)
+	{
+	  REAL_VALUE_TYPE d;
+	  long l;
+
+	  REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+	  REAL_VALUE_TO_TARGET_SINGLE (d, l);
+	  fprintf (file, "0x%08lx", l);
+	  break;
+	}
+
+      /* Fall through.  Let output_addr_const deal with it.  */
+
+    default :
+      output_addr_const (file, x);
+      break;
+    }
+}
+
+/* Print a memory address as an operand to reference that memory location.
+
+   Implements PRINT_OPERAND_ADDRESS for the m32r port.  Handles plain
+   registers, register+offset, lo_sum/sda addressing, and the pre/post
+   increment/decrement forms; anything else is passed to
+   output_addr_const or reported via fatal_insn.  */
+
+void
+m32r_print_operand_address (file, addr)
+     FILE * file;
+     rtx addr;
+{
+  register rtx base;
+  register rtx index = 0;
+  int offset = 0;
+
+  switch (GET_CODE (addr))
+    {
+    case REG :
+      fputs (reg_names[REGNO (addr)], file);
+      break;
+
+    case PLUS :
+      /* Decompose the PLUS into base/offset or base/index, accepting
+	 the constant on either side.  */
+      if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
+	offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);
+      else if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+	offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);
+      else
+	base = XEXP (addr, 0), index = XEXP (addr, 1);
+      if (GET_CODE (base) == REG)
+	{
+	  /* Print the offset first (if present) to conform to the manual.  */
+	  if (index == 0)
+	    {
+	      if (offset != 0)
+		fprintf (file, "%d,", offset);
+	      fputs (reg_names[REGNO (base)], file);
+	    }
+	  /* The chip doesn't support this, but left in for generality.  */
+	  else if (GET_CODE (index) == REG)
+	    fprintf (file, "%s,%s",
+		     reg_names[REGNO (base)], reg_names[REGNO (index)]);
+	  /* Not sure this can happen, but leave in for now.  */
+	  else if (GET_CODE (index) == SYMBOL_REF)
+	    {
+	      output_addr_const (file, index);
+	      fputc (',', file);
+	      fputs (reg_names[REGNO (base)], file);
+	    }
+	  else
+	    fatal_insn ("Bad address", addr);
+	}
+      else if (GET_CODE (base) == LO_SUM)
+	{
+	  /* (plus (lo_sum reg sym) const): fold the constant into the
+	     low-part relocation.  */
+	  if (index != 0
+	      || GET_CODE (XEXP (base, 0)) != REG)
+	    abort ();
+	  if (small_data_operand (XEXP (base, 1), VOIDmode))
+	    fputs ("sda(", file);
+	  else
+	    fputs ("low(", file);
+	  output_addr_const (file, plus_constant (XEXP (base, 1), offset));
+	  fputs ("),", file);
+	  fputs (reg_names[REGNO (XEXP (base, 0))], file);
+	}
+      else
+	fatal_insn ("Bad address", addr);
+      break;
+
+    case LO_SUM :
+      if (GET_CODE (XEXP (addr, 0)) != REG)
+	fatal_insn ("Lo_sum not of register", addr);
+      /* Small data symbols get the sda() relocation; everything else
+	 uses low() to match the seth/add3 scheme.  */
+      if (small_data_operand (XEXP (addr, 1), VOIDmode))
+	fputs ("sda(", file);
+      else
+	fputs ("low(", file);
+      output_addr_const (file, XEXP (addr, 1));
+      fputs ("),", file);
+      fputs (reg_names[REGNO (XEXP (addr, 0))], file);
+      break;
+
+    case PRE_INC : /* Assume SImode */
+      fprintf (file, "+%s", reg_names[REGNO (XEXP (addr, 0))]);
+      break;
+
+    case PRE_DEC : /* Assume SImode */
+      fprintf (file, "-%s", reg_names[REGNO (XEXP (addr, 0))]);
+      break;
+
+    case POST_INC : /* Assume SImode */
+      fprintf (file, "%s+", reg_names[REGNO (XEXP (addr, 0))]);
+      break;
+
+    default :
+      output_addr_const (file, addr);
+      break;
+    }
+}
+
+/* Return true if OPERAND1 and OPERAND2 are the constants 0 and 1,
+   in either order.  */
+int
+zero_and_one (operand1, operand2)
+     rtx operand1;
+     rtx operand2;
+{
+  HOST_WIDE_INT v1, v2;
+
+  if (GET_CODE (operand1) != CONST_INT || GET_CODE (operand2) != CONST_INT)
+    return 0;
+
+  v1 = INTVAL (operand1);
+  v2 = INTVAL (operand2);
+
+  return (v1 == 0 && v2 == 1) || (v1 == 1 && v2 == 0);
+}
+
+/* Return non-zero if OPERAND is suitable for use in a conditional move
+   sequence: a register, or a constant that fits in 8 bits.  */
+int
+conditional_move_operand (operand, int_mode)
+     rtx operand;
+     int int_mode;
+{
+  enum machine_mode mode = (enum machine_mode) int_mode;
+
+  /* Only defined for simple integers so far...  */
+  if (mode != SImode && mode != HImode && mode != QImode)
+    return FALSE;
+
+  /* At the moment we can handle moving registers and loading constants.  */
+  /* To be added: Addition/subtraction/bitops/multiplication of registers.  */
+  if (GET_CODE (operand) == REG)
+    return 1;
+
+  if (GET_CODE (operand) == CONST_INT)
+    return INT8_P (INTVAL (operand));
+
+  return 0;
+}
+
+/* Return true if OP is a test of the carry bit: an EQ or NE comparison,
+   in CCmode or VOIDmode, of the carry register against zero.  */
+int
+carry_compare_operand (op, int_mode)
+     rtx op;
+     int int_mode;
+{
+  rtx reg, val;
+
+  if (GET_MODE (op) != CCmode && GET_MODE (op) != VOIDmode)
+    return FALSE;
+
+  if (GET_CODE (op) != NE && GET_CODE (op) != EQ)
+    return FALSE;
+
+  reg = XEXP (op, 0);
+  val = XEXP (op, 1);
+
+  return (GET_CODE (reg) == REG
+	  && REGNO (reg) == CARRY_REGNUM
+	  && GET_CODE (val) == CONST_INT
+	  && INTVAL (val) == 0);
+}
+
+/* Place into BUFFER an assembler instruction, executable in the S pipe,
+   which moves SOURCE into DEST.  DEST must be a register; SOURCE may be
+   a register or a small constant (callers guarantee it satisfies
+   conditional_move_operand, i.e. fits in 8 bits).  Aborts otherwise.  */
+static void
+emit_S_clause (dest, source, buffer)
+     rtx dest;
+     rtx source;
+     char * buffer;
+{
+  char * dest_reg = reg_names [REGNO (dest)];
+
+  switch (GET_CODE (source))
+    {
+    case REG:
+      sprintf (buffer, "mv %s, %s", dest_reg, reg_names [REGNO (source)]);
+      break;
+
+    case CONST_INT:
+      /* Cast: INTVAL is a HOST_WIDE_INT, which may be wider than int and
+	 would mismatch the %d format; the value is known to fit in 8
+	 bits here, so the cast is safe.  */
+      sprintf (buffer, "ldi %s, #%d", dest_reg, (int) INTVAL (source));
+      break;
+
+    /* Todo: other S pipe instructions.  */
+    default:
+      abort ();
+    }
+}
+
+
+/* Generate the correct assembler code to handle the conditional loading of a
+   value into a register.  It is known that the operands satisfy the
+   conditional_move_operand() function above.  The destination is operand[0].
+   The condition is operand [1].  The 'true' value is operand [2] and the
+   'false' value is operand [3].
+
+   Returns a pointer to a static buffer, so the result must be consumed
+   before the next call (not reentrant).  */
+char *
+emit_cond_move (operands, insn)
+     rtx * operands;
+     rtx insn;
+{
+  static char buffer [100];
+
+  buffer [0] = 0;
+
+  /* Destination must be a register.  */
+  if (GET_CODE (operands [0]) != REG)
+    abort();
+  if (! conditional_move_operand (operands [2], SImode))
+    abort();
+  if (! conditional_move_operand (operands [3], SImode))
+    abort();
+
+  /* Check to see if the test is reversed.  If so, swap the 'true' and
+     'false' values so the rest of the code need only handle EQ.  */
+  if (GET_CODE (operands [1]) == NE)
+    {
+      rtx tmp = operands [2];
+      operands [2] = operands [3];
+      operands [3] = tmp;
+    }
+
+  /* Catch a special case where 0 or 1 is being loaded into the destination.
+     Since we already have these values in the C bit we can use a special
+     instruction.  */
+  if (zero_and_one (operands [2], operands [3]))
+    {
+      char * dest = reg_names [REGNO (operands [0])];
+
+      /* mvfc copies the condition bit register into DEST.  */
+      sprintf (buffer, "mvfc %s, cbr", dest);
+
+      /* If the true value was '0' then we need to invert the results of the move.  */
+      if (INTVAL (operands [2]) == 0)
+	sprintf (buffer + strlen (buffer), "\n\txor3 %s, %s, #1",
+		 dest, dest);
+
+      return buffer;
+    }
+
+  /* Test to see if the then clause is redundant.  */
+  if (GET_CODE (operands [2]) == REG
+      && REGNO (operands [2]) == REGNO (operands [0]))
+    /* Do not do anything for the then clause.  */
+    ;
+  else
+    {
+      /* Generate the then clause: a conditional-set insn paralleled
+	 with an S-pipe move.  */
+      sprintf (buffer, "snc || ");
+      emit_S_clause (operands [0], operands [2], buffer + strlen (buffer));
+    }
+
+  /* Test to see if the else clause is redundant.  */
+  if (GET_CODE (operands [3]) == REG
+      && REGNO (operands [3]) == REGNO (operands [0]))
+    /* Do not do anything for the else clause.  */
+    ;
+  else
+    {
+      /* Generate the else clause, separated from any then clause
+	 already emitted.  */
+      if (* buffer != 0)
+	strcat (buffer, "\n\t");
+
+      strcat (buffer, "sc || ");
+      emit_S_clause (operands [0], operands [3], buffer + strlen (buffer));
+    }
+
+  return buffer;
+}
+
+
+/* Return true if OP is a test of the carry bit that can be used to
+   start a conditional execution sequence.  This is very similar to
+   carry_compare_operand() except that it also allows a test for
+   'register == register' and 'register == 0' as well as the normal
+   'carry flag set'.  */
+int
+conditional_compare_operand (op, int_mode)
+     rtx op;
+     int int_mode;
+{
+  rtx lhs, rhs;
+
+  if (GET_MODE (op) != CCmode && GET_MODE (op) != VOIDmode)
+    return FALSE;
+
+  /* Only the six ordinary comparison codes are acceptable.  */
+  switch (GET_CODE (op))
+    {
+    case NE: case EQ: case LE: case GE: case LT: case GT:
+      break;
+    default:
+      return FALSE;
+    }
+
+  lhs = XEXP (op, 0);
+  if (GET_CODE (lhs) != REG)
+    return FALSE;
+
+  /* Allow a test of 'reg eq/ne reg' or 'Carry flag set/not set'.  */
+  rhs = XEXP (op, 1);
+  if (GET_CODE (rhs) == CONST_INT)
+    return INTVAL (rhs) == 0;
+
+  return GET_CODE (rhs) == REG;
+}
+
+/* Return true if the binary operator OP can go inside of a s{n}c ||
+   operation.  */
+int
+binary_parallel_operator (op, int_mode)
+     rtx op;
+     int int_mode;
+{
+  rtx src1, src2;
+
+  /* Can only handle integer operations.  */
+  switch (GET_MODE (op))
+    {
+    case SImode: case HImode: case QImode:
+      break;
+    default:
+      return FALSE;
+    }
+
+  /* Can only handle simple binary operations.  */
+  switch (GET_CODE (op))
+    {
+    case PLUS: case MINUS: case MULT: case AND: case IOR: case XOR:
+      break;
+    default:
+      return FALSE;
+    }
+
+  /* Look through subregs to find the underlying operands.  */
+  src1 = XEXP (op, 0);
+  while (GET_CODE (src1) == SUBREG)
+    src1 = SUBREG_REG (src1);
+
+  src2 = XEXP (op, 1);
+  while (GET_CODE (src2) == SUBREG)
+    src2 = SUBREG_REG (src2);
+
+  if (GET_CODE (src1) != REG)
+    return FALSE;
+
+  /* Allowable operations are entirely register based
+     or the addition of a small constant to a register.  */
+  if (GET_CODE (src2) == REG)
+    return TRUE;
+
+  if (GET_CODE (src2) == CONST_INT
+      && (GET_CODE (op) == PLUS || GET_CODE (op) == MINUS))
+    return INT8_P (INTVAL (src2));
+
+  return FALSE;
+}
+
+/* Generate into BUFFER the assembler statement for the given comparison
+   RTL OPERAND.  The first arm of the comparison must be a register, or
+   the function aborts.  Returns true if the sense of the comparison is
+   reversed, i.e. if the emitted test sets the condition bit when the
+   original comparison is false.  */
+static int
+generate_comparison (operand, buffer)
+     rtx operand;
+     char * buffer;
+{
+  rtx op1 = XEXP (operand, 0);
+  rtx op2 = XEXP (operand, 1);
+  int reverse_sense = FALSE;
+
+  if (GET_CODE (op1) != REG)
+    abort ();
+
+  /* If we are testing the carry bit the comparison has already been generated.
+     Since there is only one bit, the only viable tests are NE and EQ.  If the
+     test is NE, then we must reverse the sense of the comparison.  */
+  if (REGNO (op1) == CARRY_REGNUM)
+    return GET_CODE (operand) == NE;
+
+  /* Handle tests of a value in a register.  */
+  switch (GET_CODE (operand))
+    {
+    case NE:
+      reverse_sense = TRUE;
+      /* Fall through.  */
+    case EQ:
+      if (GET_CODE (op2) == REG)
+	sprintf (buffer, "cmpeq %s %s\n\t",
+		 reg_names [REGNO (op1)], reg_names [REGNO (op2)]);
+      else if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
+	sprintf (buffer, "cmpz %s\n\t", reg_names [REGNO (op1)]);
+      else
+	abort ();
+      break;
+
+    case GE:
+      reverse_sense = TRUE;
+      /* Fall through.  */
+    case LT:
+      if (GET_CODE (op2) != CONST_INT || ! INT16_P (INTVAL (op2)))
+	abort ();
+
+      /* Cast: INTVAL is a HOST_WIDE_INT which may be wider than int and
+	 would mismatch %d; INT16_P has just guaranteed the value fits.  */
+      sprintf (buffer, "cmpi %s, #%d\n\t",
+	       reg_names [REGNO (op1)], (int) INTVAL (op2));
+      break;
+
+    case GT:
+      reverse_sense = TRUE;
+      /* Fall through.  */
+    case LE:
+      /* LE/GT are synthesized as LT/GE of (op2 + 1), so the incremented
+	 value must also fit in 16 bits.  */
+      if (GET_CODE (op2) != CONST_INT || ! INT16_P (INTVAL (op2) + 1))
+	abort ();
+
+      sprintf (buffer, "cmpi %s, #%d\n\t",
+	       reg_names [REGNO (op1)], (int) INTVAL (op2) + 1);
+      break;
+
+    default:
+      abort ();
+    }
+
+  /* These generated comparisons are inverted.  */
+  return ! reverse_sense;
+}
+
+
+
+/* Generate the correct assembler code to handle the conditional execution of a
+   simple binary operation.  It is known that the operation satisfies
+   binary_parallel_operand() and that the condition satisfies
+   conditional_compare_operand().  The operation is in operands [4] and its two
+   arguments are in operands [1] and [2].  The destination of the operation is
+   operands [0], which is the same as operands [1].  The condition is in
+   operands [3], and if 'condition_true' is non-zero then the operation should
+   be performed if the condition is true, otherwise it should be performed if
+   the condition is false.
+
+   Returns a pointer to a static buffer, so the result must be consumed
+   before the next call.  */
+char *
+emit_binary_cond_exec (operands, condition_true)
+     rtx * operands;
+     int condition_true;
+{
+  static char buffer [100];
+
+  buffer [0] = 0;
+
+  /* Destination and first operand must be registers.  */
+  if (GET_CODE (operands [0]) != REG
+      || GET_CODE (operands [1]) != REG)
+    abort ();
+
+  /* Destination must be the same as the first operand.  */
+  if (REGNO (operands [0]) != REGNO (operands [1]))
+    abort ();
+
+  /* Generate the comparison and if necessary reverse the sense of the test.  */
+  if (generate_comparison (operands [3], buffer))
+    condition_true = ! condition_true;
+
+  /* Generate the conditional-set insn that guards the following binary
+     operation.  */
+  if (condition_true)
+    strcat (buffer, "snc || ");
+  else
+    strcat (buffer, "sc || ");
+
+  /* Generate the binary operation.  */
+  switch (GET_CODE (operands [4]))
+    {
+    case PLUS:  strcat (buffer, "add "); break;
+    case MINUS: strcat (buffer, "sub "); break;
+    case MULT:  strcat (buffer, "mul "); break;
+    case AND:   strcat (buffer, "and "); break;
+    case IOR:   strcat (buffer, "or "); break;
+    case XOR:   strcat (buffer, "xor "); break;
+    default:    abort ();
+    }
+
+  /* Generate the arguments for the binary operation.  */
+  if (GET_CODE (operands [2]) == REG)
+    sprintf (buffer + strlen (buffer), "%s, %s",
+	     reg_names [REGNO (operands [0])],
+	     reg_names [REGNO (operands [2])]);
+  else if (GET_CODE (operands [2]) == CONST_INT)
+    /* Cast: INTVAL is a HOST_WIDE_INT which may be wider than int and
+       would mismatch %d; the operand predicate only accepts 8 bit
+       constants, so the cast is safe.  */
+    sprintf (buffer + strlen (buffer), "%s, %d",
+	     reg_names [REGNO (operands [0])],
+	     (int) INTVAL (operands [2]));
+  else
+    abort ();
+
+  return buffer;
+}
+
+/* Return true if the unary operator OP can go inside of a s{n}c ||
+   operation.  */
+int
+unary_parallel_operator (op, int_mode)
+     rtx op;
+     int int_mode;
+{
+  rtx arg;
+
+  /* Can only handle integer operations.  */
+  if (GET_MODE (op) != SImode
+      && GET_MODE (op) != HImode
+      && GET_MODE (op) != QImode)
+    return FALSE;
+
+  /* Can only handle simple unary operations.  */
+  if (GET_CODE (op) != NEG && GET_CODE (op) != NOT)
+    return FALSE;
+
+  /* Can only handle register based operations; look through subregs.  */
+  arg = XEXP (op, 0);
+  while (GET_CODE (arg) == SUBREG)
+    arg = SUBREG_REG (arg);
+
+  return GET_CODE (arg) == REG;
+}
+
+/* Generate the correct assembler code to handle the conditional execution
+   of a simple unary operation.  It is known that the operation satisfies
+   unary_parallel_operand() and that the condition satisfies
+   conditional_compare_operand().  The operation is in operands [3] and
+   its argument is in operands [1].  The destination of the operation is
+   operands [0].  The condition is in operands [2], and if 'condition_true' is
+   non-zero then the operation should be performed if the condition is true,
+   otherwise it should be performed if the condition is false.
+
+   Returns a pointer to a static buffer, so the result must be consumed
+   before the next call.  */
+char *
+emit_unary_cond_exec (operands, condition_true)
+     rtx * operands;
+     int condition_true;
+{
+  static char buffer [100];
+  char * insn_name;
+
+  buffer [0] = 0;
+
+  /* Destination and operand must be registers.  */
+  if (GET_CODE (operands [0]) != REG || GET_CODE (operands [1]) != REG)
+    abort ();
+
+  /* Generate the comparison, and if it reports a reversed sense, flip
+     the polarity of the condition.  */
+  if (generate_comparison (operands [2], buffer))
+    condition_true = ! condition_true;
+
+  /* Emit the conditional-set insn that guards the unary operation.  */
+  strcat (buffer, condition_true ? "snc || " : "sc || ");
+
+  /* Pick the unary operation's mnemonic.  */
+  switch (GET_CODE (operands [3]))
+    {
+    case NOT: insn_name = "not "; break;
+    case NEG: insn_name = "neg "; break;
+    default:  abort ();
+    }
+  strcat (buffer, insn_name);
+
+  /* Append the destination and source registers.  */
+  sprintf (buffer + strlen (buffer), "%s, %s",
+	   reg_names [REGNO (operands [0])],
+	   reg_names [REGNO (operands [1])]);
+
+  return buffer;
+}
+
+/* END CYGNUS LOCAL -- meissner/m32r work */
+
+/* Returns true if the registers contained in the two rtl expressions A
+   and B are different.  If either expression is not a register (after
+   stripping subregs) the result is always true.  */
+int
+m32r_not_same_reg (a, b)
+     rtx a;
+     rtx b;
+{
+  int reg_a, reg_b;
+
+  /* Use distinct sentinels so two non-registers never compare equal.  */
+  while (GET_CODE (a) == SUBREG)
+    a = SUBREG_REG (a);
+  reg_a = (GET_CODE (a) == REG) ? (int) REGNO (a) : -1;
+
+  while (GET_CODE (b) == SUBREG)
+    b = SUBREG_REG (b);
+  reg_b = (GET_CODE (b) == REG) ? (int) REGNO (b) : -2;
+
+  return reg_a != reg_b;
+}
+
+
+/* Use a library function to move BYTES_RTX bytes from SRC_REG to
+   DEST_REG.  Emits a call to memcpy (dest, src, n) when the target has
+   the ANSI mem* functions, otherwise bcopy (src, dest, n) — note the
+   swapped pointer argument order between the two.  */
+static void
+block_move_call (dest_reg, src_reg, bytes_rtx)
+     rtx dest_reg;
+     rtx src_reg;
+     rtx bytes_rtx;
+{
+  /* We want to pass the size as Pmode, which will normally be SImode
+     but will be DImode if we are using 64 bit longs and pointers.  */
+  if (GET_MODE (bytes_rtx) != VOIDmode
+      && GET_MODE (bytes_rtx) != Pmode)
+    bytes_rtx = convert_to_mode (Pmode, bytes_rtx, 1);
+
+#ifdef TARGET_MEM_FUNCTIONS
+  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0,
+		     VOIDmode, 3, dest_reg, Pmode, src_reg, Pmode,
+		     convert_to_mode (TYPE_MODE (sizetype), bytes_rtx,
+				      TREE_UNSIGNED (sizetype)),
+		     TYPE_MODE (sizetype));
+#else
+  emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "bcopy"), 0,
+		     VOIDmode, 3, src_reg, Pmode, dest_reg, Pmode,
+		     convert_to_mode (TYPE_MODE (integer_type_node), bytes_rtx,
+				      TREE_UNSIGNED (integer_type_node)),
+		     TYPE_MODE (integer_type_node));
+#endif
+}
+
+/* The maximum number of bytes to copy using pairs of load/store instructions.
+   If a block is larger than this then a loop will be generated to copy
+   MAX_MOVE_BYTES chunks at a time.  The value of 32 is a semi-arbitrary choice.
+   A customer uses Dhrystone as their benchmark, and Dhrystone has a 31 byte
+   string copy in it.  */
+#define MAX_MOVE_BYTES 32
+
+/* Expand string/block move operations.
+
+   operands[0] is the pointer to the destination.
+   operands[1] is the pointer to the source.
+   operands[2] is the number of bytes to move.
+   operands[3] is the alignment.
+
+   Word-aligned copies of known size are emitted inline in chunks of at
+   most MAX_MOVE_BYTES (with a loop when larger); all other cases fall
+   back to a library call via block_move_call().  */
+
+void
+m32r_expand_block_move (operands)
+     rtx operands[];
+{
+  rtx orig_dst = operands[0];
+  rtx orig_src = operands[1];
+  rtx bytes_rtx = operands[2];
+  rtx align_rtx = operands[3];
+  int constp = GET_CODE (bytes_rtx) == CONST_INT;
+  HOST_WIDE_INT bytes = constp ? INTVAL (bytes_rtx) : 0;
+  int align = INTVAL (align_rtx);
+  int leftover;
+  rtx src_reg;
+  rtx dst_reg;
+
+  /* A known zero (or negative) length copy is a no-op.  */
+  if (constp && bytes <= 0)
+    return;
+
+  /* Move the address into scratch registers.  */
+  dst_reg = copy_addr_to_reg (XEXP (orig_dst, 0));
+  src_reg = copy_addr_to_reg (XEXP (orig_src, 0));
+
+  if (align > UNITS_PER_WORD)
+    align = UNITS_PER_WORD;
+
+  /* If we prefer size over speed, always use a function call.
+     If we do not know the size, use a function call.
+     If the blocks are not word aligned, use a function call.  */
+  if (optimize_size || ! constp || align != UNITS_PER_WORD)
+    {
+      block_move_call (dst_reg, src_reg, bytes_rtx);
+      return;
+    }
+
+  leftover = bytes % MAX_MOVE_BYTES;
+  bytes -= leftover;
+
+  /* If necessary, generate a loop to handle the bulk of the copy.  */
+  if (bytes)
+    {
+      rtx label;
+      rtx final_src;
+      rtx at_a_time = GEN_INT (MAX_MOVE_BYTES);
+      rtx rounded_total = GEN_INT (bytes);
+
+      /* If we are going to have to perform this loop more than
+	 once, then generate a label and compute the address the
+	 source register will contain upon completion of the final
+	 iteration.  */
+      if (bytes > MAX_MOVE_BYTES)
+	{
+	  final_src = gen_reg_rtx (Pmode);
+
+	  if (INT16_P(bytes))
+	    emit_insn (gen_addsi3 (final_src, src_reg, rounded_total));
+	  else
+	    {
+	      /* The total does not fit in an add immediate, so load it
+		 into a register first.  */
+	      emit_insn (gen_movsi (final_src, rounded_total));
+	      emit_insn (gen_addsi3 (final_src, final_src, src_reg));
+	    }
+
+	  label = gen_label_rtx ();
+	  emit_label (label);
+	}
+
+      /* It is known that output_block_move() will update src_reg to point
+	 to the word after the end of the source block, and dst_reg to point
+	 to the last word of the destination block, provided that the block
+	 is MAX_MOVE_BYTES long.  */
+      emit_insn (gen_movstrsi_internal (dst_reg, src_reg, at_a_time));
+      emit_insn (gen_addsi3 (dst_reg, dst_reg, GEN_INT (4)));
+
+      /* Note: label and final_src are only initialized when the loop
+	 executes more than once; the same condition guards their use.  */
+      if (bytes > MAX_MOVE_BYTES)
+	{
+	  emit_insn (gen_cmpsi (src_reg, final_src));
+	  emit_jump_insn (gen_bne (label));
+	}
+    }
+
+  /* Copy any remaining sub-chunk tail of the block.  */
+  if (leftover)
+    emit_insn (gen_movstrsi_internal (dst_reg, src_reg, GEN_INT (leftover)));
+}
+
+
+/* Emit load/stores for a small constant word aligned block_move.
+
+   operands[0] is the memory address of the destination.
+   operands[1] is the memory address of the source.
+   operands[2] is the number of bytes to move.
+   operands[3] is a temp register.
+   operands[4] is a temp register.
+
+   On exit the source register has been advanced past the block and the
+   destination register points at its last stored word — behaviour that
+   m32r_expand_block_move() relies on when building copy loops.  */
+
+char *
+m32r_output_block_move (insn, operands)
+     rtx insn;
+     rtx operands[];
+{
+  HOST_WIDE_INT bytes = INTVAL (operands[2]);
+  int first_time;
+  int got_extra = 0;
+
+  /* The operand predicate guarantees 1 .. MAX_MOVE_BYTES.  */
+  if (bytes < 1 || bytes > MAX_MOVE_BYTES)
+    abort ();
+
+  /* We do not have a post-increment store available, so the first set of
+     stores are done without any increment, then the remaining ones can use
+     the pre-increment addressing mode.
+
+     Note: expand_block_move() also relies upon this behaviour when building
+     loops to copy large blocks.  */
+  first_time = 1;
+
+  while (bytes > 0)
+    {
+      if (bytes >= 8)
+	{
+	  /* Copy two words per iteration; %p is a pre-increment load and
+	     %s a pre-increment store (see m32r_print_operand).  */
+	  if (first_time)
+	    {
+	      output_asm_insn ("ld\t%3, %p1", operands);
+	      output_asm_insn ("ld\t%4, %p1", operands);
+	      output_asm_insn ("st\t%3, @%0", operands);
+	      output_asm_insn ("st\t%4, %s0", operands);
+	    }
+	  else
+	    {
+	      output_asm_insn ("ld\t%3, %p1", operands);
+	      output_asm_insn ("ld\t%4, %p1", operands);
+	      output_asm_insn ("st\t%3, %s0", operands);
+	      output_asm_insn ("st\t%4, %s0", operands);
+	    }
+
+	  bytes -= 8;
+	}
+      else if (bytes >= 4)
+	{
+	  /* Pre-load the following partial word too, so the tail code
+	     below does not need an extra load.  */
+	  if (bytes > 4)
+	    got_extra = 1;
+
+	  output_asm_insn ("ld\t%3, %p1", operands);
+
+	  if (got_extra)
+	    output_asm_insn ("ld\t%4, %p1", operands);
+
+	  if (first_time)
+	    output_asm_insn ("st\t%3, @%0", operands);
+	  else
+	    output_asm_insn ("st\t%3, %s0", operands);
+
+	  bytes -= 4;
+	}
+      else
+	{
+	  /* Get the entire next word, even though we do not want all of it.
+	     This saves us from doing several smaller loads, and we assume that
+	     we cannot cause a page fault when at least part of the word is in
+	     valid memory [since we don't get called if things aren't properly
+	     aligned].  */
+	  int dst_offset = first_time ? 0 : 4;
+	  int last_shift;
+	  rtx my_operands[3];
+
+	  /* If got_extra is true then we have already loaded
+	     the next word as part of loading and storing the previous word.  */
+	  if (! got_extra)
+	    output_asm_insn ("ld\t%4, @%1", operands);
+
+	  if (bytes >= 2)
+	    {
+	      bytes -= 2;
+
+	      /* Store the upper half word of the loaded word.  */
+	      output_asm_insn ("sra3\t%3, %4, #16", operands);
+	      my_operands[0] = operands[3];
+	      my_operands[1] = GEN_INT (dst_offset);
+	      my_operands[2] = operands[0];
+	      output_asm_insn ("sth\t%0, @(%1,%2)", my_operands);
+
+	      /* If there is a byte left to store then increment the
+		 destination address and shift the contents of the source
+		 register down by 8 bits.  We could not do the address
+		 increment in the store half word instruction, because it does
+		 not have an auto increment mode.  */
+	      if (bytes > 0)	/* assert (bytes == 1) */
+		{
+		  dst_offset += 2;
+		  last_shift = 8;
+		}
+	    }
+	  else
+	    last_shift = 24;
+
+	  /* Note: last_shift is only assigned on the paths where a
+	     trailing byte remains, which is exactly when it is used.  */
+	  if (bytes > 0)
+	    {
+	      my_operands[0] = operands[4];
+	      my_operands[1] = GEN_INT (last_shift);
+	      output_asm_insn ("srai\t%0, #%1", my_operands);
+	      my_operands[0] = operands[4];
+	      my_operands[1] = GEN_INT (dst_offset);
+	      my_operands[2] = operands[0];
+	      output_asm_insn ("stb\t%0, @(%1,%2)", my_operands);
+	    }
+
+	  bytes = 0;
+	}
+
+      first_time = 0;
+    }
+
+  return "";
+}
+
+/* Return true if OP is an integer constant in the range
+   1 .. MAX_MOVE_BYTES inclusive.  */
+int
+m32r_block_immediate_operand (op, mode)
+     rtx op;
+     int mode;
+{
+  return (GET_CODE (op) == CONST_INT
+	  && INTVAL (op) > 0
+	  && INTVAL (op) <= MAX_MOVE_BYTES);
+}
+
diff --git a/gcc/config/m32r/m32r.h b/gcc/config/m32r/m32r.h
new file mode 100755
index 0000000..d6e5f4c
--- /dev/null
+++ b/gcc/config/m32r/m32r.h
@@ -0,0 +1,2408 @@
+/* CYGNUS LOCAL -- meissner/m32r work */
+/* Definitions of target machine for GNU compiler, Mitsubishi M32R cpu.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? Create elf.h and have svr4.h include it. */
+#include "svr4.h"
+
+#undef SWITCH_TAKES_ARG
+#undef WORD_SWITCH_TAKES_ARG
+#undef HANDLE_SYSV_PRAGMA
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+#undef ASM_FILE_START
+#undef ASM_OUTPUT_EXTERNAL_LIBCALL
+#undef TARGET_VERSION
+#undef CPP_SPEC
+#undef ASM_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#undef SUBTARGET_SWITCHES
+
+/* M32R/X overrides */
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION fprintf (stderr, " (m32r/x)");
+
+/* Additional flags for the preprocessor. */
+#define CPP_CPU_SPEC "%{m32rx:-D__M32RX__} %{m32r:-U__M32RX__}"
+
+/* Assembler switches */
+#define ASM_CPU_SPEC \
+"%{m32r} %{m32rx} %{!O0: %{O*: -O}} --no-warn-explicit-parallel-conflicts"
+
+/* Use m32rx specific crt0/crtinit/crtfini files */
+#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} %{m32rx:m32rx/crtinit.o%s} %{!m32rx:crtinit.o%s}"
+#define ENDFILE_CPU_SPEC "-lgloss %{m32rx:m32rx/crtfini.o%s} %{!m32rx:crtfini.o%s}"
+
+/* Extra machine dependent switches */
+#define SUBTARGET_SWITCHES \
+ { "32rx", TARGET_M32RX_MASK, "Compile for the m32rx" }, \
+ { "32r", -TARGET_M32RX_MASK, "" },
+
+/* A C expression for the maximum number of instructions to execute via
+ conditional execution instructions instead of a branch. A value of
+ BRANCH_COST+1 is the default if the machine does not use cc0, and 1 if it
+ does use cc0. */
+#define MAX_CONDITIONAL_EXECUTE m32rx_cond_exec
+
+extern char * m32rx_cond_exec_string;
+extern int m32rx_cond_exec;
+
+/* m32rx specific switches that take values */
+#define SUBTARGET_OPTIONS ,{ "cond-exec=", & m32rx_cond_exec_string, \
+ "Maximum number of conditionally executed instructions" }
+
+/* Define this macro as a C expression for the initializer of an array of
+ strings to tell the driver program which options are defaults for this
+ target and thus do not need to be handled specially when using
+ `MULTILIB_OPTIONS'. */
+#define SUBTARGET_MULTILIB_DEFAULTS , "m32r"
+
+/* Number of additional registers the subtarget defines. */
+#define SUBTARGET_NUM_REGISTERS 1
+
+/* 1 for registers that cannot be allocated. */
+#define SUBTARGET_FIXED_REGISTERS , 1
+
+/* 1 for registers that are not available across function calls */
+#define SUBTARGET_CALL_USED_REGISTERS , 1
+
+/* Order to allocate model specific registers */
+#define SUBTARGET_REG_ALLOC_ORDER , 19
+
+/* Registers which are accumulators */
+#define SUBTARGET_REG_CLASS_ACCUM 0x80000
+
+/* All registers added */
+#define SUBTARGET_REG_CLASS_ALL SUBTARGET_REG_CLASS_ACCUM
+
+/* Additional accumulator registers */
+#define SUBTARGET_ACCUM_P(REGNO) ((REGNO) == 19)
+
+/* Define additional register names */
+#define SUBTARGET_REGISTER_NAMES , "a1"
+/* end M32R/X overrides */
+
+
+/* Print subsidiary information on the compiler version in use. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (m32r)")
+#endif
+
+/* Switch Recognition by gcc.c. Add -G xx support */
+
+#define SWITCH_TAKES_ARG(CHAR) \
+(DEFAULT_SWITCH_TAKES_ARG (CHAR) || (CHAR) == 'G')
+
+/* Names to predefine in the preprocessor for this target machine. */
+/* __M32R__ is defined by the existing compiler so we use that. */
+#define CPP_PREDEFINES "-Acpu(m32r) -Amachine(m32r) -D__M32R__"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#ifndef ASM_CPU_SPEC
+#define ASM_CPU_SPEC ""
+#endif
+
+#ifndef CPP_CPU_SPEC
+#define CPP_CPU_SPEC ""
+#endif
+
+#ifndef CC1_CPU_SPEC
+#define CC1_CPU_SPEC ""
+#endif
+
+#ifndef LINK_CPU_SPEC
+#define LINK_CPU_SPEC ""
+#endif
+
+#ifndef STARTFILE_CPU_SPEC
+#define STARTFILE_CPU_SPEC "%{!shared:crt0.o%s} crtinit.o%s"
+#endif
+
+#ifndef ENDFILE_CPU_SPEC
+#define ENDFILE_CPU_SPEC "-lgloss crtfini.o%s"
+#endif
+
+#ifndef RELAX_SPEC
+#if 0 /* not supported yet */
+#define RELAX_SPEC "%{mrelax:-relax}"
+#else
+#define RELAX_SPEC ""
+#endif
+#endif
+
+#define EXTRA_SPECS \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cc1_cpu", CC1_CPU_SPEC }, \
+ { "link_cpu", LINK_CPU_SPEC }, \
+ { "startfile_cpu", STARTFILE_CPU_SPEC }, \
+ { "endfile_cpu", ENDFILE_CPU_SPEC }, \
+ { "relax", RELAX_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define CC1_SPEC "%{G*} %(cc1_cpu)"
+
+/* Options to pass on to the assembler. */
+#define ASM_SPEC "%{v} %(asm_cpu) %(relax)"
+#undef ASM_FINAL_SPEC
+
+#define LINK_SPEC "%{v} %(link_cpu) %(relax)"
+
+#define STARTFILE_SPEC "%(startfile_cpu)"
+#define ENDFILE_SPEC "%(endfile_cpu)"
+
+#undef LIB_SPEC
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* If non-zero, tell the linker to do relaxing.
+ We don't do anything with the option, other than recognize it.
+ LINK_SPEC handles passing -relax to the linker.
+ This can cause incorrect debugging information as line numbers may
+ turn out wrong. This shouldn't be specified unless accompanied with -O2
+ [where the user expects debugging information to be less accurate]. */
+#define TARGET_RELAX_MASK 1
+
+/* For miscellaneous debugging purposes. */
+#define TARGET_DEBUG_MASK (1 << 1)
+#define TARGET_DEBUG (target_flags & TARGET_DEBUG_MASK)
+
+/* Align loops to 32 byte boundaries (cache line size). */
+/* ??? This option is experimental and is not documented. */
+#define TARGET_ALIGN_LOOPS_MASK (1 << 2)
+#define TARGET_ALIGN_LOOPS (target_flags & TARGET_ALIGN_LOOPS_MASK)
+
+/* Change issue rate */
+#define TARGET_ISSUE_RATE_MASK (1 << 3)
+#define TARGET_ISSUE_RATE (target_flags & TARGET_ISSUE_RATE_MASK)
+
+/* Target machine to compile for. */
+#define TARGET_M32R 1
+
+/* Support extended instruction set. */
+#define TARGET_M32RX_MASK (1 << 4)
+#define TARGET_M32RX (target_flags & TARGET_M32RX_MASK)
+#undef TARGET_M32R
+#define TARGET_M32R (! TARGET_M32RX)
+
+/* Change branch cost */
+#define TARGET_BRANCH_COST_MASK (1 << 5)
+#define TARGET_BRANCH_COST (target_flags & TARGET_BRANCH_COST_MASK)
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#ifndef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES
+#endif
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+#define TARGET_SWITCHES \
+{ \
+/* { "relax", TARGET_RELAX_MASK }, \
+ { "no-relax", -TARGET_RELAX_MASK },*/ \
+ { "debug", TARGET_DEBUG_MASK, \
+ "Display compile time statistics" }, \
+ { "align-loops", TARGET_ALIGN_LOOPS_MASK, \
+ "Align all loops to 32 byte boundary" }, \
+ { "no-align-loops", -TARGET_ALIGN_LOOPS_MASK, "" }, \
+ { "issue-rate=1", TARGET_ISSUE_RATE_MASK, \
+ "Only issue one instruction per cycle" }, \
+ { "issue-rate=2", -TARGET_ISSUE_RATE_MASK, "" }, \
+ { "branch-cost=1", TARGET_BRANCH_COST_MASK, \
+ "Prefer branches over conditional execution" }, \
+ { "branch-cost=2", -TARGET_BRANCH_COST_MASK, "" }, \
+ SUBTARGET_SWITCHES \
+ { "", TARGET_DEFAULT } \
+}
+
+extern char * m32r_model_string;
+extern char * m32r_sdata_string;
+
+#define TARGET_OPTIONS \
+{ \
+ { "model=", & m32r_model_string, "Code size: small, medium or large" },\
+ { "sdata=", & m32r_sdata_string, "Small data area: none, sdata, use" } \
+ SUBTARGET_OPTIONS \
+}
+
+/* Code Models
+
+ Code models are used to select between two choices of two separate
+ possibilities (address space size, call insn to use):
+
+ small: addresses use 24 bits, use bl to make calls
+ medium: addresses use 32 bits, use bl to make calls (*1)
+ large: addresses use 32 bits, use seth/add3/jl to make calls (*2)
+
+ The fourth is "addresses use 24 bits, use seth/add3/jl to make calls" but
+ using this one doesn't make much sense.
+
+ (*1) The linker may eventually be able to relax seth/add3 -> ld24.
+ (*2) The linker may eventually be able to relax seth/add3/jl -> bl.
+
+ Internally these are recorded as TARGET_ADDR{24,32} and
+ TARGET_CALL{26,32}.
+
+ The __model__ attribute can be used to select the code model to use when
+ accessing particular objects. */
+
+enum m32r_model
+{
+ M32R_MODEL_SMALL,
+ M32R_MODEL_MEDIUM,
+ M32R_MODEL_LARGE
+};
+
+extern enum m32r_model m32r_model;
+#define TARGET_MODEL_SMALL (m32r_model == M32R_MODEL_SMALL)
+#define TARGET_MODEL_MEDIUM (m32r_model == M32R_MODEL_MEDIUM)
+#define TARGET_MODEL_LARGE (m32r_model == M32R_MODEL_LARGE)
+#define TARGET_ADDR24 (m32r_model == M32R_MODEL_SMALL)
+#define TARGET_ADDR32 (! TARGET_ADDR24)
+#define TARGET_CALL26 (! TARGET_CALL32)
+#define TARGET_CALL32 (m32r_model == M32R_MODEL_LARGE)
+
+/* The default is the small model. */
+#ifndef M32R_MODEL_DEFAULT
+#define M32R_MODEL_DEFAULT "small"
+#endif
+
+/* Small Data Area
+
+ The SDA consists of sections .sdata, .sbss, and .scommon.
+ .scommon isn't a real section, symbols in it have their section index
+ set to SHN_M32R_SCOMMON, though support for it exists in the linker script.
+
+ Two switches control the SDA:
+
+ -G NNN - specifies the maximum size of variable to go in the SDA
+
+ -msdata=foo - specifies how such variables are handled
+
+ -msdata=none - small data area is disabled
+
+ -msdata=sdata - small data goes in the SDA, special code isn't
+ generated to use it, and special relocs aren't
+ generated
+
+ -msdata=use - small data goes in the SDA, special code is generated
+ to use the SDA and special relocs are generated
+
+ The SDA is not multilib'd, it isn't necessary.
+ MULTILIB_EXTRA_OPTS is set in tmake_file to -msdata=sdata so multilib'd
+ libraries have small data in .sdata/SHN_M32R_SCOMMON so programs that use
+ -msdata=use will successfully link with them (references in header files
+ will cause the compiler to emit code that refers to library objects in
+ .data). ??? There can be a problem if the user passes a -G value greater
+ than the default and a library object in a header file is that size.
+ The default is 8 so this should be rare - if it occurs the user
+ is required to rebuild the libraries or use a smaller value for -G.
+*/
+
+/* Maximum size of variables that go in .sdata/.sbss.
+ The -msdata=foo switch also controls how small variables are handled. */
+#ifndef SDATA_DEFAULT_SIZE
+#define SDATA_DEFAULT_SIZE 8
+#endif
+
+extern int g_switch_value; /* value of the -G xx switch */
+extern int g_switch_set; /* whether -G xx was passed. */
+
+enum m32r_sdata
+{
+ M32R_SDATA_NONE,
+ M32R_SDATA_SDATA,
+ M32R_SDATA_USE
+};
+
+extern enum m32r_sdata m32r_sdata;
+#define TARGET_SDATA_NONE (m32r_sdata == M32R_SDATA_NONE)
+#define TARGET_SDATA_SDATA (m32r_sdata == M32R_SDATA_SDATA)
+#define TARGET_SDATA_USE (m32r_sdata == M32R_SDATA_USE)
+
+/* Default is to disable the SDA
+ [for upward compatibility with previous toolchains]. */
+#ifndef M32R_SDATA_DEFAULT
+#define M32R_SDATA_DEFAULT "none"
+#endif
+
+/* Define this macro as a C expression for the initializer of an array of
+ strings to tell the driver program which options are defaults for this
+ target and thus do not need to be handled specially when using
+ `MULTILIB_OPTIONS'. */
+#ifndef SUBTARGET_MULTILIB_DEFAULTS
+#define SUBTARGET_MULTILIB_DEFAULTS
+#endif
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mmodel=small" SUBTARGET_MULTILIB_DEFAULTS }
+#endif
+
+/* Sometimes certain combinations of command options do not make
+ sense on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
+
+#ifndef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS
+#endif
+
+#define OVERRIDE_OPTIONS \
+do { \
+ /* These need to be done at start up. \
+ It's convenient to do them here. */ \
+ m32r_init (); \
+ SUBTARGET_OVERRIDE_OPTIONS \
+} while (0)
+
+/* Some machines may desire to change what optimizations are
+ performed for various optimization levels. This macro, if
+ defined, is executed once just after the optimization level is
+ determined and before the remainder of the command options have
+ been parsed. Values set in this macro are used as the default
+ values for the other command line options.
+
+ LEVEL is the optimization level specified; 2 if `-O2' is
+ specified, 1 if `-O' is specified, and 0 if neither is specified.
+
+ SIZE is non-zero if `-Os' is specified and zero otherwise.
+
+ You should not use this macro to change options that are not
+   machine-specific.  These should be uniformly selected by the same
+   optimization level on all supported machines.  Use this macro to
+   enable machine-specific optimizations.
+
+ *Do not examine `write_symbols' in this macro!* The debugging
+ options are not supposed to alter the generated code. */
+#ifndef SUBTARGET_OPTIMIZATION_OPTIONS
+#define SUBTARGET_OPTIMIZATION_OPTIONS
+#endif
+
+#define OPTIMIZATION_OPTIONS(LEVEL, SIZE) \
+do { \
+ if (LEVEL == 1) \
+ flag_regmove = TRUE; \
+ \
+ if (SIZE) \
+ { \
+ flag_omit_frame_pointer = TRUE; \
+ flag_strength_reduce = FALSE; \
+ } \
+ \
+ SUBTARGET_OPTIMIZATION_OPTIONS \
+} while (0)
+
+/* Define this macro if debugging can be performed even without a frame
+ pointer. If this macro is defined, GNU CC will turn on the
+ `-fomit-frame-pointer' option whenever `-O' is specified. This is disabled
+ because the debugger cannot find the start of a function when optimization
+ is specified. */
+/* #define CAN_DEBUG_WITHOUT_FP */
+
+
+/* Target machine storage layout. */
+
+/* Define to use software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Define this macro if WORDS_BIG_ENDIAN is not constant. This must
+ be a constant value with the same meaning as WORDS_BIG_ENDIAN,
+ which will be used only when compiling libgcc2.c. Typically the
+ value will be set based on preprocessor defines. */
+/*#define LIBGCC2_WORDS_BIG_ENDIAN 1*/
+
+/* Number of bits in an addressable storage unit. */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+{ \
+ (MODE) = SImode; \
+}
+
+/* Define this macro if the promotion described by `PROMOTE_MODE'
+ should also be done for outgoing function arguments. */
+/*#define PROMOTE_FUNCTION_ARGS*/
+
+/* Likewise, if the function return value is promoted.
+ If defined, FUNCTION_VALUE must perform the same promotions done by
+ PROMOTE_MODE. */
+/*#define PROMOTE_FUNCTION_RETURN*/
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 32
+
+/* ALIGN FRAMES on word boundaries */
+#define M32R_STACK_ALIGN(LOC) (((LOC)+3) & ~3)
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Layout of source language data types. */
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "long int"
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+
+#define M32R_NUM_REGISTERS 19
+
+#ifndef SUBTARGET_NUM_REGISTERS
+#define SUBTARGET_NUM_REGISTERS 0
+#endif
+
+#define FIRST_PSEUDO_REGISTER (M32R_NUM_REGISTERS + SUBTARGET_NUM_REGISTERS)
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ 0-3 - arguments/results
+ 4-5 - call used [4 is used as a tmp during prologue/epilogue generation]
+ 6 - call used, gptmp
+ 7 - call used, static chain pointer
+ 8-11 - call saved
+ 12 - call saved [reserved for global pointer]
+ 13 - frame pointer
+ 14 - subroutine link register
+ 15 - stack pointer
+ 16 - arg pointer
+ 17 - carry flag
+ 18 - accumulator
+ 19 - accumulator 1 in the m32r/x
+
+ By default, the extension registers are not available. */
+
+#ifndef SUBTARGET_FIXED_REGISTERS
+#define SUBTARGET_FIXED_REGISTERS
+#endif
+
+#define FIXED_REGISTERS \
+{ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 1, \
+ 1, 1, 1 \
+ SUBTARGET_FIXED_REGISTERS \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#ifndef SUBTARGET_CALL_USED_REGISTERS
+#define SUBTARGET_CALL_USED_REGISTERS
+#endif
+
+#define CALL_USED_REGISTERS \
+{ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ 1, 1, 1 \
+ SUBTARGET_CALL_USED_REGISTERS \
+}
+
+/* Zero or more C statements that may conditionally modify two variables
+ `fixed_regs' and `call_used_regs' (both of type `char []') after they
+ have been initialized from the two preceding macros.
+
+ This is necessary in case the fixed or call-clobbered registers depend
+ on target flags.
+
+ You need not define this macro if it has no work to do. */
+
+#ifdef SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#define CONDITIONAL_REGISTER_USAGE SUBTARGET_CONDITIONAL_REGISTER_USAGE
+#endif
+
+/* If defined, an initializer for a vector of integers, containing the
+ numbers of hard registers in the order in which GNU CC should
+ prefer to use them (from most preferred to least). */
+
+#ifndef SUBTARGET_REG_ALLOC_ORDER
+#define SUBTARGET_REG_ALLOC_ORDER
+#endif
+
+#if 1 /* better for int code */
+#define REG_ALLOC_ORDER \
+{ \
+ 4, 5, 6, 7, 2, 3, 8, 9, 10, \
+ 11, 12, 13, 14, 0, 1, 15, 16, 17, 18 \
+ SUBTARGET_REG_ALLOC_ORDER \
+}
+
+#else /* better for fp code at expense of int code */
+#define REG_ALLOC_ORDER \
+{ \
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, \
+ 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 \
+ SUBTARGET_REG_ALLOC_ORDER \
+}
+#endif
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE. */
+extern unsigned int m32r_hard_regno_mode_ok[];
+extern unsigned int m32r_mode_class[];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+((m32r_hard_regno_mode_ok[REGNO] & m32r_mode_class[MODE]) != 0)
+
+/* A C expression that is nonzero if it is desirable to choose
+ register allocation so as to avoid move instructions between a
+ value of mode MODE1 and a value of mode MODE2.
+
+ If `HARD_REGNO_MODE_OK (R, MODE1)' and `HARD_REGNO_MODE_OK (R,
+ MODE2)' are ever different for any R, then `MODES_TIEABLE_P (MODE1,
+ MODE2)' must be zero. */
+
+/* Tie QI/HI/SI modes together. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+(GET_MODE_CLASS (MODE1) == MODE_INT \
+ && GET_MODE_CLASS (MODE2) == MODE_INT \
+ && GET_MODE_SIZE (MODE1) <= UNITS_PER_WORD \
+ && GET_MODE_SIZE (MODE2) <= UNITS_PER_WORD)
+
+/* Register classes and constants. */
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union.
+
+ It is important that any condition codes have class NO_REGS.
+ See `register_operand'. */
+
+enum reg_class
+{
+ NO_REGS,
+ CARRY_REG,
+ ACCUM_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES ((int) LIM_REG_CLASSES)
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "CARRY_REG", \
+ "ACCUM_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#ifndef SUBTARGET_REG_CLASS_CARRY
+#define SUBTARGET_REG_CLASS_CARRY 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_ACCUM
+#define SUBTARGET_REG_CLASS_ACCUM 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_GENERAL
+#define SUBTARGET_REG_CLASS_GENERAL 0
+#endif
+
+#ifndef SUBTARGET_REG_CLASS_ALL
+#define SUBTARGET_REG_CLASS_ALL 0
+#endif
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000 }, \
+ { 0x20000 | SUBTARGET_REG_CLASS_CARRY }, \
+ { 0x40000 | SUBTARGET_REG_CLASS_ACCUM }, \
+ { 0x1ffff | SUBTARGET_REG_CLASS_GENERAL }, \
+ { 0x7ffff | SUBTARGET_REG_CLASS_ALL }, \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+extern enum reg_class m32r_regno_reg_class[FIRST_PSEUDO_REGISTER];
+#define REGNO_REG_CLASS(REGNO) (m32r_regno_reg_class[REGNO])
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* A C expression which defines the machine-dependent operand constraint
+ letters for register classes. If CHAR is such a letter, the value should be
+ the register class corresponding to it. Otherwise, the value should be
+ `NO_REGS'. The register letter `r', corresponding to class `GENERAL_REGS',
+ will not be passed to this macro; you do not need to handle it.
+
+ The following letters are unavailable, due to being used as
+ constraints:
+ '0'..'9'
+ '<', '>'
+ 'E', 'F', 'G', 'H'
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P'
+ 'Q', 'R', 'S', 'T', 'U'
+ 'V', 'X'
+ 'g', 'i', 'm', 'n', 'o', 'p', 'r', 's' */
+
+#define REG_CLASS_FROM_LETTER(C) \
+((C) == 'c' ? CARRY_REG \
+ : (C) == 'a' ? ACCUM_REGS \
+ : NO_REGS)
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? GPR_P (REGNO) || (REGNO) == ARG_POINTER_REGNUM \
+ : GPR_P (reg_renumber[REGNO]))
+#define REGNO_OK_FOR_INDEX_P(REGNO) REGNO_OK_FOR_BASE_P(REGNO)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+(CLASS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* The letters I, J, K, L, M, N, O, P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C. */
+/* 'I' is used for 8 bit signed immediates.
+ 'J' is used for 16 bit signed immediates.
+ 'K' is used for 16 bit unsigned immediates.
+ 'L' is used for 16 bit immediates left shifted by 16 (sign ???).
+ 'M' is used for 24 bit unsigned immediates.
+   'N' is used for 8 bit signed immediates, negated (i.e. -VALUE fits in 8 bits).
+ 'O' is used for 5 bit unsigned immediates (shift count).
+ 'P' is used for 16 bit signed immediates for compares
+   (values in the range -32769 to +32766).  */
+
+/* Return true if a value is inside a range */
+#define IN_RANGE_P(VALUE, LOW, HIGH) \
+ (((unsigned HOST_WIDE_INT)((VALUE) - (LOW))) \
+ <= ((unsigned HOST_WIDE_INT)((HIGH) - (LOW))))
+
+#define INT8_P(X) IN_RANGE_P (X, -128, 127)
+#define INT16_P(X) IN_RANGE_P (X, -32768, 32767)
+#define CMP_INT16_P(X) IN_RANGE_P (X, -32769, 32766)
+#define UINT16_P(X) IN_RANGE_P (X, 0, 65535)
+#define UPPER16_P(X) (((X) & ~0xffff0000) == 0)
+#define UINT24_P(X) IN_RANGE_P (X, 0, 0xffffff)
+#if HOST_BITS_PER_LONG > 32
+#define INT32_P(X) IN_RANGE_P (X, (-2147483647L-1), 2147483647L)
+#else
+#define INT32_P(X) 1
+#endif
+#define UINT5_P(X) IN_RANGE_P (X, 0, 31)
+#define INVERTED_SIGNED_8BIT(X) IN_RANGE_P (X, -127, 128)
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+((C) == 'I' ? INT8_P (VALUE) \
+ : (C) == 'J' ? INT16_P (VALUE) \
+ : (C) == 'K' ? UINT16_P (VALUE) \
+ : (C) == 'L' ? UPPER16_P (VALUE) \
+ : (C) == 'M' ? UINT24_P (VALUE) \
+ : (C) == 'N' ? INVERTED_SIGNED_8BIT (VALUE) \
+ : (C) == 'O' ? UINT5_P (VALUE) \
+ : (C) == 'P' ? CMP_INT16_P (VALUE) \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself.
+ For the m32r, handle a few constants inline.
+ ??? We needn't treat DI and DF modes differently, but for now we do. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+((C) == 'G' ? easy_di_const (VALUE) \
+ : (C) == 'H' ? easy_df_const (VALUE) \
+ : 0)
+
+/* A C expression that defines the optional machine-dependent constraint
+ letters that can be used to segregate specific types of operands,
+ usually memory references, for the target machine. It should return 1 if
+ VALUE corresponds to the operand type represented by the constraint letter
+ C. If C is not defined as an extra constraint, the value returned should
+ be 0 regardless of VALUE. */
+/* Q is for symbolic addresses loadable with ld24.
+ R is for symbolic addresses when ld24 can't be used.
+ S is for stores with pre {inc,dec}rement
+ T is for indirect of a pointer.
+ U is for loads with post increment. */
+
+#define EXTRA_CONSTRAINT(VALUE, C) \
+((C) == 'Q' ? ((TARGET_ADDR24 && GET_CODE (VALUE) == LABEL_REF) \
+ || addr24_operand (VALUE, VOIDmode)) \
+ : (C) == 'R' ? ((TARGET_ADDR32 && GET_CODE (VALUE) == LABEL_REF) \
+ || addr32_operand (VALUE, VOIDmode)) \
+ : (C) == 'S' ? (GET_CODE (VALUE) == MEM \
+ && STORE_PREINC_PREDEC_P (GET_MODE (VALUE), \
+ XEXP (VALUE, 0))) \
+ : (C) == 'T' ? (GET_CODE (VALUE) == MEM \
+ && memreg_operand (VALUE, GET_MODE (VALUE))) \
+ : (C) == 'U' ? (GET_CODE (VALUE) == MEM \
+ && LOAD_POSTINC_P (GET_MODE (VALUE), \
+ XEXP (VALUE, 0))) \
+ : 0)
+
+
+/* Stack layout and stack pointer usage. */
+
+/* Define this macro if pushing a word onto the stack moves the stack
+ pointer to a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset from the frame pointer. */
+/*#define FRAME_GROWS_DOWNWARD*/
+
+/* Offset from frame pointer to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+/* The frame pointer points at the same place as the stack pointer, except if
+ alloca has been called. */
+#define STARTING_FRAME_OFFSET \
+M32R_STACK_ALIGN (current_function_outgoing_args_size)
+
+/* Offset from the stack pointer register to the first location at which
+ outgoing arguments are placed. */
+#define STACK_POINTER_OFFSET 0
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* A C expression whose value is RTL representing the address in a
+ stack frame where the pointer to the caller's frame is stored.
+ Assume that FRAMEADDR is an RTL expression for the address of the
+ stack frame itself.
+
+ If you don't define this macro, the default is to return the value
+ of FRAMEADDR--that is, the stack frame address is also the address
+ of the stack word that points to the previous frame. */
+/*define DYNAMIC_CHAIN_ADDRESS (FRAMEADDR)*/
+
+/* A C expression whose value is RTL representing the value of the
+ return address for the frame COUNT steps up from the current frame.
+ FRAMEADDR is the frame pointer of the COUNT frame, or the frame
+ pointer of the COUNT - 1 frame if `RETURN_ADDR_IN_PREVIOUS_FRAME'
+ is defined. */
+/* The current return address is in r14. */
+#if 0 /* The default value should work. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+(((COUNT) == -1) \
+ ? gen_rtx (REG, Pmode, 14) \
+ : copy_to_reg (gen_rtx (MEM, Pmode, \
+ memory_address (Pmode, plus_constant ((FRAME), UNITS_PER_WORD)))))
+#endif
+
+/* Register to use for pushing function arguments.  */
+/* r15 is the stack pointer.  */
+#define STACK_POINTER_REGNUM 15
+
+/* Base register for access to local variables of the function.  */
+/* r13 is the frame pointer.  */
+#define FRAME_POINTER_REGNUM 13
+
+/* Base register for access to arguments of the function.  */
+/* 16 is one past the last hard integer register (r0-r15, see
+   M32R_MAX_INT_REGS below); the arg pointer is a soft register that is
+   always eliminated via ELIMINABLE_REGS.  */
+#define ARG_POINTER_REGNUM 16
+
+/* The register number of the return address pointer register, which
+ is used to access the current function's return address from the
+ stack. On some machines, the return address is not at a fixed
+ offset from the frame pointer or stack pointer or argument
+ pointer. This register can be defined to point to the return
+ address on the stack, and then be converted by `ELIMINABLE_REGS'
+ into either the frame pointer or stack pointer.
+
+ Do not define this macro unless there is no other way to get the
+ return address from the stack. */
+/* ??? revisit */
+/* #define RETURN_ADDRESS_POINTER_REGNUM */
+
+/* Register in which static-chain is passed to a function.  This must
+   not be a register used by the prologue.  */
+/* r7 -- the trampoline emitted by INITIALIZE_TRAMPOLINE below loads the
+   static chain into this register with ld24.  */
+#define STATIC_CHAIN_REGNUM 7
+
+/* These aren't official macros.  */
+#define PROLOGUE_TMP_REGNUM 4
+#define RETURN_ADDR_REGNUM 14
+/* #define GP_REGNUM 12 */
+/* Carry flag and accumulator get register numbers past the 16 GPRs.  */
+#define CARRY_REGNUM 17
+#define ACCUM_REGNUM 18
+#define M32R_MAX_INT_REGS 16
+
+/* Subtargets may widen the register predicates below; default to "no
+   extra registers" when they don't.  */
+#ifndef SUBTARGET_GPR_P
+#define SUBTARGET_GPR_P(REGNO) 0
+#endif
+
+#ifndef SUBTARGET_ACCUM_P
+#define SUBTARGET_ACCUM_P(REGNO) 0
+#endif
+
+#ifndef SUBTARGET_CARRY_P
+#define SUBTARGET_CARRY_P(REGNO) 0
+#endif
+
+/* Parenthesize the whole expansion (as ACCUM_P/CARRY_P already do):
+   without the outer parens, "GPR_P (r) && x" would mis-parse as
+   "IN_RANGE_P (...) || (SUBTARGET_GPR_P (r) && x)".  */
+#define GPR_P(REGNO) (IN_RANGE_P ((REGNO), 0, 15) || SUBTARGET_GPR_P (REGNO))
+#define ACCUM_P(REGNO) ((REGNO) == ACCUM_REGNUM || SUBTARGET_ACCUM_P (REGNO))
+#define CARRY_P(REGNO) ((REGNO) == CARRY_REGNUM || SUBTARGET_CARRY_P (REGNO))
+
+
+/* Eliminating the frame and arg pointers. */
+
+/* A C expression which is nonzero if a function must have and use a
+   frame pointer.  This expression is evaluated in the reload pass.
+   If its value is nonzero the function will have a frame pointer.  */
+/* Per the STARTING_FRAME_OFFSET comment above, the frame pointer only
+   diverges from the stack pointer after alloca, so that is the only
+   case where it is required.  */
+#define FRAME_POINTER_REQUIRED \
+(current_function_calls_alloca)
+
+#if 0
+/* C statement to store the difference between the frame pointer
+ and the stack pointer values immediately after the function prologue.
+ If `ELIMINABLE_REGS' is defined, this macro will be not be used and
+ need not be defined. */
+#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
+((VAR) = m32r_compute_frame_size (get_frame_size ()))
+#endif
+
+/* If defined, this macro specifies a table of register pairs used to
+ eliminate unneeded registers that point into the stack frame. If
+ it is not defined, the only elimination attempted by the compiler
+ is to replace references to the frame pointer with references to
+ the stack pointer.
+
+ Note that the elimination of the argument pointer with the stack
+ pointer is specified first since that is the preferred elimination. */
+
+/* No line-continuation after the final "}}": a stray trailing backslash
+   there would splice whatever line follows into the macro.  */
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }}
+
+/* A C expression that returns non-zero if the compiler is allowed to
+ try to replace register number FROM-REG with register number
+ TO-REG. This macro need only be defined if `ELIMINABLE_REGS' is
+ defined, and will usually be the constant 1, since most of the
+ cases preventing register elimination are things that the compiler
+ already knows about. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
+ ? ! frame_pointer_needed \
+ : 1)
+
+/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'.  It
+   specifies the initial difference between the specified pair of
+   registers.  This macro must be defined if `ELIMINABLE_REGS' is
+   defined.  */
+
+/* FP -> SP is 0 because the two point at the same place (see the
+   STARTING_FRAME_OFFSET comment above); consequently the AP -> FP and
+   AP -> SP offsets are deliberately identical.  */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+  int size = m32r_compute_frame_size (get_frame_size ()); \
+ \
+  if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+    (OFFSET) = 0; \
+  else if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM) \
+    (OFFSET) = size - current_function_pretend_args_size; \
+  else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+    (OFFSET) = size - current_function_pretend_args_size; \
+  else \
+    abort (); \
+}
+
+/* Function argument passing. */
+
+/* When a prototype says `char' or `short', really pass an `int'. */
+#define PROMOTE_PROTOTYPES
+
+/* If defined, the maximum amount of space required for outgoing
+ arguments will be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Define this macro if functions should assume that stack space has
+ been allocated for arguments even when their values are passed in
+ registers.
+
+ The value of this macro is the size, in bytes, of the area
+ reserved for arguments passed in registers for the function
+ represented by FNDECL.
+
+ This space can be allocated by the caller, or be a part of the
+ machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
+ which. */
+#if 0
+#define REG_PARM_STACK_SPACE(FNDECL) \
+(M32R_MAX_PARM_REGS * UNITS_PER_WORD)
+#endif
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+#define RETURN_POPS_ARGS(DECL, FUNTYPE, SIZE) 0
+
+/* Nonzero if we do not know how to pass TYPE solely in registers.  */
+/* True for types whose size is not a compile-time constant, or whose
+   address is taken (TREE_ADDRESSABLE) -- both must live in memory.  */
+#define MUST_PASS_IN_STACK(MODE,TYPE) \
+  ((TYPE) != 0 \
+   && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
+       || TREE_ADDRESSABLE (TYPE)))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+((CUM) = 0)
+
+/* The number of registers used for parameter passing. Local to this file. */
+#define M32R_MAX_PARM_REGS 4
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(N) \
+((unsigned) (N) < M32R_MAX_PARM_REGS)
+
+/* The ROUND_ADVANCE* macros are local to this file.  */
+/* Round SIZE up to a word boundary.  */
+/* Result is a count of words, not bytes.  */
+#define ROUND_ADVANCE(SIZE) \
+(((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Round arg MODE/TYPE up to the next word boundary.  */
+/* BLKmode args have no useful mode size, so measure the tree type.  */
+#define ROUND_ADVANCE_ARG(MODE, TYPE) \
+((MODE) == BLKmode \
+ ? ROUND_ADVANCE (int_size_in_bytes (TYPE)) \
+ : ROUND_ADVANCE (GET_MODE_SIZE (MODE)))
+
+/* Round CUM up to the necessary point for argument MODE/TYPE.  */
+/* NOTE: the disabled variant would round CUM up to an even register for
+   args aligned wider than a word; "(CUM) + 1 & ~1" parses as
+   "((CUM) + 1) & ~1" because + binds tighter than &.  The live variant
+   does no rounding.  */
+#if 0
+#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) \
+((((MODE) == BLKmode ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) \
+  > BITS_PER_WORD) \
+ ? ((CUM) + 1 & ~1) \
+ : (CUM))
+#else
+#define ROUND_ADVANCE_CUM(CUM, MODE, TYPE) (CUM)
+#endif
+
+/* Return boolean indicating arg of type TYPE and mode MODE will be passed in
+   a reg.  This includes arguments that have to be passed by reference as the
+   pointer to them is passed in a reg if one is available (and that is what
+   we're given).
+   This macro is only used in this file.  */
+#define PASS_IN_REG_P(CUM, MODE, TYPE, NAMED) \
+(ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) < M32R_MAX_PARM_REGS)
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+/* On the M32R the first M32R_MAX_PARM_REGS args are normally in registers
+   and the rest are pushed.  */
+/* CUM counts argument words, and the word index doubles as the hard
+   register number handed to gen_rtx (cf. FUNCTION_ARG_REGNO_P above,
+   which accepts 0..M32R_MAX_PARM_REGS-1).  */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+(PASS_IN_REG_P ((CUM), (MODE), (TYPE), (NAMED)) \
+ ? gen_rtx (REG, (MODE), ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE))) \
+ : 0)
+
+/* ??? Quick hack to try to get varargs working the normal way.  */
+/* Same as FUNCTION_ARG except that, for old-style varargs functions,
+   unnamed args are never treated as register args.  */
+#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \
+(((! current_function_varargs || (NAMED)) \
+  && PASS_IN_REG_P ((CUM), (MODE), (TYPE), (NAMED))) \
+ ? gen_rtx (REG, (MODE), ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE))) \
+ : 0)
+
+/* A C expression for the number of words, at the beginning of an
+ argument, must be put in registers. The value must be zero for
+ arguments that are passed entirely in registers or that are entirely
+ pushed on the stack.
+
+ On some machines, certain arguments must be passed partially in
+ registers and partially in memory. On these machines, typically the
+ first @var{n} words of arguments are passed in registers, and the rest
+ on the stack. If a multi-word argument (a @code{double} or a
+ structure) crosses that boundary, its first few words must be passed
+ in registers and the rest must be pushed. This macro tells the
+ compiler when this occurs, and how many of the words should go in
+ registers. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ function_arg_partial_nregs (&CUM, (int)MODE, TYPE, NAMED)
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type. */
+/* All arguments greater than 8 bytes are passed this way. */
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+((TYPE) && int_size_in_bytes (TYPE) > 8)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+((CUM) = (ROUND_ADVANCE_CUM ((CUM), (MODE), (TYPE)) \
+ + ROUND_ADVANCE_ARG ((MODE), (TYPE))))
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments. */
+#if 0
+/* We assume PARM_BOUNDARY == UNITS_PER_WORD here. */
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+(((TYPE) ? TYPE_ALIGN (TYPE) : GET_MODE_BITSIZE (MODE)) <= PARM_BOUNDARY \
+ ? PARM_BOUNDARY \
+ : 2 * PARM_BOUNDARY)
+#endif
+
+#if 0
+/* If defined, is a C expression that produces the machine-specific
+   code for a call to `__builtin_saveregs'.  This code will be moved
+   to the very beginning of the function, before any parameter access
+   are made.  The return value of this function should be an RTX that
+   contains the value to use as the return of `__builtin_saveregs'.
+
+   The argument ARGS is a `tree_list' containing the arguments that
+   were passed to `__builtin_saveregs'.
+
+   If this macro is not defined, the compiler will output an ordinary
+   call to the library function `__builtin_saveregs'.  */
+/* Fixed "savergs" -> "saveregs" so the declaration matches the macro
+   below.  NOTE(review): "struct rtx *" looks suspect -- GCC's rtx is a
+   typedef, not a struct tag; confirm before enabling this block.  */
+extern struct rtx *m32r_expand_builtin_saveregs ();
+#define EXPAND_BUILTIN_SAVEREGS(ARGS) m32r_expand_builtin_saveregs (ARGS)
+#endif
+
+/* This macro offers an alternative
+ to using `__builtin_saveregs' and defining the macro
+ `EXPAND_BUILTIN_SAVEREGS'. Use it to store the anonymous register
+ arguments into the stack so that all the arguments appear to have
+ been passed consecutively on the stack. Once this is done, you
+ can use the standard implementation of varargs that works for
+ machines that pass all their arguments on the stack.
+
+ The argument ARGS_SO_FAR is the `CUMULATIVE_ARGS' data structure,
+ containing the values that obtain after processing of the named
+ arguments. The arguments MODE and TYPE describe the last named
+ argument--its machine mode and its data type as a tree node.
+
+ The macro implementation should do two things: first, push onto the
+ stack all the argument registers *not* used for the named
+ arguments, and second, store the size of the data thus pushed into
+ the `int'-valued variable whose name is supplied as the argument
+ PRETEND_SIZE. The value that you store here will serve as
+ additional offset for setting up the stack frame.
+
+ If the argument NO_RTL is nonzero, it means that the
+ arguments of the function are being analyzed for the second time.
+ This happens for an inline function, which is not actually
+ compiled until the end of the source file. The macro
+ `SETUP_INCOMING_VARARGS' should not generate any instructions in
+ this case. */
+
+#define SETUP_INCOMING_VARARGS(ARGS_SO_FAR, MODE, TYPE, PRETEND_SIZE, NO_RTL) \
+m32r_setup_incoming_varargs (&ARGS_SO_FAR, MODE, TYPE, &PRETEND_SIZE, NO_RTL)
+
+/* Function results. */
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, 0)
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller. */
+/* ??? What about r1 in DI/DF values. */
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+/* A C expression which can inhibit the returning of certain function
+ values in registers, based on the type of value. A nonzero value says
+ to return the function value in memory, just as large structures are
+ always returned. Here TYPE will be a C expression of type `tree',
+ representing the data type of the value. */
+#define RETURN_IN_MEMORY(TYPE) \
+(int_size_in_bytes (TYPE) > 8)
+
+/* Tell GCC to use RETURN_IN_MEMORY. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Register in which address to store a structure value
+ is passed to a function, or 0 to use `invisible' first argument. */
+#define STRUCT_VALUE 0
+
+/* Function entry and exit. */
+
+/* Initialize data used by insn expanders. This is called from
+ init_emit, once for each function, before code is generated. */
+#define INIT_EXPANDERS m32r_init_expanders ()
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+m32r_output_function_prologue (FILE, SIZE)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning. */
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+m32r_output_function_epilogue (FILE, SIZE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+#define FUNCTION_PROFILER(FILE, LABELNO) abort ()
+
+/* Trampolines. */
+
+/* On the M32R, the trampoline is
+
+ ld24 r7,STATIC
+ ld24 r6,FUNCTION
+ jmp r6
+ nop
+
+ ??? Need addr32 support.
+*/
+
+/* Length in bytes of the trampoline for entering a nested function.  */
+/* Three 4-byte words: two ld24 insns plus the jmp/nop pair sketched
+   above.  */
+#define TRAMPOLINE_SIZE 12
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+   FNADDR is an RTX for the address of the function's pure code.
+   CXT is an RTX for the static chain value for the function.  */
+/* Per the instruction sketch above:
+   word 0: 0xe7000000 + CXT    -- presumably "ld24 r7,STATIC" with the
+           static chain in the low 24 bits (addition == OR so long as
+           CXT fits in 24 bits -- hence the "need addr32 support" note);
+   word 1: 0xe6000000 + FNADDR -- "ld24 r6,FUNCTION" likewise;
+   word 2: 0x1fc67000          -- the "jmp r6 / nop" pair.
+   The final insn flushes the icache over the trampoline.  */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+do { \
+  emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 0)), \
+		  plus_constant ((CXT), 0xe7000000)); \
+  emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 4)), \
+		  plus_constant ((FNADDR), 0xe6000000)); \
+  emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 8)), \
+		  GEN_INT (0x1fc67000)); \
+  emit_insn (gen_flush_icache (validize_mem (gen_rtx (MEM, SImode, \
+						      TRAMP)))); \
+} while (0)
+
+/* Library calls. */
+
+/* Generate calls to memcpy, memcmp and memset. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Addressing modes, and classification of registers for them. */
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* We have post-inc load and pre-dec,pre-inc store,
+ but only for 4 byte vals. */
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_INCREMENT 1
+
+/* Recognize any constant value that is a valid address. */
+#define CONSTANT_ADDRESS_P(X) \
+(GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ We don't allow (plus symbol large-constant) as the relocations can't
+ describe it. INTVAL > 32767 handles both 16 bit and 24 bit relocations.
+ We allow all CONST_DOUBLE's as the md file patterns will force the
+ constant to memory if they can't handle them. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+(! (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (X, 0), 1)) > 32767))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifdef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg.  */
+/* Strict variant: only real GPRs qualify; pseudos must already have
+   been allocated a suitable hard register.  */
+#define REG_OK_FOR_BASE_P(X) GPR_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as an index.  */
+#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+   or if it is a pseudo reg.  */
+/* Non-strict variant: also accept the (eliminable) arg pointer and any
+   not-yet-allocated pseudo, in the hope reload places it correctly.  */
+#define REG_OK_FOR_BASE_P(X) \
+(GPR_P (REGNO (X)) \
+ || (REGNO (X)) == ARG_POINTER_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+/* Nonzero if X is a hard reg that can be used as an index
+   or if it is a pseudo reg.  */
+#define REG_OK_FOR_INDEX_P(X) REG_OK_FOR_BASE_P (X)
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address. */
+
+/* local to this file */
+/* REG that passes the (possibly strict) base-register check.  */
+#define RTX_OK_FOR_BASE_P(X) (REG_P (X) && REG_OK_FOR_BASE_P (X))
+
+/* local to this file */
+/* Constant displacement: INT16_P (defined earlier in this file, not
+   visible in this chunk -- presumably a signed 16-bit range check).  */
+#define RTX_OK_FOR_OFFSET_P(X) \
+(GET_CODE (X) == CONST_INT && INT16_P (INTVAL (X)))
+
+/* local to this file */
+/* The reg + 16-bit-displacement addressing form.  */
+#define LEGITIMATE_OFFSET_ADDRESS_P(MODE, X) \
+(GET_CODE (X) == PLUS \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && RTX_OK_FOR_OFFSET_P (XEXP (X, 1)))
+
+/* local to this file */
+/* For LO_SUM addresses, do not allow them if the MODE is > 1 word,
+   since more than one instruction will be required.
+   MODE is parenthesized like every other macro parameter here, so the
+   test survives expansion of a non-trivial argument.  */
+#define LEGITIMATE_LO_SUM_ADDRESS_P(MODE, X) \
+(GET_CODE (X) == LO_SUM \
+ && ((MODE) != BLKmode && GET_MODE_SIZE (MODE) <= UNITS_PER_WORD) \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_P (XEXP (X, 1)))
+
+/* local to this file */
+/* Is this a load and increment operation.  */
+/* Restricted to SImode/SFmode -- as noted at HAVE_POST_INCREMENT above,
+   the auto-modify forms exist only for 4 byte values.  */
+#define LOAD_POSTINC_P(MODE, X) \
+(((MODE) == SImode || (MODE) == SFmode) \
+ && GET_CODE (X) == POST_INC \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)))
+
+/* local to this file */
+/* Is this a increment/decrement and store operation.  */
+/* Same 4-byte-only restriction as LOAD_POSTINC_P.  */
+#define STORE_PREINC_PREDEC_P(MODE, X) \
+(((MODE) == SImode || (MODE) == SFmode) \
+ && (GET_CODE (X) == PRE_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && RTX_OK_FOR_BASE_P (XEXP (X, 0)))
+
+/* Accepted address forms, in order: plain base reg; reg + simm16
+   offset; lo_sum (reg, constant); post-increment load; and
+   pre-increment/pre-decrement store.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (RTX_OK_FOR_BASE_P (X)) \
+    goto ADDR; \
+  if (LEGITIMATE_OFFSET_ADDRESS_P ((MODE), (X))) \
+    goto ADDR; \
+  if (LEGITIMATE_LO_SUM_ADDRESS_P ((MODE), (X))) \
+    goto ADDR; \
+  if (LOAD_POSTINC_P ((MODE), (X))) \
+    goto ADDR; \
+  if (STORE_PREINC_PREDEC_P ((MODE), (X))) \
+    goto ADDR; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ ??? Is there anything useful we can do here for the M32R? */
+
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
+do { \
+ if (GET_CODE (ADDR) == PRE_DEC \
+ || GET_CODE (ADDR) == PRE_INC \
+ || GET_CODE (ADDR) == POST_INC \
+ || GET_CODE (ADDR) == LO_SUM) \
+ goto LABEL; \
+} while (0)
+
+/* Condition code usage. */
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. */
+#define SELECT_CC_MODE(OP, X, Y) \
+((enum machine_mode)m32r_select_cc_mode ((int)OP, X, Y))
+
+/* Return non-zero if SELECT_CC_MODE will never return MODE for a
+ floating point inequality comparison. */
+#define REVERSIBLE_CC_MODE(MODE) 1 /*???*/
+
+/* Costs. */
+
+/* ??? I'm quite sure I don't understand enough of the subtleties involved
+ in choosing the right numbers to use here, but there doesn't seem to be
+ enough documentation on this. What I've done is define an insn to cost
+ 4 "units" and work from there. COSTS_N_INSNS (N) is defined as (N) * 4 - 2
+ so that seems reasonable. Some values are supposed to be defined relative
+ to each other and thus aren't necessarily related to COSTS_N_INSNS. */
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+/* Small integers are as cheap as registers.  4 byte values can be fetched
+   as immediate constants - let's give that the cost of an extra insn.  */
+/* CONST_INT deliberately falls through to the symbolic cases when the
+   value does not fit in 16 bits.  The CONST_DOUBLE arm charges 4 per
+   word half that needs a full load; it presumably relies on
+   split_double producing CONST_INT halves (INTVAL is applied to both)
+   -- confirm against split_double's contract.  */
+#define CONST_COSTS(X, CODE, OUTER_CODE) \
+  case CONST_INT : \
+    if (INT16_P (INTVAL (X))) \
+      return 0; \
+    /* fall through */ \
+  case CONST : \
+  case LABEL_REF : \
+  case SYMBOL_REF : \
+    return 4; \
+  case CONST_DOUBLE : \
+    { \
+      rtx high, low; \
+      split_double (X, &high, &low); \
+      return 4 * (!INT16_P (INTVAL (high)) \
+		  + !INT16_P (INTVAL (low))); \
+    }
+
+/* Compute the cost of an address. */
+#define ADDRESS_COST(ADDR) m32r_address_cost (ADDR)
+
+/* Compute extra cost of moving data between one register class
+ and another. */
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) 2
+
+/* Compute the cost of moving data between registers and memory. */
+/* Memory is 3 times as expensive as registers.
+ ??? Is that the right way to look at it? */
+#define MEMORY_MOVE_COST(MODE,CLASS,IN_P) \
+(GET_MODE_SIZE (MODE) <= UNITS_PER_WORD ? 6 : 12)
+
+/* The cost of a branch insn. */
+/* A value of 2 here causes GCC to avoid using branches in comparisons like
+ while (a < N && a). Branches aren't that expensive on the M32R so
+ we define this as 1. Defining it as 2 had a heavy hit in fp-bit.c. */
+#define BRANCH_COST ((TARGET_BRANCH_COST) ? 2 : 1)
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. The purpose for the cost of MULT is to encourage
+ `synth_mult' to find a synthetic multiply when reasonable.
+
+ If we need more than 12 insns to do a multiply, then go out-of-line,
+ since the call overhead will be < 10% of the cost of the multiply. */
+#define RTX_COSTS(X, CODE, OUTER_CODE) \
+ case MULT : \
+ return COSTS_N_INSNS (3); \
+ case DIV : \
+ case UDIV : \
+ case MOD : \
+ case UMOD : \
+ return COSTS_N_INSNS (10);
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register. */
+#define NO_FUNCTION_CSE
+
+/* Define this macro if it is as good or better for a function to call
+ itself with an explicit address than to call an address kept in a
+ register. */
+#define NO_RECURSIVE_FUNCTION_CSE
+
+/* A C statement (sans semicolon) to update the integer variable COST based on
+ the relationship between INSN that is dependent on DEP_INSN through the
+ dependence LINK. The default is to make no adjustment to COST. This can be
+ used for example to specify to the scheduler that an output- or
+ anti-dependence does not incur the same cost as a data-dependence. */
+
+#define ADJUST_COST(INSN,LINK,DEP_INSN,COST) \
+ (COST) = m32r_adjust_cost (INSN, LINK, DEP_INSN, COST)
+
+/* A C statement (sans semicolon) to update the integer scheduling
+ priority `INSN_PRIORITY(INSN)'. Reduce the priority to execute
+ the INSN earlier, increase the priority to execute INSN later.
+ Do not define this macro if you do not need to adjust the
+ scheduling priorities of insns. */
+#define ADJUST_PRIORITY(INSN) \
+ INSN_PRIORITY (INSN) = m32r_adjust_priority (INSN, INSN_PRIORITY (INSN))
+
+/* Macro to determine whether the Haifa scheduler is used. */
+#ifdef HAIFA
+#define HAIFA_P 1
+#else
+#define HAIFA_P 0
+#endif
+
+/* Indicate how many instructions can be issued at the same time.
+ This is sort of a lie. The m32r can issue only 1 long insn at
+ once, but it can issue 2 short insns. The default therefore is
+ set at 2, but this can be overridden by the command line option
+ -missue-rate=1 */
+#define ISSUE_RATE ((TARGET_ISSUE_RATE) ? 1 : 2)
+
+/* If we have a machine that can issue a variable # of instructions
+ per cycle, indicate how many more instructions can be issued
+ after the current one. */
+#define MD_SCHED_VARIABLE_ISSUE(STREAM, VERBOSE, INSN, HOW_MANY) \
+(HOW_MANY) = m32r_sched_variable_issue (STREAM, VERBOSE, INSN, HOW_MANY)
+
+/* Whether we are on an odd word boundary while scheduling. */
+extern int m32r_sched_odd_word_p;
+
+/* Hook to run before scheduling a block of insns. */
+#define MD_SCHED_INIT(STREAM, VERBOSE) m32r_sched_init (STREAM, VERBOSE)
+
+/* Hook to reorder the list of ready instructions. */
+#define MD_SCHED_REORDER(STREAM, VERBOSE, READY, N_READY) \
+m32r_sched_reorder (STREAM, VERBOSE, READY, N_READY)
+
+/* When the `length' insn attribute is used, this macro specifies the
+ value to be assigned to the address of the first insn in a
+ function. If not specified, 0 is used. */
+#define FIRST_INSN_ADDRESS m32r_first_insn_address ()
+
+
+/* Section selection. */
+
+#define TEXT_SECTION_ASM_OP "\t.section .text"
+#define DATA_SECTION_ASM_OP "\t.section .data"
+#define RODATA_SECTION_ASM_OP "\t.section .rodata"
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+#define SDATA_SECTION_ASM_OP "\t.section .sdata"
+#define SBSS_SECTION_ASM_OP "\t.section .sbss"
+/* This one is for svr4.h. */
+#undef CONST_SECTION_ASM_OP
+#define CONST_SECTION_ASM_OP "\t.section .rodata"
+
+/* A list of names for sections other than the standard two, which are
+ `in_text' and `in_data'. You need not define this macro
+ on a system with no other sections (that GCC needs to use). */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_sdata, in_sbss, in_const, in_ctors, in_dtors
+
+/* One or more functions to be defined in "varasm.c". These
+ functions should do jobs analogous to those of `text_section' and
+ `data_section', for your additional sections. Do not define this
+ macro if you do not define `EXTRA_SECTIONS'. */
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+CONST_SECTION_FUNCTION \
+CTORS_SECTION_FUNCTION \
+DTORS_SECTION_FUNCTION \
+SDATA_SECTION_FUNCTION \
+SBSS_SECTION_FUNCTION
+
+/* Switch to .sdata unless already there.  No trailing backslash after
+   the closing brace: a stray continuation there would splice the next
+   line into the macro.  */
+#define SDATA_SECTION_FUNCTION \
+void \
+sdata_section () \
+{ \
+  if (in_section != in_sdata) \
+    { \
+      fprintf (asm_out_file, "%s\n", SDATA_SECTION_ASM_OP); \
+      in_section = in_sdata; \
+    } \
+}
+
+/* Switch to .sbss unless already there; same no-trailing-backslash rule
+   as SDATA_SECTION_FUNCTION.  */
+#define SBSS_SECTION_FUNCTION \
+void \
+sbss_section () \
+{ \
+  if (in_section != in_sbss) \
+    { \
+      fprintf (asm_out_file, "%s\n", SBSS_SECTION_ASM_OP); \
+      in_section = in_sbss; \
+    } \
+}
+
+/* A C statement or statements to switch to the appropriate section for
+ output of EXP. You can assume that EXP is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether the initial value
+ of EXP requires link-time relocations. */
+extern void m32r_select_section ();
+#undef SELECT_SECTION
+#define SELECT_SECTION(EXP, RELOC) m32r_select_section ((EXP), (RELOC))
+
+/* A C statement or statements to switch to the appropriate section for
+ output of RTX in mode MODE. You can assume that RTX
+ is some kind of constant in RTL. The argument MODE is redundant
+ except in the case of a `const_int' rtx. Select the section by
+ calling `text_section' or one of the alternatives for other
+ sections.
+
+ Do not define this macro if you put all constants in the read-only
+ data section. */
+
+#undef SELECT_RTX_SECTION
+
+/* Define this macro if jump tables (for tablejump insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used.
+ This macro is irrelevant if there is no separate readonly data section. */
+/*#define JUMP_TABLES_IN_TEXT_SECTION*/
+
+/* Define this macro if references to a symbol must be treated
+ differently depending on something about the variable or
+ function named by the symbol (such as what section it is in).
+
+ The macro definition, if any, is executed immediately after the
+ rtl for DECL or other node is created.
+ The value of the rtl will be a `mem' whose address is a
+ `symbol_ref'.
+
+ The usual thing for this macro to do is to store a flag in the
+ `symbol_ref' (such as `SYMBOL_REF_FLAG') or to store a modified
+ name string in the `symbol_ref' (if one bit is not enough
+ information). */
+
+#define SDATA_FLAG_CHAR '@'
+/* Small objects are recorded with no prefix for space efficiency since
+ they'll be the most common. This isn't the case if the user passes
+ -mmodel={medium|large} and one could choose to not mark symbols that
+ are the default, but that complicates things. */
+/*#define SMALL_FLAG_CHAR '#'*/
+#define MEDIUM_FLAG_CHAR '%'
+#define LARGE_FLAG_CHAR '&'
+
+#define SDATA_NAME_P(NAME) (*(NAME) == SDATA_FLAG_CHAR)
+/*#define SMALL_NAME_P(NAME) (*(NAME) == SMALL_FLAG_CHAR)*/
+#define SMALL_NAME_P(NAME) (! ENCODED_NAME_P (NAME) && ! LIT_NAME_P (NAME))
+#define MEDIUM_NAME_P(NAME) (*(NAME) == MEDIUM_FLAG_CHAR)
+#define LARGE_NAME_P(NAME) (*(NAME) == LARGE_FLAG_CHAR)
+/* For string literals, etc. */
+#define LIT_NAME_P(NAME) ((NAME)[0] == '*' && (NAME)[1] == '.')
+
+#define ENCODED_NAME_P(SYMBOL_NAME) \
+(SDATA_NAME_P (SYMBOL_NAME) \
+ /*|| SMALL_NAME_P (SYMBOL_NAME)*/ \
+ || MEDIUM_NAME_P (SYMBOL_NAME) \
+ || LARGE_NAME_P (SYMBOL_NAME))
+
+#define ENCODE_SECTION_INFO(DECL) m32r_encode_section_info (DECL)
+
+/* Decode SYM_NAME and store the real name part in VAR, sans
+ the characters that encode section info. Define this macro if
+ ENCODE_SECTION_INFO alters the symbol's name string. */
+/* Note that we have to handle symbols like "%*start". */
#define STRIP_NAME_ENCODING(VAR, SYMBOL_NAME) \
do { \
  /* ENCODED_NAME_P is a boolean (0 or 1), so adding it skips the \
     one-character section-info prefix ('@', '%' or '&') iff present.  */ \
  (VAR) = (SYMBOL_NAME) + ENCODED_NAME_P (SYMBOL_NAME); \
  /* Then step over a leading verbatim-name '*', as in "%*start".  */ \
  (VAR) += *(VAR) == '*'; \
} while (0)
+
+/* PIC */
+
+/* The register number of the register used to address a table of static
+ data addresses in memory. In some cases this register is defined by a
+ processor's ``application binary interface'' (ABI). When this macro
+ is defined, RTL is generated for this register once, as with the stack
+ pointer and frame pointer registers. If this macro is not defined, it
+ is up to the machine-dependent files to allocate such a register (if
+ necessary). */
+/*#define PIC_OFFSET_TABLE_REGNUM 12*/
+
+/* Define this macro if the register defined by PIC_OFFSET_TABLE_REGNUM is
+ clobbered by calls. Do not define this macro if PIC_OFFSET_TABLE_REGNUM
+ is not defined. */
+/* This register is call-saved on the M32R. */
+/*#define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED*/
+
+/* By generating position-independent code, when two different programs (A
+ and B) share a common library (libC.a), the text of the library can be
+ shared whether or not the library is linked at the same address for both
+ programs. In some of these environments, position-independent code
+ requires not only the use of different addressing modes, but also
+ special code to enable the use of these addressing modes.
+
+ The FINALIZE_PIC macro serves as a hook to emit these special
+ codes once the function is being compiled into assembly code, but not
+ before. (It is not done before, because in the case of compiling an
+ inline function, it would lead to multiple PIC prologues being
+ included in functions which used inline functions and were compiled to
+ assembly language.) */
+
+/*#define FINALIZE_PIC m32r_finalize_pic ()*/
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent code.
+ You can assume that X satisfies CONSTANT_P, so you need not
+ check this. You can also assume `flag_pic' is true, so you need not
+ check it either. You need not define this macro if all constants
+ (including SYMBOL_REF) can be immediate operands when generating
+ position independent code. */
+/*#define LEGITIMATE_PIC_OPERAND_P(X)*/
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+#define ASM_FILE_START(FILE) m32r_asm_file_start (FILE)
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will
+ end at the end of the line. */
+#define ASM_COMMENT_START ";"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+#define ASM_APP_OFF ""
+
+/* This is how to output an assembler line defining a `char' constant. */
+#define ASM_OUTPUT_CHAR(FILE, VALUE) \
+do { \
+ fprintf (FILE, "\t.byte\t"); \
+ output_addr_const (FILE, (VALUE)); \
+ fprintf (FILE, "\n"); \
+} while (0)
+
+/* This is how to output an assembler line defining a `short' constant. */
+#define ASM_OUTPUT_SHORT(FILE, VALUE) \
+do { \
+ fprintf (FILE, "\t.hword\t"); \
+ output_addr_const (FILE, (VALUE)); \
+ fprintf (FILE, "\n"); \
+} while (0)
+
+/* This is how to output an assembler line defining an `int' constant.
+ We also handle symbol output here. */
+#define ASM_OUTPUT_INT(FILE, VALUE) \
+do { \
+ fprintf (FILE, "\t.word\t"); \
+ output_addr_const (FILE, (VALUE)); \
+ fprintf (FILE, "\n"); \
+} while (0)
+
/* This is how to output an assembler line defining a `float' constant.
   The target-format bits are emitted as a raw .word; the decimal
   rendering is appended after the assembler comment character purely
   as an aid to anyone reading the generated assembly.
   NOTE(review): str[30] is only just large enough for "%.20e" output
   (sign, digit, '.', 20 digits, "e+XXX", NUL = 29 chars) -- confirm
   before ever raising the precision.  */
#define ASM_OUTPUT_FLOAT(FILE, VALUE) \
do { \
  long t; \
  char str[30]; \
  REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
  REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
  fprintf (FILE, "\t.word\t0x%lx %s %s\n", \
	   t, ASM_COMMENT_START, str); \
} while (0)

/* This is how to output an assembler line defining a `double' constant.
   Two .words are emitted in the order produced by
   REAL_VALUE_TO_TARGET_DOUBLE, with the decimal value commented on
   the first.  The same str[30] sizing note as above applies.  */
#define ASM_OUTPUT_DOUBLE(FILE, VALUE) \
do { \
  long t[2]; \
  char str[30]; \
  REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), t); \
  REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
  fprintf (FILE, "\t.word\t0x%lx %s %s\n\t.word\t0x%lx\n", \
	   t[0], ASM_COMMENT_START, str, t[1]); \
} while (0)
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(FILE, VALUE) \
+ fprintf (FILE, "\t%s\t0x%x\n", ASM_BYTE_OP, (VALUE))
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
/* This is how to output the definition of a user-level label named NAME,
   such as the label on a static function or variable NAME. */
/* On the M32R we need to ensure the next instruction starts on a 32 bit
   boundary [the previous insn must either be 2 16 bit insns or 1 32 bit].
   NOTE(review): the macro itself emits no alignment directive, so the
   alignment described above must be handled elsewhere -- verify.  */
#define ASM_OUTPUT_LABEL(FILE, NAME) \
do { \
  assemble_name (FILE, NAME); \
  fputs (":\n", FILE); \
} while (0)

/* This is how to output a command to make the user-level label named NAME
   defined for reference from other files.  Emits a `.global' directive.  */
#define ASM_GLOBALIZE_LABEL(FILE, NAME) \
do { \
  fputs ("\t.global\t", FILE); \
  assemble_name (FILE, NAME); \
  fputs ("\n", FILE); \
} while (0)

/* This is how to output a reference to a user-level label named NAME.
   `assemble_name' uses this.  The section-info prefix added by
   ENCODE_SECTION_INFO is stripped first; note that real_name points
   into NAME's own storage (STRIP_NAME_ENCODING makes no copy).  */
#undef ASM_OUTPUT_LABELREF
#define ASM_OUTPUT_LABELREF(FILE, NAME) \
do { \
  char * real_name; \
  STRIP_NAME_ENCODING (real_name, (NAME)); \
  asm_fprintf (FILE, "%U%s", real_name); \
} while (0)
+
+/* If -Os, don't force line number labels to begin at the beginning of
+ the word; we still want the assembler to try to put things in parallel,
+ should that be possible.
+ For m32r/d, instructions are never in parallel (other than with a nop)
+ and the simulator and stub both handle a breakpoint in the middle of
+ a word so don't ever force line number labels to begin at the beginning
+ of a word. */
+
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+do \
+ { \
+ static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,.LM%d-", \
+ line, sym_lineno); \
+ assemble_name (file, \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));\
+ fprintf (file, \
+ (optimize_size || TARGET_M32R) \
+ ? "\n\t.debugsym .LM%d\n" \
+ : "\n.LM%d:\n", \
+ sym_lineno); \
+ sym_lineno += 1; \
+ } \
+while (0)
+
/* Store in OUTPUT a string (made with alloca) containing
   an assembler-name for a local static variable named NAME.
   LABELNO is an integer which is different for each call.
   The buffer must hold NAME, the '.', up to 11 characters for a
   32-bit decimal LABELNO (including a possible sign), and the NUL
   terminator -- hence the +13.  The previous +10 could overflow
   for label numbers above 99999999.  */
#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
do { \
  (OUTPUT) = (char *) alloca (strlen ((NAME)) + 13); \
  sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)); \
} while (0)
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+#ifndef SUBTARGET_REGISTER_NAMES
+#define SUBTARGET_REGISTER_NAMES
+#endif
+
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "fp", "lr", "sp", \
+ "ap", "cbit", "a0" \
+ SUBTARGET_REGISTER_NAMES \
+}
+
+/* If defined, a C initializer for an array of structures containing
+ a name and a register number. This macro defines additional names
+ for hard registers, thus allowing the `asm' option in declarations
+ to refer to registers using alternate names. */
+#ifndef SUBTARGET_ADDITIONAL_REGISTER_NAMES
+#define SUBTARGET_ADDITIONAL_REGISTER_NAMES
+#endif
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ /*{ "gp", GP_REGNUM },*/ \
+ { "r13", FRAME_POINTER_REGNUM }, \
+ { "r14", RETURN_ADDR_REGNUM }, \
+ { "r15", STACK_POINTER_REGNUM }, \
+ SUBTARGET_ADDITIONAL_REGISTER_NAMES \
+}
+
+/* A C expression which evaluates to true if CODE is a valid
+ punctuation character for use in the `PRINT_OPERAND' macro. */
+extern char m32r_punct_chars[];
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+m32r_punct_chars[(unsigned char) (CHAR)]
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+#define PRINT_OPERAND(FILE, X, CODE) \
+m32r_print_operand (FILE, X, CODE)
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. ADDR is an RTL expression.
+
+ On some machines, the syntax for a symbolic address depends on
+ the section that the address refers to. On these machines,
+ define the macro `ENCODE_SECTION_INFO' to store the information
+ into the `symbol_ref', and then check for it here. */
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+m32r_print_operand_address (FILE, ADDR)
+
+/* If defined, C string expressions to be used for the `%R', `%L',
+ `%U', and `%I' options of `asm_fprintf' (see `final.c'). These
+ are useful when a single `md' file must support multiple assembler
+ formats. In that case, the various `tm.h' files can define these
+ macros differently. */
+#define REGISTER_PREFIX ""
+#define LOCAL_LABEL_PREFIX ".L"
+#define USER_LABEL_PREFIX ""
+#define IMMEDIATE_PREFIX "#"
+
+/* This is how to output an element of a case-vector that is absolute. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ fprintf (FILE, "\t.word\t"); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "\n"); \
+} while (0)
+
/* This is how to output an element of a case-vector that is relative.
   Emits ".word .Ln-.Lm": the offset of case label VALUE from the
   table label REL.  The original printed a stray ')' with no matching
   '(' after the second label, producing malformed assembly; it now
   simply ends the line.  */
#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
do { \
  char label[30]; \
  ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
  fprintf (FILE, "\t.word\t"); \
  assemble_name (FILE, label); \
  fprintf (FILE, "-"); \
  ASM_GENERATE_INTERNAL_LABEL (label, "L", REL); \
  assemble_name (FILE, label); \
  fprintf (FILE, "\n"); \
} while (0)
+
+/* The desired alignment for the location counter at the beginning
+ of a loop. */
+/* On the M32R, align loops to 32 byte boundaries (cache line size)
+ if -malign-loops. */
+#define LOOP_ALIGN(LABEL) (TARGET_ALIGN_LOOPS ? 5 : 0)
+
+/* Define this to be the maximum number of insns to move around when moving
+ a loop test from the top of a loop to the bottom
+ and seeing whether to duplicate it. The default is thirty.
+
+ Loop unrolling currently doesn't like this optimization, so
+ disable doing if we are unrolling loops and saving space. */
+#define LOOP_TEST_THRESHOLD (optimize_size \
+ && !flag_unroll_loops \
+ && !flag_unroll_all_loops ? 2 : 30)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+/* .balign is used to avoid confusion. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+do { \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.balign %d\n", 1 << (LOG)); \
+} while (0)
+
+/* Like `ASM_OUTPUT_COMMON' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used in
+ place of `ASM_OUTPUT_COMMON', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits. */
+
/* Directive for a common symbol placed in the small-data area.  */
#define SCOMMON_ASM_OP ".scomm"

/* Use `.scomm' (small common) when the object's size is positive and
   no larger than the -G threshold `g_switch_value', unless small-data
   placement is disabled (TARGET_SDATA_NONE); otherwise fall back to the
   ordinary COMMON_ASM_OP.  The alignment is converted from bits to bytes.
   NOTE(review): SIZE and ALIGN are printed with %u -- this assumes both
   arguments expand to int-sized values; confirm against callers.  */
#undef ASM_OUTPUT_ALIGNED_COMMON
#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
do { \
  if (! TARGET_SDATA_NONE \
      && (SIZE) > 0 && (SIZE) <= g_switch_value) \
    fprintf ((FILE), "\t%s\t", SCOMMON_ASM_OP); \
  else \
    fprintf ((FILE), "\t%s\t", COMMON_ASM_OP); \
  assemble_name ((FILE), (NAME)); \
  fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
} while (0)
+
+/* Like `ASM_OUTPUT_BSS' except takes the required alignment as a
+ separate, explicit argument. If you define this macro, it is used in
+ place of `ASM_OUTPUT_BSS', and gives you more flexibility in
+ handling the required alignment of the variable. The alignment is
+ specified as the number of bits.
+
+ For the M32R we need sbss support. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ ASM_GLOBALIZE_LABEL (FILE, NAME); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+/* Debugging information. */
+
+/* Generate DBX and DWARF debugging information. */
+#undef DBX_DEBUGGING_INFO
+#undef DWARF_DEBUGGING_INFO
+#undef DWARF2_DEBUGGING_INFO
+
+#define DBX_DEBUGGING_INFO
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+
+/* Prefer STABS (for now). */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* How to renumber registers for dbx and gdb. */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Turn off splitting of long stabs. */
+#define DBX_CONTIN_LENGTH 0
+
+/* Miscellaneous. */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* It's not clear what PIC will look like or whether we want to use -fpic
+ for the embedded form currently being talked about. For now require -fpic
+ to get pc relative switch tables. */
+/*#define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+#define STORE_FLAG_VALUE 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+/* ??? The M32R doesn't have full 32 bit pointers, but making this PSImode has
+ it's own problems (you have to add extendpsisi2 and truncsipsi2).
+ Try to avoid it. */
+#define Pmode SImode
+
+/* A function address in a call instruction. */
+#define FUNCTION_MODE SImode
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to TYPE. */
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+m32r_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+/* A C expression that returns zero if the attributes on TYPE1 and TYPE2 are
+ incompatible, one if they are compatible, and two if they are
+ nearly compatible (which causes a warning to be generated). */
+#define COMP_TYPE_ATTRIBUTES(TYPE1, TYPE2) \
+m32r_comp_type_attributes (TYPE1, TYPE2)
+
+/* Give newly defined TYPE some default attributes. */
+#define SET_DEFAULT_TYPE_ATTRIBUTES(TYPE) \
+m32r_set_default_type_attributes (TYPE)
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+extern struct rtx_def * m32r_compare_op0;
+extern struct rtx_def * m32r_compare_op1;
+
+/* M32R function types. */
+enum m32r_function_type
+{
+ M32R_FUNCTION_UNKNOWN,
+ M32R_FUNCTION_NORMAL,
+ M32R_FUNCTION_INTERRUPT
+};
+
+#define M32R_INTERRUPT_P(TYPE) ((TYPE) == M32R_FUNCTION_INTERRUPT)
+
+/* Define this if you have defined special-purpose predicates in the
+ file `MACHINE.c'. This macro is called within an initializer of an
+ array of structures. The first field in the structure is the name
+ of a predicate and the second field is an array of rtl codes. For
+ each predicate, list all rtl codes that can be in expressions
+ matched by the predicate. The list should have a trailing comma. */
+
+#define PREDICATE_CODES \
+{ "conditional_compare_operand",{ EQ, NE }}, \
+{ "binary_parallel_operator", { PLUS, MINUS, MULT, AND, IOR, XOR }}, \
+{ "unary_parallel_operator", { NOT, NEG }}, \
+{ "reg_or_zero_operand", { REG, SUBREG, CONST_INT }}, \
+{ "carry_compare_operand", { EQ, NE }}, \
+{ "eqne_comparison_operator", { EQ, NE }}, \
+{ "signed_comparison_operator", { EQ, NE, LT, LE, GT, GE }}, \
+{ "move_dest_operand", { REG, SUBREG, MEM }}, \
+{ "move_src_operand", { REG, SUBREG, MEM, CONST_INT, \
+ CONST_DOUBLE, LABEL_REF, CONST, \
+ SYMBOL_REF }}, \
+{ "move_double_src_operand", { REG, SUBREG, MEM, CONST_INT, \
+ CONST_DOUBLE }}, \
+{ "two_insn_const_operand", { CONST_INT }}, \
+{ "symbolic_operand", { SYMBOL_REF, LABEL_REF, CONST }}, \
+{ "seth_add3_operand", { SYMBOL_REF, LABEL_REF, CONST }}, \
+{ "int8_operand", { CONST_INT }}, \
+{ "uint16_operand", { CONST_INT }}, \
+{ "reg_or_int16_operand", { REG, SUBREG, CONST_INT }}, \
+{ "reg_or_uint16_operand", { REG, SUBREG, CONST_INT }}, \
+{ "reg_or_cmp_int16_operand", { REG, SUBREG, CONST_INT }}, \
+{ "reg_or_eq_int16_operand", { REG, SUBREG, CONST_INT }}, \
+{ "cmp_int16_operand", { CONST_INT }}, \
+{ "call_address_operand", { SYMBOL_REF, LABEL_REF, CONST }}, \
+{ "extend_operand", { REG, SUBREG, MEM }}, \
+{ "small_insn_p", { INSN, CALL_INSN, JUMP_INSN }}, \
+{ "m32r_not_same_reg", { REG, SUBREG }}, \
+{ "m32r_block_immediate_operand",{ CONST_INT }}, \
+{ "large_insn_p", { INSN, CALL_INSN, JUMP_INSN }},
+
+/* Functions declared in m32r.c */
+#ifndef PROTO
+#if defined (USE_PROTOTYPES) ? USE_PROTOTYPES : defined (__STDC__)
+#define PROTO(ARGS) ARGS
+#else
+#define PROTO(ARGS) ()
+#endif
+#endif
+
+#ifdef BUFSIZ /* stdio.h has been included, ok to use FILE * */
+#define STDIO_PROTO(ARGS) PROTO(ARGS)
+#else
+#define STDIO_PROTO(ARGS) ()
+#endif
+
+#ifndef TREE_CODE
+union tree_node;
+#define Tree union tree_node *
+#else
+#define Tree tree
+#endif
+
+#ifndef RTX_CODE
+struct rtx_def;
+#define Rtx struct rtx_def *
+#else
+#define Rtx rtx
+#endif
+
+extern void sbss_section PROTO((void));
+extern void sdata_section PROTO((void));
+extern void m32r_init PROTO((void));
+extern int m32r_valid_machine_decl_attribute PROTO((Tree, Tree, Tree, Tree));
+extern int m32r_comp_type_attributes PROTO((Tree, Tree));
+extern void m32r_select_section PROTO((Tree, int));
+extern void m32r_encode_section_info PROTO((Tree));
+extern void m32r_init_expanders PROTO((void));
+extern int call_address_operand PROTO((Rtx, int));
+extern int call_operand PROTO((Rtx, int));
+extern int symbolic_operand PROTO((Rtx, int));
+extern int small_data_operand PROTO((Rtx, int));
+extern int addr24_operand PROTO((Rtx, int));
+extern int addr32_operand PROTO((Rtx, int));
+extern int call26_operand PROTO((Rtx, int));
+extern int seth_add3_operand PROTO((Rtx, int));
+extern int int8_operand PROTO((Rtx, int));
+extern int cmp_int16_operand PROTO((Rtx, int));
+extern int uint16_operand PROTO((Rtx, int));
+extern int reg_or_int16_operand PROTO((Rtx, int));
+extern int reg_or_uint16_operand PROTO((Rtx, int));
+extern int reg_or_cmp_nt16_operand PROTO((Rtx, int));
+extern int reg_or_eqne_nt16_operand PROTO((Rtx, int));
+extern int two_insn_const_operand PROTO((Rtx, int));
+extern int move_src_operand PROTO((Rtx, int));
+extern int move_double_src_operand PROTO((Rtx, int));
+extern int move_dest_operand PROTO((Rtx, int));
+extern int easy_di_const PROTO((Rtx));
+extern int easy_df_const PROTO((Rtx));
+extern int eqne_comparison_operator PROTO((Rtx, int));
+extern int signed_comparison_operator PROTO((Rtx, int));
+extern int memreg_operand PROTO((Rtx, int));
+extern int extend_operand PROTO((Rtx, int));
+extern int reg_or_zero_operand PROTO((Rtx, int));
+extern int small_insn_p PROTO((Rtx, int));
+extern int large_insn_p PROTO((Rtx, int));
+extern int direct_return PROTO((void));
+extern int m32r_select_cc_mode PROTO((int, Rtx, Rtx));
+extern Rtx gen_compare PROTO((int, Rtx, Rtx, Rtx));
+extern Rtx gen_split_move_double PROTO((Rtx *));
+extern int function_arg_partial_nregs PROTO((CUMULATIVE_ARGS *,
+ int, Tree, int));
+extern void m32r_setup_incoming_varargs PROTO((CUMULATIVE_ARGS *,
+ int, Tree, int *,
+ int));
+extern int m32r_address_cost PROTO((Rtx));
+extern int m32r_adjust_cost PROTO((Rtx, Rtx, Rtx, int));
+extern int m32r_adjust_priority PROTO((Rtx, int));
+extern void m32r_sched_init STDIO_PROTO((FILE *, int));
+extern void m32r_sched_reorder STDIO_PROTO((FILE *, int, Rtx *, int));
+extern int m32r_sched_variable_issue STDIO_PROTO((FILE *, int, Rtx, int));
+extern enum m32r_function_type m32r_compute_function_type
+ PROTO((Tree));
+extern unsigned m32r_compute_frame_size PROTO((int));
+extern int m32r_first_insn_address PROTO((void));
+extern void m32r_expand_prologue PROTO((void));
+extern void m32r_output_function_prologue STDIO_PROTO((FILE *, int));
+extern void m32r_output_function_epilogue STDIO_PROTO((FILE *, int));
+extern void m32r_finalize_pic PROTO((void));
+extern void m32r_initialize_trampoline PROTO((Rtx, Rtx, Rtx));
+extern void m32r_asm_file_start STDIO_PROTO((FILE *));
+extern void m32r_print_operand STDIO_PROTO((FILE *, Rtx, int));
+extern void m32r_print_operand_address STDIO_PROTO((FILE *, Rtx));
+extern int zero_and_one PROTO((Rtx, Rtx));
+extern int conditional_move_operand PROTO((Rtx, int));
+extern int carry_compare_operand PROTO((Rtx, int));
+extern char *emit_cond_move PROTO((Rtx *, Rtx));
+extern int conditional_compare_operand PROTO((Rtx, int));
+extern int binary_parallel_operand PROTO((Rtx, int));
+extern int unary_parallel_operand PROTO((Rtx, int));
+extern char *emit_binary_cond_exec PROTO((Rtx *, int));
+extern char *emit_unary_cond_exec PROTO((Rtx *, int));
+
+/* Externals that are referenced, but may not have the proper include file
+ dragged in. */
+extern int optimize;
+extern int optimize_size;
+
+/* END CYGNUS LOCAL -- meissner/m32r work */
+
+extern int m32r_not_same_reg PROTO((Rtx, Rtx));
+extern char * m32r_output_block_move PROTO((Rtx, Rtx *));
+extern int m32r_block_immediate_operand PROTO((Rtx, int));
+extern void m32r_expand_block_move PROTO((Rtx *));
diff --git a/gcc/config/m32r/m32r.md b/gcc/config/m32r/m32r.md
new file mode 100755
index 0000000..859ed1b
--- /dev/null
+++ b/gcc/config/m32r/m32r.md
@@ -0,0 +1,2649 @@
+;; CYGNUS LOCAL -- meissner/m32r work
+;; Machine description of the Mitsubishi M32R cpu for GNU C compiler
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; unspec usage
+;; 0 - blockage
+;; 1 - flush_icache
+;; 2 - load_sda_base
+;; 3 - setting carry in addx/subx instructions.
+
+;; Insn type. Used to default other attribute values.
+(define_attr "type"
+ "int2,int4,load2,load4,load8,store2,store4,store8,shift2,shift4,mul2,div4,uncond_branch,branch,call,multi,misc"
+ (const_string "misc"))
+
+;; Length in bytes. The "2" type suffixes are 16-bit insns, the "4"
+;; suffixes 32-bit insns; "multi" patterns are split later and reserve 8.
+(define_attr "length" ""
+ (cond [(eq_attr "type" "int2,load2,store2,shift2,mul2")
+ (const_int 2)
+
+ (eq_attr "type" "int4,load4,store4,shift4,div4")
+ (const_int 4)
+
+ (eq_attr "type" "multi")
+ (const_int 8)
+
+ (eq_attr "type" "uncond_branch,branch,call")
+ (const_int 4)]
+
+ (const_int 4)))
+
+;; The length here is the length of a single asm. Unfortunately it might be
+;; 2 or 4 so we must allow for 4. That's ok though.
+(define_asm_attributes
+ [(set_attr "length" "4")
+ (set_attr "type" "multi")])
+
+
+;; Whether an instruction is 16-bit or 32-bit
+(define_attr "insn_size" "short,long"
+ (if_then_else (eq_attr "type" "int2,load2,store2,shift2,mul2")
+ (const_string "short")
+ (const_string "long")))
+
+;; Mirrors of target flags/options so scheduling tests can refer to them.
+(define_attr "debug" "no,yes"
+ (const (symbol_ref "(TARGET_DEBUG != 0)")))
+
+(define_attr "opt_size" "no,yes"
+ (const (symbol_ref "(optimize_size != 0)")))
+
+(define_attr "m32r" "no,yes"
+ (const (symbol_ref "(TARGET_M32R != 0)")))
+
+(define_attr "m32rx" "no,yes"
+ (const (symbol_ref "(TARGET_M32RX != 0)")))
+
+;; Which M32RX issue slot an insn may use: "o" (left/odd), "s" (right),
+;; "either", "long" (both slots), or "m32r" when not targeting the M32RX.
+(define_attr "m32rx_pipeline" "either,s,o,long,m32r"
+ (cond [(eq_attr "m32rx" "no")
+ (const_string "m32r")
+
+ (eq_attr "insn_size" "!short")
+ (const_string "long")]
+
+ (cond [(eq_attr "type" "int2")
+ (const_string "either")
+
+ (eq_attr "type" "load2,store2,shift2,uncond_branch,branch,call")
+ (const_string "o")
+
+ (eq_attr "type" "mul2")
+ (const_string "s")]
+
+ (const_string "long"))))
+
+
+;; ::::::::::::::::::::
+;; ::
+;; :: Function Units
+;; ::
+;; ::::::::::::::::::::
+
+;; On most RISC machines, there are instructions whose results are not
+;; available for a specific number of cycles. Common cases are instructions
+;; that load data from memory. On many machines, a pipeline stall will result
+;; if the data is referenced too soon after the load instruction.
+
+;; In addition, many newer microprocessors have multiple function units,
+;; usually one for integer and one for floating point, and often will incur
+;; pipeline stalls when a result that is needed is not yet ready.
+
+;; The descriptions in this section allow the specification of how much time
+;; must elapse between the execution of an instruction and the time when its
+;; result is used. It also allows specification of when the execution of an
+;; instruction will delay execution of similar instructions due to function
+;; unit conflicts.
+
+;; For the purposes of the specifications in this section, a machine is divided
+;; into "function units", each of which execute a specific class of
+;; instructions in first-in-first-out order. Function units that accept one
+;; instruction each cycle and allow a result to be used in the succeeding
+;; instruction (usually via forwarding) need not be specified. Classic RISC
+;; microprocessors will normally have a single function unit, which we can call
+;; `memory'. The newer "superscalar" processors will often have function units
+;; for floating point operations, usually at least a floating point adder and
+;; multiplier.
+
+;; Each usage of a function units by a class of insns is specified with a
+;; `define_function_unit' expression, which looks like this:
+
+;; (define_function_unit NAME MULTIPLICITY SIMULTANEITY TEST READY-DELAY
+;; ISSUE-DELAY [CONFLICT-LIST])
+
+;; NAME is a string giving the name of the function unit.
+
+;; MULTIPLICITY is an integer specifying the number of identical units in the
+;; processor. If more than one unit is specified, they will be scheduled
+;; independently. Only truly independent units should be counted; a pipelined
+;; unit should be specified as a single unit. (The only common example of a
+;; machine that has multiple function units for a single instruction class that
+;; are truly independent and not pipelined are the two multiply and two
+;; increment units of the CDC 6600.)
+
+;; SIMULTANEITY specifies the maximum number of insns that can be executing in
+;; each instance of the function unit simultaneously or zero if the unit is
+;; pipelined and has no limit.
+
+;; All `define_function_unit' definitions referring to function unit NAME must
+;; have the same name and values for MULTIPLICITY and SIMULTANEITY.
+
+;; TEST is an attribute test that selects the insns we are describing in this
+;; definition. Note that an insn may use more than one function unit and a
+;; function unit may be specified in more than one `define_function_unit'.
+
+;; READY-DELAY is an integer that specifies the number of cycles after which
+;; the result of the instruction can be used without introducing any stalls.
+
+;; ISSUE-DELAY is an integer that specifies the number of cycles after the
+;; instruction matching the TEST expression begins using this unit until a
+;; subsequent instruction can begin. A cost of N indicates an N-1 cycle delay.
+;; A subsequent instruction may also be delayed if an earlier instruction has a
+;; longer READY-DELAY value. This blocking effect is computed using the
+;; SIMULTANEITY, READY-DELAY, ISSUE-DELAY, and CONFLICT-LIST terms. For a
+;; normal non-pipelined function unit, SIMULTANEITY is one, the unit is taken
+;; to block for the READY-DELAY cycles of the executing insn, and smaller
+;; values of ISSUE-DELAY are ignored.
+
+;; CONFLICT-LIST is an optional list giving detailed conflict costs for this
+;; unit. If specified, it is a list of condition test expressions to be
+;; applied to insns chosen to execute in NAME following the particular insn
+;; matching TEST that is already executing in NAME. For each insn in the list,
+;; ISSUE-DELAY specifies the conflict cost; for insns not in the list, the cost
+;; is zero. If not specified, CONFLICT-LIST defaults to all instructions that
+;; use the function unit.
+
+;; Typical uses of this vector are where a floating point function unit can
+;; pipeline either single- or double-precision operations, but not both, or
+;; where a memory unit can pipeline loads, but not stores, etc.
+
+;; As an example, consider a classic RISC machine where the result of a load
+;; instruction is not available for two cycles (a single "delay" instruction is
+;; required) and where only one load instruction can be executed
+;; simultaneously. This would be specified as:
+
+;; (define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)
+
+;; For the case of a floating point function unit that can pipeline
+;; either single or double precision, but not both, the following could be
+;; specified:
+;;
+;; (define_function_unit "fp" 1 0
+;; (eq_attr "type" "sp_fp") 4 4
+;; [(eq_attr "type" "dp_fp")])
+;;
+;; (define_function_unit "fp" 1 0
+;; (eq_attr "type" "dp_fp") 4 4
+;; [(eq_attr "type" "sp_fp")])
+
+;; Note: The scheduler attempts to avoid function unit conflicts and uses all
+;; the specifications in the `define_function_unit' expression. It has
+;; recently come to our attention that these specifications may not allow
+;; modeling of some of the newer "superscalar" processors that have insns using
+;; multiple pipelined units. These insns will cause a potential conflict for
+;; the second unit used during their execution and there is no way of
+;; representing that conflict. We welcome any examples of how function unit
+;; conflicts work in such processors and suggestions for their representation.
+
+;; Function units of the M32R
+;; Units that take one cycle do not need to be specified.
+
+;; (define_function_unit {name} {multiplicity} {simultaneity} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+
+;; Hack to get GCC to better pack the instructions.
+;; We pretend there is a separate long function unit that conflicts with
+;; both the left and right 16 bit insn slots.
+
+;; M32R: two pretend 16-bit slots ("short", multiplicity 2) that each
+;; conflict with the "long" 32-bit unit, to encourage pairing short insns.
+(define_function_unit "short" 2 2
+ (and (eq_attr "m32r" "yes")
+ (and (eq_attr "insn_size" "short")
+ (eq_attr "type" "!load2")))
+ 1 0
+ [(eq_attr "insn_size" "long")])
+
+(define_function_unit "short" 2 2 ;; load delay of 1 clock for mem execution + 1 clock for WB
+ (and (eq_attr "m32r" "yes")
+ (eq_attr "type" "load2"))
+ 3 0
+ [(eq_attr "insn_size" "long")])
+
+(define_function_unit "long" 1 1
+ (and (eq_attr "m32r" "yes")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "!load4,load8")))
+ 1 0
+ [(eq_attr "insn_size" "short")])
+
+(define_function_unit "long" 1 1 ;; load delay of 1 clock for mem execution + 1 clock for WB
+ (and (eq_attr "m32r" "yes")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "load4,load8")))
+ 3 0
+ [(eq_attr "insn_size" "short")])
+
+;; M32RX: model the left ("o") and right ("s") issue slots separately;
+;; a long insn occupies both, hence the conflict lists.
+(define_function_unit "left" 1 1
+ (and (eq_attr "m32rx_pipeline" "o,either")
+ (eq_attr "type" "!load2"))
+ 1 0
+ [(eq_attr "insn_size" "long")])
+
+(define_function_unit "left" 1 1 ;; load delay of 1 clock for mem execution + 1 clock for WB
+ (and (eq_attr "m32rx_pipeline" "o,either")
+ (eq_attr "type" "load2"))
+ 3 0
+ [(eq_attr "insn_size" "long")])
+
+(define_function_unit "right" 1 1
+ (eq_attr "m32rx_pipeline" "s,either")
+ 1 0
+ [(eq_attr "insn_size" "long")])
+
+(define_function_unit "long" 1 1
+ (and (eq_attr "m32rx" "yes")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "!load4,load8")))
+ 2 0
+ [(eq_attr "insn_size" "short")])
+
+(define_function_unit "long" 1 1 ;; load delay of 1 clock for mem execution + 1 clock for WB
+ (and (eq_attr "m32rx" "yes")
+ (and (eq_attr "insn_size" "long")
+ (eq_attr "type" "load4,load8")))
+ 3 0
+ [(eq_attr "insn_size" "short")])
+
+
+;; Instruction grouping
+;;
+;; These patterns only exist so the (CYGNUS LOCAL) group_sequence /
+;; group_parallel wrappers have matching insns with correct length/type
+;; attributes; they are never emitted as assembly, hence the abort ().
+
+(define_insn "*small_sequence"
+ [(group_sequence [(match_insn2 0 "small_insn_p")
+ (match_insn2 1 "small_insn_p")])]
+ ""
+ "*
+{
+ abort ();
+}"
+ [(set_attr "length" "4")
+ (set_attr "type" "multi")])
+
+;; Two short insns executed in parallel -- only valid on the M32RX.
+(define_insn "*small_parallel"
+ [(group_parallel [(match_insn2 0 "small_insn_p")
+ (match_insn2 1 "small_insn_p")])]
+ "TARGET_M32RX"
+ "*
+{
+ abort ();
+}"
+ [(set_attr "length" "4")
+ (set_attr "type" "multi")])
+
+(define_insn "*long_group"
+ [(group_sequence [(match_insn2 0 "large_insn_p")])]
+ ""
+ "*
+{
+ abort ();
+}"
+ [(set_attr "length" "4")
+ (set_attr "type" "multi")])
+
+
+;; Expand prologue as RTL
+;; All the work is done in m32r_expand_prologue; the template RTL is a
+;; placeholder that is never emitted (DONE is always reached).
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "
+{
+ m32r_expand_prologue ();
+ DONE;
+}")
+
+
+;; Move instructions.
+;;
+;; For QI and HI moves, the register must contain the full properly
+;; sign-extended value. nonzero_bits assumes this [otherwise
+;; SHORT_IMMEDIATES_SIGN_EXTEND must be used, but the comment for it
+;; says it's a kludge and the .md files should be fixed instead].
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily.
+ Objects in the small data area are handled too. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+}")
+
+;; Alternatives: reg-reg, 8-bit imm (I), 16-bit imm (JQR), and
+;; short/long-displacement loads (T/m) and stores.
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "move_dest_operand" "=r,r,r,r,r,T,m")
+ (match_operand:QI 1 "move_src_operand" "r,I,JQR,T,m,r,r"))]
+ "register_operand (operands[0], QImode) || register_operand (operands[1], QImode)"
+ "@
+ mv %0,%1
+ ldi %0,%#%1
+ ldi %0,%#%1
+ ldub %0,%1
+ ldub %0,%1
+ stb %1,%0
+ stb %1,%0"
+ [(set_attr "type" "int2,int2,int4,load2,load4,store2,store4")
+ (set_attr "length" "2,2,4,2,4,2,4")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+}")
+
+;; Like *movqi_insn but with an extra ld24 alternative for unsigned
+;; 16-bit constants (K).
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "move_dest_operand" "=r,r,r,r,r,r,T,m")
+ (match_operand:HI 1 "move_src_operand" "r,I,JQR,K,T,m,r,r"))]
+ "register_operand (operands[0], HImode) || register_operand (operands[1], HImode)"
+ "@
+ mv %0,%1
+ ldi %0,%#%1
+ ldi %0,%#%1
+ ld24 %0,%#%1
+ lduh %0,%1
+ lduh %0,%1
+ sth %1,%0
+ sth %1,%0"
+ [(set_attr "type" "int2,int2,int4,int4,load2,load4,store2,store4")
+ (set_attr "length" "2,2,4,4,2,4,2,4")])
+
+;; Named push/pop helpers used by the prologue/epilogue code in m32r.c.
+(define_expand "movsi_push"
+ [(set (mem:SI (pre_dec:SI (match_operand:SI 0 "register_operand" "")))
+ (match_operand:SI 1 "register_operand" ""))]
+ ""
+ "")
+
+(define_expand "movsi_pop"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mem:SI (post_inc:SI (match_operand:SI 1 "register_operand" ""))))]
+ ""
+ "")
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+
+ /* Small Data Area reference? */
+ if (small_data_operand (operands[1], SImode))
+ {
+ emit_insn (gen_movsi_sda (operands[0], operands[1]));
+ DONE;
+ }
+
+ /* If medium or large code model, symbols have to be loaded with
+ seth/add3. */
+ if (addr32_operand (operands[1], SImode))
+ {
+ emit_insn (gen_movsi_addr32 (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+;; ??? Do we need a const_double constraint here for large unsigned values?
+;; Output routine for SImode moves; chooses among mv/ld/ldi/ld24/seth/
+;; push/pop/st based on operand codes, and returns "#" for constants that
+;; must be split into two insns (see the define_split below).
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "move_dest_operand" "=r,r,r,r,r,r,r,r,r,T,S,m")
+ (match_operand:SI 1 "move_src_operand" "r,I,J,MQ,L,n,T,U,m,r,r,r"))]
+ "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
+ "*
+{
+ if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == SUBREG)
+ {
+ switch (GET_CODE (operands[1]))
+ {
+ HOST_WIDE_INT value;
+
+ default:
+ break;
+
+ case REG:
+ case SUBREG:
+ return \"mv %0,%1\";
+
+ case MEM:
+ if (GET_CODE (XEXP (operands[1], 0)) == POST_INC
+ && XEXP (XEXP (operands[1], 0), 0) == stack_pointer_rtx)
+ return \"pop %0\";
+
+ return \"ld %0,%1\";
+
+ case CONST_INT:
+ value = INTVAL (operands[1]);
+ if (INT16_P (value))
+ return \"ldi %0,%#%1\\t; %X1\";
+
+ if (UINT24_P (value))
+ return \"ld24 %0,%#%1\\t; %X1\";
+
+ if (UPPER16_P (value))
+ return \"seth %0,%#%T1\\t; %X1\";
+
+ return \"#\";
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ if (TARGET_ADDR24)
+ return \"ld24 %0,%#%1\";
+
+ return \"#\";
+ }
+ }
+
+ else if (GET_CODE (operands[0]) == MEM
+ && (GET_CODE (operands[1]) == REG || GET_CODE (operands[1]) == SUBREG))
+ {
+ if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx)
+ return \"push %1\";
+
+ return \"st %1,%0\";
+ }
+
+ fatal_insn (\"bad movsi insn\", insn);
+}"
+ [(set_attr "type" "int2,int2,int4,int4,int4,multi,load2,load2,load4,store2,store2,store4")
+ (set_attr "length" "2,2,4,4,4,8,2,2,4,2,2,4")])
+
+; Try to use a four byte / two byte pair for constants not loadable with
+; ldi, ld24, seth.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "two_insn_const_operand" ""))]
+ ""
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (ior:SI (match_dup 0) (match_dup 3)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT tmp;
+ int shift;
+
+ /* In all cases we will emit two instructions. However we try to
+ use 2 byte instructions wherever possible. We can assume the
+ constant isn't loadable with any of ldi, ld24, or seth. */
+
+ /* See if we can load a 24 bit unsigned value and invert it. */
+ if (UINT24_P (~ val))
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (~ val)));
+ emit_insn (gen_one_cmplsi2 (operands[0], operands[0]));
+ DONE;
+ }
+
+ /* See if we can load a 24 bit unsigned value and shift it into place.
+ 0x01fffffe is just beyond ld24's range. */
+ for (shift = 1, tmp = 0x01fffffe;
+ shift < 8;
+ ++shift, tmp <<= 1)
+ {
+ if ((val & ~tmp) == 0)
+ {
+ emit_insn (gen_movsi (operands[0], GEN_INT (val >> shift)));
+ emit_insn (gen_ashlsi3 (operands[0], operands[0], GEN_INT (shift)));
+ DONE;
+ }
+ }
+
+ /* Can't use any two byte insn, fall back to seth/or3. Use ~0xffff instead
+ of 0xffff0000, since the latter fails on a 64-bit host. */
+ operands[2] = GEN_INT ((val) & ~0xffff);
+ operands[3] = GEN_INT ((val) & 0xffff);
+}")
+
+;; Split a symbolic constant into seth (high part) + add3 (lo_sum) when
+;; 32-bit addresses are in use.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "seth_add3_operand" "i"))]
+ "TARGET_ADDR32"
+ [(set (match_dup 0)
+ (high:SI (match_dup 1)))
+ (set (match_dup 0)
+ (lo_sum:SI (match_dup 0)
+ (match_dup 1)))]
+ "")
+
+;; Small data area support.
+;; The address of _SDA_BASE_ is loaded into a register and all objects in
+;; the small data area are indexed off that. This is done for each reference
+;; but cse will clean things up for us. We let the compiler choose the
+;; register to use so we needn't allocate (and maybe even fix) a special
+;; register to use. Since the load and store insns have a 16 bit offset the
+;; total size of the data area can be 64K. However, if the data area lives
+;; above 16M (24 bits), _SDA_BASE_ will have to be loaded with seth/add3 which
+;; would then yield 3 instructions to reference an object [though there would
+;; be no net loss if two or more objects were referenced]. The 3 insns can be
+;; reduced back to 2 if the size of the small data area were reduced to 32K
+;; [then seth + ld/st would work for any object in the area]. Doing this
+;; would require special handling of _SDA_BASE_ (its value would be
+;; (.sdata + 32K) & 0xffff0000) and reloc computations would be different
+;; [I think]. What to do about this is deferred until later and for now we
+;; require .sdata to be in the first 16M.
+
+(define_expand "movsi_sda"
+ [(set (match_dup 2)
+ (unspec [(const_int 0)] 2))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lo_sum:SI (match_dup 2)
+ (match_operand:SI 1 "small_data_operand" "")))]
+ ""
+ "
+{
+ /* During reload no new pseudos may be created, so reuse the
+ destination register for the SDA base. */
+ if (reload_in_progress || reload_completed)
+ operands[2] = operands[0];
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; unspec 2: load the small-data-area base address.
+(define_insn "*load_sda_base"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec [(const_int 0)] 2))]
+ ""
+ "ld24 %0,#_SDA_BASE_"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+;; 32 bit address support.
+
+(define_expand "movsi_addr32"
+ [(set (match_dup 2)
+ ; addr32_operand isn't used because it's too restrictive,
+ ; seth_add3_operand is more general and thus safer.
+ (high:SI (match_operand:SI 1 "seth_add3_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lo_sum:SI (match_dup 2) (match_dup 1)))]
+ ""
+ "
+{
+ /* No new pseudos during reload; reuse the destination. */
+ if (reload_in_progress || reload_completed)
+ operands[2] = operands[0];
+ else
+ operands[2] = gen_reg_rtx (SImode);
+}")
+
+;; seth: set the high 16 bits of a symbolic address.
+(define_insn "set_hi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "symbolic_operand" "")))]
+ ""
+ "seth %0,%#shigh(%1)"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+;; add3: add in the low 16 bits (%B2 prints the low part).
+(define_insn "lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ "add3 %0,%1,%#%B2"
+ [(set_attr "type" "int4")
+ (set_attr "length" "4")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DImode, operands[1]);
+}")
+
+;; DImode moves always output "#" and are split after reload into word
+;; moves by gen_split_move_double.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "move_dest_operand" "=r,r,r,r,m")
+ (match_operand:DI 1 "move_double_src_operand" "r,nG,F,m,r"))]
+ "register_operand (operands[0], DImode) || register_operand (operands[1], DImode)"
+ "#"
+ [(set_attr "type" "multi,multi,multi,load8,store8")
+ (set_attr "length" "4,4,16,6,6")])
+
+(define_split
+ [(set (match_operand:DI 0 "move_dest_operand" "")
+ (match_operand:DI 1 "move_double_src_operand" ""))]
+ "reload_completed"
+ [(match_dup 2)]
+ "operands[2] = gen_split_move_double (operands);")
+
+;; Floating point move insns.
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+}")
+
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "move_dest_operand" "=r,r,r,r,r,T,S,m")
+ (match_operand:SF 1 "move_src_operand" "r,F,U,S,m,r,r,r"))]
+ "register_operand (operands[0], SFmode) || register_operand (operands[1], SFmode)"
+ "@
+ mv %0,%1
+ #
+ ld %0,%1
+ ld %0,%1
+ ld %0,%1
+ st %1,%0
+ st %1,%0
+ st %1,%0"
+ ;; ??? Length of alternative 1 is either 2, 4 or 8.
+ [(set_attr "type" "int2,multi,load2,load2,load4,store2,store2,store4")
+ (set_attr "length" "2,8,2,2,4,2,2,4")])
+
+;; Split an SFmode constant load into an SImode load of its bit pattern.
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ operands[2] = operand_subword (operands[0], 0, 0, SFmode);
+ operands[3] = GEN_INT (l);
+}")
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ /* Everything except mem = const or mem = mem can be done easily. */
+
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+}")
+
+;; Like DImode, DFmode moves are split after reload into word moves.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "move_dest_operand" "=r,r,r,m")
+ (match_operand:DF 1 "move_double_src_operand" "r,F,m,r"))]
+ "register_operand (operands[0], DFmode) || register_operand (operands[1], DFmode)"
+ "#"
+ [(set_attr "type" "multi,multi,load8,store8")
+ (set_attr "length" "4,16,6,6")])
+
+(define_split
+ [(set (match_operand:DF 0 "move_dest_operand" "")
+ (match_operand:DF 1 "move_double_src_operand" ""))]
+ "reload_completed"
+ [(match_dup 2)]
+ "operands[2] = gen_split_move_double (operands);")
+
+;; Zero extension instructions.
+;; Register sources are masked with and3; memory sources use the
+;; unsigned load insns (ldub/lduh) directly.
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (zero_extend:HI (match_operand:QI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#255
+ ldub %0,%1
+ ldub %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:QI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#255
+ ldub %0,%1
+ ldub %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:HI 1 "extend_operand" "r,T,m")))]
+ ""
+ "@
+ and3 %0,%1,%#65535
+ lduh %0,%1
+ lduh %0,%1"
+ [(set_attr "type" "int4,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+;; Signed conversions from a smaller integer to a larger integer
+;; Memory sources use the signed load insns (ldb/ldh). Register sources
+;; have no single insn; they output "#" and are split after reload into a
+;; shift-left/shift-right-arithmetic pair (see the define_splits below).
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r")
+ (sign_extend:HI (match_operand:QI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldb %0,%1
+ ldb %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "2,2,4")])
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ /* Sign extend QI->HI in place: shift the byte to the top of the
+ word, then arithmetic-shift it back down. */
+ rtx op0 = gen_lowpart (SImode, operands[0]);
+ rtx shift = GEN_INT (24);
+
+ operands[2] = gen_ashlsi3 (op0, op0, shift);
+ operands[3] = gen_ashrsi3 (op0, op0, shift);
+}")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:QI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldb %0,%1
+ ldb %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ /* Sign extend QI->SI in place via shift left 24, arithmetic
+ shift right 24. */
+ rtx op0 = gen_lowpart (SImode, operands[0]);
+ rtx shift = GEN_INT (24);
+
+ operands[2] = gen_ashlsi3 (op0, op0, shift);
+ operands[3] = gen_ashrsi3 (op0, op0, shift);
+}")
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:HI 1 "extend_operand" "0,T,m")))]
+ ""
+ "@
+ #
+ ldh %0,%1
+ ldh %0,%1"
+ [(set_attr "type" "multi,load2,load4")
+ (set_attr "length" "4,2,4")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ "reload_completed"
+ [(match_dup 2)
+ (match_dup 3)]
+ "
+{
+ /* Sign extend HI->SI in place via shift left 16, arithmetic
+ shift right 16. */
+ rtx op0 = gen_lowpart (SImode, operands[0]);
+ rtx shift = GEN_INT (16);
+
+ operands[2] = gen_ashlsi3 (op0, op0, shift);
+ operands[3] = gen_ashrsi3 (op0, op0, shift);
+}")
+
+;; Arithmetic instructions.
+
+; ??? Adding an alternative to split add3 of small constants into two
+; insns yields better instruction packing but slower code. Adds of small
+; values is done a lot.
+
+;; add (2 bytes, reg), addi (2 bytes, 8-bit imm), add3 (4 bytes, 16-bit imm).
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,r")
+ (match_operand:SI 2 "nonmemory_operand" "r,I,J")))]
+ ""
+ "@
+ add %0,%2
+ addi %0,%#%2
+ add3 %0,%1,%#%2"
+ [(set_attr "type" "int2,int2,int4")
+ (set_attr "length" "2,2,4")])
+
+;(define_split
+; [(set (match_operand:SI 0 "register_operand" "")
+; (plus:SI (match_operand:SI 1 "register_operand" "")
+; (match_operand:SI 2 "int8_operand" "")))]
+; "reload_completed
+; && REGNO (operands[0]) != REGNO (operands[1])
+; && INT8_P (INTVAL (operands[2]))
+; && INTVAL (operands[2]) != 0"
+; [(set (match_dup 0) (match_dup 1))
+; (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 2)))]
+; "")
+
+;; DImode add; clobbers the condition/carry bit (reg 17) and is split
+;; after reload into cmp (clear carry) + addx/addx.
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:SI 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+;; ??? The cmp clears the condition bit. Can we speed up somehow?
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (match_operand 3 "" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (const_int 0))
+ (use (match_dup 4))])
+ (parallel [(set (match_dup 4)
+ (plus:SI (match_dup 4)
+ (plus:SI (match_dup 5)
+ (match_dup 3))))
+ (set (match_dup 3)
+ (unspec [(const_int 0)] 3))])
+ (parallel [(set (match_dup 6)
+ (plus:SI (match_dup 6)
+ (plus:SI (match_dup 7)
+ (match_dup 3))))
+ (set (match_dup 3)
+ (unspec [(const_int 0)] 3))])]
+ "
+{
+ /* operands[4]/[5] are the low words, [6]/[7] the high words,
+ regardless of endianness. */
+ operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+ operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+}")
+
+;; Clear the carry bit by comparing a register with itself.
+(define_insn "*clear_c"
+ [(set (reg:SI 17)
+ (const_int 0))
+ (use (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "cmp %0,%0"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; addx: add with carry in; unspec 3 marks the carry-out set.
+(define_insn "*add_carry"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0")
+ (plus:SI (match_operand:SI 2 "register_operand" "r")
+ (reg:SI 17))))
+ (set (reg:SI 17)
+ (unspec [(const_int 0)] 3))]
+ ""
+ "addx %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "sub %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; DImode subtract; clobbers the condition/carry bit (reg 17) and is
+;; split after reload into cmp (clear carry) + subx/subx.
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:SI 17))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "6")])
+
+;; ??? The cmp clears the condition bit. Can we speed up somehow?
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (minus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (clobber (match_operand 3 "" ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (const_int 0))
+ (use (match_dup 4))])
+ (parallel [(set (match_dup 4)
+ (minus:SI (match_dup 4)
+ (minus:SI (match_dup 5)
+ (match_dup 3))))
+ (set (match_dup 3)
+ (unspec [(const_int 0)] 3))])
+ (parallel [(set (match_dup 6)
+ (minus:SI (match_dup 6)
+ (minus:SI (match_dup 7)
+ (match_dup 3))))
+ (set (match_dup 3)
+ (unspec [(const_int 0)] 3))])]
+ "
+{
+ /* operands[4]/[5] are the low words, [6]/[7] the high words,
+ regardless of endianness. */
+ operands[4] = operand_subword (operands[0], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[5] = operand_subword (operands[2], (WORDS_BIG_ENDIAN != 0), 0, DImode);
+ operands[6] = operand_subword (operands[0], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+ operands[7] = operand_subword (operands[2], (WORDS_BIG_ENDIAN == 0), 0, DImode);
+}")
+
+;; subx: subtract with borrow in; unspec 3 marks the carry-out set.
+(define_insn "*sub_carry"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "%0")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (reg:SI 17))))
+ (set (reg:SI 17)
+ (unspec [(const_int 0)] 3))]
+ ""
+ "subx %0,%2"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+; Multiply/Divide instructions.
+
+;; 16x16->32 multiply via the accumulator: mullo then read the middle
+;; 32 bits back with mvfacmi.
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" "r"))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "mullo %1,%2\;mvfacmi %0"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "mul %0,%2"
+ [(set_attr "type" "mul2")
+ (set_attr "length" "2")])
+
+;; Signed/unsigned divide and remainder; all are 4-byte insns.
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "div %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "divu %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mod:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "rem %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (umod:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "remu %0,%2"
+ [(set_attr "type" "div4")
+ (set_attr "length" "4")])
+
+;; Boolean instructions.
+;;
+;; We don't define the DImode versions as expand_binop does a good enough job.
+;; And if it doesn't it should be fixed.
+
+;; AND: alternative 0 is the 2-byte register form, alternative 1 the
+;; 4-byte 16-bit-unsigned-immediate `and3' form (constraint K).  When
+;; optimizing for size with an 8-bit constant and distinct src/dest
+;; registers, return "#" so the split below turns this into two 2-byte
+;; insns (which can avoid an alignment NOP).  %X presumably prints the
+;; constant in hex -- confirm PRINT_OPERAND in m32r.c.
+(define_insn "andsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (and:SI (match_operand:SI 1 "register_operand" "%0,r")
+                (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+  ""
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"and3 %0,%1,%#%X2\";
+
+  return \"and %0,%2\";
+}"
+  [(set_attr "type" "int2,int4")
+   (set_attr "length" "2,4")])
+
+;; Split the size-optimized case: move the 8-bit constant into the
+;; destination, then use the short two-operand AND.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (and:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (and:SI (match_dup 1) (match_dup 0)))]
+  "")
+
+;; Inclusive OR; same structure and size trick as andsi3 above.
+(define_insn "iorsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (ior:SI (match_operand:SI 1 "register_operand" "%0,r")
+                (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+  ""
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"or3 %0,%1,%#%X2\";
+
+  return \"or %0,%2\";
+}"
+  [(set_attr "type" "int2,int4")
+   (set_attr "length" "2,4")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ior:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (ior:SI (match_dup 1) (match_dup 0)))]
+  "")
+
+;; Exclusive OR; same structure and size trick as andsi3 above.
+(define_insn "xorsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (xor:SI (match_operand:SI 1 "register_operand" "%0,r")
+                (match_operand:SI 2 "reg_or_uint16_operand" "r,K")))]
+  ""
+  "*
+{
+  /* If we are worried about space, see if we can break this up into two
+     short instructions, which might eliminate a NOP being inserted. */
+  if (optimize_size
+      && m32r_not_same_reg (operands[0], operands[1])
+      && GET_CODE (operands[2]) == CONST_INT
+      && INT8_P (INTVAL (operands[2])))
+    return \"#\";
+
+  else if (GET_CODE (operands[2]) == CONST_INT)
+    return \"xor3 %0,%1,%#%X2\";
+
+  return \"xor %0,%2\";
+}"
+  [(set_attr "type" "int2,int4")
+   (set_attr "length" "2,4")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (xor:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "int8_operand" "")))]
+  "optimize_size && m32r_not_same_reg (operands[0], operands[1])"
+  [(set (match_dup 0) (match_dup 2))
+   (set (match_dup 0) (xor:SI (match_dup 1) (match_dup 0)))]
+  "")
+
+;; Arithmetic negation, 2-byte three-address form.
+(define_insn "negsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (neg:SI (match_operand:SI 1 "register_operand" "r")))]
+  ""
+  "neg %0,%1"
+  [(set_attr "type" "int2")
+   (set_attr "length" "2")])
+
+;; Bitwise complement, 2-byte three-address form.
+(define_insn "one_cmplsi2"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (not:SI (match_operand:SI 1 "register_operand" "r")))]
+  ""
+  "not %0,%1"
+  [(set_attr "type" "int2")
+   (set_attr "length" "2")])
+
+;; Shift instructions.
+
+;; Each shift has three alternatives: 2-byte register form, 2-byte
+;; immediate form (constraint O -- presumably a small shift count;
+;; confirm in m32r.h), and the 4-byte three-address `xxx3' form with a
+;; 16-bit unsigned immediate (constraint K).
+
+(define_insn "ashlsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+        (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
+                   (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+  ""
+  "@
+   sll %0,%2
+   slli %0,%#%2
+   sll3 %0,%1,%#%2"
+  [(set_attr "type" "shift2,shift2,shift4")
+   (set_attr "length" "2,2,4")])
+
+(define_insn "ashrsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+        (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+                     (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+  ""
+  "@
+   sra %0,%2
+   srai %0,%#%2
+   sra3 %0,%1,%#%2"
+  [(set_attr "type" "shift2,shift2,shift4")
+   (set_attr "length" "2,2,4")])
+
+(define_insn "lshrsi3"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+        (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+                     (match_operand:SI 2 "reg_or_uint16_operand" "r,O,K")))]
+  ""
+  "@
+   srl %0,%2
+   srli %0,%#%2
+   srl3 %0,%1,%#%2"
+  [(set_attr "type" "shift2,shift2,shift4")
+   (set_attr "length" "2,2,4")])
+
+;; Compare instructions.
+;; This controls RTL generation and register allocation.
+
+;; We generate RTL for comparisons and branches by having the cmpxx
+;; patterns store away the operands. Then the bcc patterns
+;; emit RTL for both the compare and the branch.
+;;
+;; On the m32r it is more efficient to use the bxxz instructions and
+;; thus merge the compare and branch into one instruction, so they are
+;; preferred.
+
+;; cmpsi emits nothing: it just stashes the two operands in
+;; m32r_compare_op0/op1 for the following bcc/scc expander and DONEs.
+(define_expand "cmpsi"
+  [(set (reg:SI 17)
+        (compare:CC (match_operand:SI 0 "register_operand" "")
+                    (match_operand:SI 1 "reg_or_cmp_int16_operand" "")))]
+  ""
+  "
+{
+  m32r_compare_op0 = operands[0];
+  m32r_compare_op1 = operands[1];
+  DONE;
+}")
+
+;; M32RX only: direct equality compare into the condition bit, with a
+;; dedicated short form (cmpz) for comparison against zero.
+(define_insn "cmp_eqsi_zero_insn"
+  [(set (reg:SI 17)
+        (eq:SI (match_operand:SI 0 "register_operand" "r,r")
+               (match_operand:SI 1 "reg_or_zero_operand" "r,P")))]
+  "TARGET_M32RX"
+  "@
+   cmpeq %0, %1
+   cmpz %0"
+  [(set_attr "type" "int4")
+   (set_attr "length" "4")])
+
+;; The cmp_xxx_insn patterns set the condition bit to the result of the
+;; comparison. There isn't a "compare equal" instruction so cmp_eqsi_insn
+;; is quite inefficient. However, it is rarely used.
+
+;; Allocate the scratch needed by the post-reload split below.
+(define_expand "cmp_eqsi_insn"
+  [(parallel [(set (reg:SI 17)
+                   (eq:SI (match_operand:SI 0 "register_operand" "")
+                          (match_operand:SI 1 "reg_or_uint16_operand" "")))
+              (clobber (match_dup 2))])]
+  ""
+  "operands[2] = gen_reg_rtx (SImode);")
+
+(define_insn "*cmp_eqsi_insn_internal"
+  [(set (reg:SI 17)
+        (eq:SI (match_operand:SI 0 "register_operand" "r,r")
+               (match_operand:SI 1 "reg_or_uint16_operand" "r,K")))
+   (clobber (match_operand:SI 2 "register_operand" "=0,r"))]
+  "reload_completed"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6,8")])
+
+;; x == y is computed as (x ^ y) <u 1, i.e. XOR then an unsigned
+;; compare of the result against 1.
+(define_split
+  [(set (reg:SI 17)
+        (eq:SI (match_operand:SI 0 "register_operand" "")
+               (match_operand:SI 1 "reg_or_uint16_operand" "")))
+   (clobber (match_operand:SI 2 "register_operand" ""))]
+  ""
+  [(set (match_dup 2)
+        (xor:SI (match_dup 0)
+                (match_dup 1)))
+   (set (reg:SI 17)
+        (ltu:SI (match_dup 2)
+                (const_int 1)))]
+  "")
+
+;; Signed less-than into the condition bit; 2-byte register form or
+;; 4-byte 16-bit-signed-immediate form (constraint J).
+(define_insn "cmp_ltsi_insn"
+  [(set (reg:SI 17)
+        (lt:SI (match_operand:SI 0 "register_operand" "r,r")
+               (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
+  ""
+  "@
+   cmp %0,%1
+   cmpi %0,%#%1"
+  [(set_attr "type" "int2,int4")
+   (set_attr "length" "2,4")])
+
+;; Unsigned less-than into the condition bit.
+(define_insn "cmp_ltusi_insn"
+  [(set (reg:SI 17)
+        (ltu:SI (match_operand:SI 0 "register_operand" "r,r")
+                (match_operand:SI 1 "reg_or_int16_operand" "r,J")))]
+  ""
+  "@
+   cmpu %0,%1
+   cmpui %0,%#%1"
+  [(set_attr "type" "int2,int4")
+   (set_attr "length" "2,4")])
+
+
+;; These control RTL generation for conditional jump insns.
+
+;; All ten bcc expanders are identical except for the comparison code:
+;; each hands the code, the branch label and the operands stashed by
+;; cmpsi over to gen_compare (in m32r.c), which emits both the compare
+;; and the branch.
+
+(define_expand "beq"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)EQ, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bne"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)NE, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "blt"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)LT, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "ble"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)LE, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bgt"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)GT, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bge"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)GE, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bltu"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)LTU, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bleu"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)LEU, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bgtu"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)GTU, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+(define_expand "bgeu"
+  [(match_operand 0 "" "")]
+  ""
+  "
+{
+  emit_insn (gen_compare ((int)GEU, operands[0], m32r_compare_op0,
+			  m32r_compare_op1));
+  DONE;
+}")
+
+;; Now match both normal and inverted jump.
+
+;; Branch on the condition bit (reg 17): NE emits `bc' and EQ emits
+;; `bnc'.  NOTE(review): this polarity presumably matches how
+;; gen_compare in m32r.c sets the bit -- confirm there.  The `.s'
+;; suffix selects the short form when the length attribute below has
+;; decided the target is near.
+(define_insn "*branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "eqne_comparison_operator"
+                                      [(reg 17) (const_int 0)])
+                      (label_ref (match_operand 0 "" ""))
+                      (pc)))]
+  ""
+  "*
+{
+  static char instruction[40];
+  sprintf (instruction, \"%s%s %%l0\",
+	   (GET_CODE (operands[1]) == NE) ? \"bc\" : \"bnc\",
+	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
+  return instruction;
+}"
+  [(set_attr "type" "branch")
+   ; We use 400/800 instead of 512,1024 to account for inaccurate insn
+   ; lengths and insn alignments that are complex to track.
+   ; It's not important that we be hyper-precise here. It may be more
+   ; important blah blah blah when the chip supports parallel execution
+   ; blah blah blah but until then blah blah blah this is simple and
+   ; suffices.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 400))
+					   (const_int 800))
+				      (const_int 2)
+				      (const_int 4)))])
+
+;; Same as *branch_insn but with the arms of the if_then_else swapped
+;; (fall through on the tested condition), so the bc/bnc polarity is
+;; inverted: EQ emits `bc'.
+(define_insn "*rev_branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "eqne_comparison_operator"
+                                      [(reg 17) (const_int 0)])
+                      (pc)
+                      (label_ref (match_operand 0 "" ""))))]
+  ;"REVERSIBLE_CC_MODE (GET_MODE (XEXP (operands[1], 0)))"
+  ""
+  "*
+{
+  static char instruction[40];
+  sprintf (instruction, \"%s%s %%l0\",
+	   (GET_CODE (operands[1]) == EQ) ? \"bc\" : \"bnc\",
+	   (get_attr_length (insn) == 2) ? \".s\" : \"\");
+  return instruction;
+}"
+  [(set_attr "type" "branch")
+   ; We use 400/800 instead of 512,1024 to account for inaccurate insn
+   ; lengths and insn alignments that are complex to track.
+   ; It's not important that we be hyper-precise here. It may be more
+   ; important blah blah blah when the chip supports parallel execution
+   ; blah blah blah but until then blah blah blah this is simple and
+   ; suffices.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 400))
+					   (const_int 800))
+				      (const_int 2)
+				      (const_int 4)))])
+
+; reg/reg compare and branch insns
+
+;; Fused register/register compare-and-branch using beq/bne.  When the
+;; target is out of their range, emit the inverted test around an
+;; unconditional `bra' to the real target.
+(define_insn "*reg_branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "eqne_comparison_operator"
+                                      [(match_operand:SI 2 "register_operand" "r")
+                                       (match_operand:SI 3 "register_operand" "r")])
+                      (label_ref (match_operand 0 "" ""))
+                      (pc)))]
+  ""
+  "*
+{
+  /* Is branch target reachable with beq/bne? */
+  if (get_attr_length (insn) == 4)
+    {
+      if (GET_CODE (operands[1]) == EQ)
+	return \"beq %2,%3,%l0\";
+      else
+	return \"bne %2,%3,%l0\";
+    }
+  else
+    {
+      if (GET_CODE (operands[1]) == EQ)
+	return \"bne %2,%3,1f\;bra %l0\;1:\";
+      else
+	return \"beq %2,%3,1f\;bra %l0\;1:\";
+    }
+}"
+  [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; which is complex to track and inaccurate length specs.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 25000))
+					   (const_int 50000))
+				      (const_int 4)
+				      (const_int 8)))])
+
+;; Reversed-arms variant of *reg_branch_insn: fall through on the tested
+;; condition, so the emitted beq/bne polarity is inverted.
+(define_insn "*rev_reg_branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "eqne_comparison_operator"
+                                      [(match_operand:SI 2 "register_operand" "r")
+                                       (match_operand:SI 3 "register_operand" "r")])
+                      (pc)
+                      (label_ref (match_operand 0 "" ""))))]
+  ""
+  "*
+{
+  /* Is branch target reachable with beq/bne? */
+  if (get_attr_length (insn) == 4)
+    {
+      if (GET_CODE (operands[1]) == NE)
+	return \"beq %2,%3,%l0\";
+      else
+	return \"bne %2,%3,%l0\";
+    }
+  else
+    {
+      if (GET_CODE (operands[1]) == NE)
+	return \"bne %2,%3,1f\;bra %l0\;1:\";
+      else
+	return \"beq %2,%3,1f\;bra %l0\;1:\";
+    }
+}"
+  [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; which is complex to track and inaccurate length specs.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 25000))
+					   (const_int 50000))
+				      (const_int 4)
+				      (const_int 8)))])
+
+; reg/zero compare and branch insns
+
+;; Fused compare-against-zero and branch using the single-instruction
+;; b<cc>z forms (the sprintf builds "beqz"/"bnez"/"blez"/"bgtz"/"bltz"/
+;; "bgez").  When the target is out of b<cc>z range, branch on the
+;; inverted condition around an unconditional `bra' to the real target.
+(define_insn "*zero_branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "signed_comparison_operator"
+                                      [(match_operand:SI 2 "register_operand" "r")
+                                       (const_int 0)])
+                      (label_ref (match_operand 0 "" ""))
+                      (pc)))]
+  ""
+  "*
+{
+  char *br,*invbr;
+  char asmtext[40];
+
+  switch (GET_CODE (operands[1]))
+    {
+    case EQ : br = \"eq\"; invbr = \"ne\"; break;
+    case NE : br = \"ne\"; invbr = \"eq\"; break;
+    case LE : br = \"le\"; invbr = \"gt\"; break;
+    case GT : br = \"gt\"; invbr = \"le\"; break;
+    case LT : br = \"lt\"; invbr = \"ge\"; break;
+    case GE : br = \"ge\"; invbr = \"lt\"; break;
+    /* The predicate only accepts the six codes above; guard against an
+       unhandled code leaving br/invbr uninitialized.  */
+    default : abort ();
+    }
+
+  /* Is branch target reachable with bxxz? */
+  if (get_attr_length (insn) == 4)
+    {
+      sprintf (asmtext, \"b%sz %%2,%%l0\", br);
+      output_asm_insn (asmtext, operands);
+    }
+  else
+    {
+      sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", invbr);
+      output_asm_insn (asmtext, operands);
+    }
+  return \"\";
+}"
+  [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; which is complex to track and inaccurate length specs.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 25000))
+					   (const_int 50000))
+				      (const_int 4)
+				      (const_int 8)))])
+
+;; Reversed-arms variant of *zero_branch_insn: fall through on the
+;; tested condition, so br and invbr swap roles.  NOTE(review): the
+;; predicate here is eqne_comparison_operator although the switch
+;; handles all six signed codes; possibly it was meant to be
+;; signed_comparison_operator like the pattern above -- confirm.
+(define_insn "*rev_zero_branch_insn"
+  [(set (pc)
+        (if_then_else (match_operator 1 "eqne_comparison_operator"
+                                      [(match_operand:SI 2 "register_operand" "r")
+                                       (const_int 0)])
+                      (pc)
+                      (label_ref (match_operand 0 "" ""))))]
+  ""
+  "*
+{
+  char *br,*invbr;
+  char asmtext[40];
+
+  switch (GET_CODE (operands[1]))
+    {
+    case EQ : br = \"eq\"; invbr = \"ne\"; break;
+    case NE : br = \"ne\"; invbr = \"eq\"; break;
+    case LE : br = \"le\"; invbr = \"gt\"; break;
+    case GT : br = \"gt\"; invbr = \"le\"; break;
+    case LT : br = \"lt\"; invbr = \"ge\"; break;
+    case GE : br = \"ge\"; invbr = \"lt\"; break;
+    /* Guard against an unhandled code leaving br/invbr uninitialized.  */
+    default : abort ();
+    }
+
+  /* Is branch target reachable with bxxz? */
+  if (get_attr_length (insn) == 4)
+    {
+      sprintf (asmtext, \"b%sz %%2,%%l0\", invbr);
+      output_asm_insn (asmtext, operands);
+    }
+  else
+    {
+      sprintf (asmtext, \"b%sz %%2,1f\;bra %%l0\;1:\", br);
+      output_asm_insn (asmtext, operands);
+    }
+  return \"\";
+}"
+  [(set_attr "type" "branch")
+   ; We use 25000/50000 instead of 32768/65536 to account for slot filling
+   ; which is complex to track and inaccurate length specs.
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 25000))
+					   (const_int 50000))
+				      (const_int 4)
+				      (const_int 8)))])
+
+;; S<cc> operations to set a register to 1/0 based on a comparison
+
+;; seq dispatches on the operands stashed by cmpsi: M32RX uses its
+;; cmpeq/cmpz insn; comparison against zero uses the cheap x <u 1
+;; trick; otherwise the general seq_insn path is taken.
+(define_expand "seq"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (TARGET_M32RX)
+    {
+      if (! reg_or_zero_operand (op2, mode))
+	op2 = force_reg (mode, op2);
+
+      emit_insn (gen_seq_insn_m32rx (op0, op1, op2));
+      DONE;
+    }
+
+  if (GET_CODE (op2) == CONST_INT && INTVAL (op2) == 0)
+    {
+      emit_insn (gen_seq_zero_insn (op0, op1));
+      DONE;
+    }
+
+  if (! reg_or_eq_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_seq_insn (op0, op1, op2));
+  DONE;
+}")
+
+;; M32RX: split below into cmpeq/cmpz plus a copy of the condition bit.
+(define_insn "seq_insn_m32rx"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (eq:SI (match_operand:SI 1 "register_operand" "%r")
+               (match_operand:SI 2 "reg_or_zero_operand" "rP")))
+   (clobber (reg:SI 17))]
+  "TARGET_M32RX"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (eq:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "reg_or_zero_operand" "")))
+   (clobber (reg:SI 17))]
+  "TARGET_M32RX"
+  [(set (reg:SI 17)
+        (eq:SI (match_dup 1)
+               (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))]
+  "")
+
+;; (x == 0) computed as (x <u 1) followed by copying the condition bit
+;; out via movcc_insn.
+(define_insn "seq_zero_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (eq:SI (match_operand:SI 1 "register_operand" "r")
+               (const_int 0)))
+   (clobber (reg:SI 17))]
+  "TARGET_M32R"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6")])
+
+;; NOTE: uses the old start_sequence/gen_sequence API to build the
+;; replacement insns programmatically.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (eq:SI (match_operand:SI 1 "register_operand" "")
+               (const_int 0)))
+   (clobber (reg:SI 17))]
+  "TARGET_M32R"
+  [(match_dup 3)]
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+
+  start_sequence ();
+  emit_insn (gen_cmp_ltusi_insn (op1, GEN_INT (1)));
+  emit_insn (gen_movcc_insn (op0));
+  operands[3] = gen_sequence ();
+  end_sequence ();
+}")
+
+;; General x == y: reduce to a compare against zero in the scratch, then
+;; the <u 1 trick.  The alternatives let the scratch overlap either
+;; input (length 8) or be a fresh register (length 10).
+(define_insn "seq_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r,??r,r")
+        (eq:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
+               (match_operand:SI 2 "reg_or_eq_int16_operand" "r,r,r,PK")))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 3 "=1,2,&r,r"))]
+  "TARGET_M32R"
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,8,10,10")])
+
+;; If the scratch landed on op2, commute the inputs first; a small
+;; constant y is handled as x + (-y), otherwise x ^ y; then test the
+;; scratch with <u 1 and copy the condition bit out.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (eq:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "reg_or_eq_int16_operand" "")))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 3 ""))]
+  "TARGET_M32R && reload_completed"
+  [(match_dup 4)]
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = operands[1];
+  rtx op2 = operands[2];
+  rtx op3 = operands[3];
+  HOST_WIDE_INT value;
+
+  if (GET_CODE (op2) == REG && GET_CODE (op3) == REG
+      && REGNO (op2) == REGNO (op3))
+    {
+      op1 = operands[2];
+      op2 = operands[1];
+    }
+
+  start_sequence ();
+  if (GET_CODE (op1) == REG && GET_CODE (op3) == REG
+      && REGNO (op1) != REGNO (op3))
+    {
+      emit_move_insn (op3, op1);
+      op1 = op3;
+    }
+
+  if (GET_CODE (op2) == CONST_INT && (value = INTVAL (op2)) != 0
+      && CMP_INT16_P (value))
+    emit_insn (gen_addsi3 (op3, op1, GEN_INT (-value)));
+  else
+    emit_insn (gen_xorsi3 (op3, op1, op2));
+
+  emit_insn (gen_cmp_ltusi_insn (op3, GEN_INT (1)));
+  emit_insn (gen_movcc_insn (op0));
+  operands[4] = gen_sequence ();
+  end_sequence ();
+}")
+
+;; sne handles only the cases it can do cheaply: a register (or nonzero
+;; 16-bit constant) operand is reduced to (x ^ y) != 0 and handed to
+;; sne_zero_insn; everything else FAILs to the generic expansion.
+;; (The register_operand check after gen_reg_rtx is always false here.)
+(define_expand "sne"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (GET_CODE (op2) != CONST_INT
+      || (INTVAL (op2) != 0 && UINT16_P (INTVAL (op2))))
+    {
+      rtx reg;
+
+      if (reload_completed || reload_in_progress)
+	FAIL;
+
+      reg = gen_reg_rtx (SImode);
+      emit_insn (gen_xorsi3 (reg, op1, op2));
+      op1 = reg;
+
+      if (! register_operand (op1, mode))
+	op1 = force_reg (mode, op1);
+
+      emit_insn (gen_sne_zero_insn (op0, op1));
+      DONE;
+    }
+  else
+    FAIL;
+}")
+
+;; (x != 0) as (0 <u x): load 0 into the scratch, set the condition bit
+;; from the unsigned compare, then copy the bit out.
+(define_insn "sne_zero_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (ne:SI (match_operand:SI 1 "register_operand" "r")
+               (const_int 0)))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 2 "=&r"))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ne:SI (match_operand:SI 1 "register_operand" "")
+               (const_int 0)))
+   (clobber (reg:SI 17))
+   (clobber (match_scratch:SI 2 ""))]
+  "reload_completed"
+  [(set (match_dup 2)
+        (const_int 0))
+   (set (reg:SI 17)
+        (ltu:SI (match_dup 2)
+                (match_dup 1)))
+   (set (match_dup 0)
+        (reg:SI 17))]
+  "")
+
+;; slt maps directly onto the hardware compare: cmp/cmpi sets the
+;; condition bit, movcc_insn copies it into the destination.
+(define_expand "slt"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_slt_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "slt_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (lt:SI (match_operand:SI 1 "register_operand" "r,r")
+               (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "4,6")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (lt:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  ""
+  [(set (reg:SI 17)
+        (lt:SI (match_dup 1)
+               (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))]
+  "")
+
+;; sle: a constant op2 is strength-reduced to (op1 < op2+1), with
+;; op2 == INT_MAX special-cased as always true; out-of-cmpi-range
+;; constants are forced into a register.  The register case goes
+;; through sle_insn, which computes the inverse of (op2 < op1).
+(define_expand "sle"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (GET_CODE (op2) == CONST_INT)
+    {
+      HOST_WIDE_INT value = INTVAL (op2);
+      if (value >= 2147483647)
+	{
+	  emit_move_insn (op0, GEN_INT (1));
+	  DONE;
+	}
+
+      op2 = GEN_INT (value+1);
+      if (value < -32768 || value >= 32767)
+	op2 = force_reg (mode, op2);
+
+      emit_insn (gen_slt_insn (op0, op1, op2));
+      DONE;
+    }
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sle_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sle_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (le:SI (match_operand:SI 1 "register_operand" "r")
+               (match_operand:SI 2 "register_operand" "r")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
+
+;; x <= y computed as !(y < x): compare, copy the condition bit out,
+;; then invert the 0/1 result with an XOR.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (le:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+        (lt:SI (match_dup 2)
+               (match_dup 1)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (xor:SI (match_dup 0)
+                (const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (le:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+        (lt:SI (match_dup 2)
+               (match_dup 1)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (plus:SI (match_dup 0)
+                 (const_int -1)))
+   (set (match_dup 0)
+        (neg:SI (match_dup 0)))]
+  "")
+
+;; sgt: x > y is just y < x with the operands swapped into slt_insn.
+(define_expand "sgt"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_slt_insn (op0, op2, op1));
+  DONE;
+}")
+
+;; sge: computed as the inverse of (op1 < op2) by the sge_insn splits.
+(define_expand "sge"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sge_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sge_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (ge:SI (match_operand:SI 1 "register_operand" "r,r")
+               (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,10")])
+
+;; x >= y computed as !(x < y): compare, copy the condition bit out,
+;; then invert the 0/1 result with an XOR.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ge:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+        (lt:SI (match_dup 1)
+               (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (xor:SI (match_dup 0)
+                (const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ge:SI (match_operand:SI 1 "register_operand" "")
+               (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+        (lt:SI (match_dup 1)
+               (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (plus:SI (match_dup 0)
+                 (const_int -1)))
+   (set (match_dup 0)
+        (neg:SI (match_dup 0)))]
+  "")
+
+;; sltu: unsigned analogue of slt; cmpu/cmpui then copy the condition
+;; bit out.
+(define_expand "sltu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sltu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sltu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (ltu:SI (match_operand:SI 1 "register_operand" "r,r")
+                (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "6,8")])
+
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (ltu:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  ""
+  [(set (reg:SI 17)
+        (ltu:SI (match_dup 1)
+                (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))]
+  "")
+
+;; sleu: set %0 to (op1 <=u op2).  A constant op2 is strength-reduced
+;; to op1 <u (op2 + 1) so the cheaper sltu_insn can be used;
+;; out-of-range constants are forced into a register.
+(define_expand "sleu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  /* For consistency with sle/sltu above; cmpsi only records registers
+     here, so this is normally a no-op.  */
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (GET_CODE (op2) == CONST_INT)
+    {
+      HOST_WIDE_INT value = INTVAL (op2);
+
+      /* x <=u op2 is always true when op2 is the largest unsigned
+	 value.  SImode constants are canonically sign-extended, so
+	 0xffffffff arrives here as -1; without that test value+1 would
+	 wrap to zero below and we would emit x <u 0, which is always
+	 false.  */
+      if (value == -1 || value >= 2147483647)
+	{
+	  emit_move_insn (op0, GEN_INT (1));
+	  DONE;
+	}
+
+      op2 = GEN_INT (value+1);
+      if (value < 0 || value >= 32767)
+	op2 = force_reg (mode, op2);
+
+      emit_insn (gen_sltu_insn (op0, op1, op2));
+      DONE;
+    }
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sleu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sleu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (leu:SI (match_operand:SI 1 "register_operand" "r")
+                (match_operand:SI 2 "register_operand" "r")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8")])
+
+;; x <=u y computed as !(y <u x): compare, copy the condition bit out,
+;; then invert the 0/1 result with an XOR.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (leu:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+        (ltu:SI (match_dup 2)
+                (match_dup 1)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (xor:SI (match_dup 0)
+                (const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (leu:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "register_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+        (ltu:SI (match_dup 2)
+                (match_dup 1)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (plus:SI (match_dup 0)
+                 (const_int -1)))
+   (set (match_dup 0)
+        (neg:SI (match_dup 0)))]
+  "")
+
+;; sgtu: x >u y is just y <u x with the operands swapped into sltu_insn.
+(define_expand "sgtu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! register_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sltu_insn (op0, op2, op1));
+  DONE;
+}")
+
+;; sgeu: computed as the inverse of (op1 <u op2) by the sgeu_insn splits.
+(define_expand "sgeu"
+  [(match_operand:SI 0 "register_operand" "")]
+  ""
+  "
+{
+  rtx op0 = operands[0];
+  rtx op1 = m32r_compare_op0;
+  rtx op2 = m32r_compare_op1;
+  enum machine_mode mode = GET_MODE (op0);
+
+  if (mode != SImode)
+    FAIL;
+
+  if (! register_operand (op1, mode))
+    op1 = force_reg (mode, op1);
+
+  if (! reg_or_int16_operand (op2, mode))
+    op2 = force_reg (mode, op2);
+
+  emit_insn (gen_sgeu_insn (op0, op1, op2));
+  DONE;
+}")
+
+(define_insn "sgeu_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r,r")
+        (geu:SI (match_operand:SI 1 "register_operand" "r,r")
+                (match_operand:SI 2 "reg_or_int16_operand" "r,J")))
+   (clobber (reg:SI 17))]
+  ""
+  "#"
+  [(set_attr "type" "multi")
+   (set_attr "length" "8,10")])
+
+;; x >=u y computed as !(x <u y): compare, copy the condition bit out,
+;; then invert the 0/1 result with an XOR.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (geu:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "!optimize_size"
+  [(set (reg:SI 17)
+        (ltu:SI (match_dup 1)
+                (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (xor:SI (match_dup 0)
+                (const_int 1)))]
+  "")
+
+;; If optimizing for space, use -(reg - 1) to invert the comparison rather than
+;; xor reg,reg,1 which might eliminate a NOP being inserted.
+(define_split
+  [(set (match_operand:SI 0 "register_operand" "")
+        (geu:SI (match_operand:SI 1 "register_operand" "")
+                (match_operand:SI 2 "reg_or_int16_operand" "")))
+   (clobber (reg:SI 17))]
+  "optimize_size"
+  [(set (reg:SI 17)
+        (ltu:SI (match_dup 1)
+                (match_dup 2)))
+   (set (match_dup 0)
+        (reg:SI 17))
+   (set (match_dup 0)
+        (plus:SI (match_dup 0)
+                 (const_int -1)))
+   (set (match_dup 0)
+        (neg:SI (match_dup 0)))]
+  "")
+
+;; Copy the condition bit (reg 17, assembler name `cbr') into a general
+;; register; the building block for all the scc splits above.
+(define_insn "movcc_insn"
+  [(set (match_operand:SI 0 "register_operand" "=r")
+        (reg:SI 17))]
+  ""
+  "mvfc %0, cbr"
+  [(set_attr "type" "misc")
+   (set_attr "length" "2")])
+
+
+;; Unconditional and other jump instructions.
+
+;; `bra' has short (2-byte) and long (4-byte) forms; the length
+;; estimate uses the same conservative 400/800 window as the
+;; conditional branches above.
+(define_insn "jump"
+  [(set (pc) (label_ref (match_operand 0 "" "")))]
+  ""
+  "bra %l0"
+  [(set_attr "type" "uncond_branch")
+   (set (attr "length") (if_then_else (ltu (plus (minus (match_dup 0) (pc))
+						 (const_int 400))
+					   (const_int 800))
+				      (const_int 2)
+				      (const_int 4)))])
+
+(define_insn "indirect_jump"
+  [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+  ""
+  "jmp %a0"
+  [(set_attr "type" "uncond_branch")
+   (set_attr "length" "2")])
+
+;; Simple return via the link register, usable only when the epilogue
+;; is trivial (direct_return, defined in m32r.c).
+(define_insn "return"
+  [(return)]
+  "direct_return ()"
+  "jmp lr"
+  [(set_attr "type" "uncond_branch")
+   (set_attr "length" "2")])
+
+(define_insn "tablejump"
+  [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+   (use (label_ref (match_operand 1 "" "")))]
+  ""
+  "jmp %a0"
+  [(set_attr "type" "uncond_branch")
+   (set_attr "length" "2")])
+
+;; Calls clobber reg 14 (presumably the link register `lr' written by
+;; jl/bl -- confirm the register naming in m32r.h).
+(define_expand "call"
+  ;; operands[1] is stack_size_rtx
+  ;; operands[2] is next_arg_register
+  [(parallel [(call (match_operand:SI 0 "call_operand" "")
+		    (match_operand 1 "" ""))
+	     (clobber (reg:SI 14))])]
+  ""
+  "")
+
+(define_insn "*call_via_reg"
+  [(call (mem:SI (match_operand:SI 0 "register_operand" "r"))
+	 (match_operand 1 "" ""))
+   (clobber (reg:SI 14))]
+  ""
+  "jl %0"
+  [(set_attr "type" "call")
+   (set_attr "length" "2")])
+
+;; Direct call: use `bl' when the target is within 26-bit range
+;; (call26_operand); otherwise materialize the full address in r14 with
+;; seth/add3 and call through it.
+(define_insn "*call_via_label"
+  [(call (mem:SI (match_operand:SI 0 "call_address_operand" ""))
+	 (match_operand 1 "" ""))
+   (clobber (reg:SI 14))]
+  ""
+  "*
+{
+  int call26_p = call26_operand (operands[0], FUNCTION_MODE);
+
+  if (! call26_p)
+    {
+      /* We may not be able to reach with a `bl' insn so punt and leave it to
+	 the linker.
+	 We do this here, rather than doing a force_reg in the define_expand
+	 so these insns won't be separated, say by scheduling, thus simplifying
+	 the linker. */
+      return \"seth r14,%T0\;add3 r14,r14,%B0\;jl r14\";
+    }
+  else
+    return \"bl %0\";
+}"
+  [(set_attr "type" "call")
+   (set (attr "length")
+	(if_then_else (eq (symbol_ref "call26_operand (operands[0], FUNCTION_MODE)")
+			  (const_int 0))
+		      (const_int 12) ; 10 + 2 for nop filler
+		      ; The return address must be on a 4 byte boundary so
+		      ; there's no point in using a value of 2 here. A 2 byte
+		      ; insn may go in the left slot but we currently can't
+		      ; use such knowledge.
+		      (const_int 4)))])
+
+(define_expand "call_value"
+ ;; operand 2 is stack_size_rtx
+ ;; operand 3 is next_arg_register
+ [(parallel [(set (match_operand 0 "register_operand" "=r")
+ (call (match_operand:SI 1 "call_operand" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))])]
+ ""
+ "")
+
+(define_insn "*call_value_via_reg"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))]
+ ""
+ "jl %1"
+ [(set_attr "type" "call")
+ (set_attr "length" "2")])
+
+(define_insn "*call_value_via_label"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "call_address_operand" ""))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 14))]
+ ""
+ "*
+{
+ int call26_p = call26_operand (operands[1], FUNCTION_MODE);
+
+ if (! call26_p)
+ {
+ /* We may not be able to reach with a `bl' insn so punt and leave it to
+ the linker.
+ We do this here, rather than doing a force_reg in the define_expand
+ so these insns won't be separated, say by scheduling, thus simplifying
+ the linker. */
+ return \"seth r14,%T1\;add3 r14,r14,%B1\;jl r14\";
+ }
+ else
+ return \"bl %1\";
+}"
+ [(set_attr "type" "call")
+ (set (attr "length")
+ (if_then_else (eq (symbol_ref "call26_operand (operands[1], FUNCTION_MODE)")
+ (const_int 0))
+ (const_int 12) ; 10 + 2 for nop filler
+ ; The return address must be on a 4 byte boundary so
+ ; there's no point in using a value of 2 here. A 2 byte
+ ; insn may go in the left slot but we currently can't
+ ; use such knowledge.
+ (const_int 4)))])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "")
+
+;; Special pattern to flush the icache.
+
+(define_insn "flush_icache"
+ [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 0)]
+ ""
+ "* return \"nop ; flush-icache\";"
+ [(set_attr "type" "int2")
+ (set_attr "length" "2")])
+
+;; Speed up fabs and provide correct sign handling for -0
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=r")
+ (abs:DF (match_operand:DF 1 "register_operand" "0")))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (abs:DF (match_operand:DF 1 "register_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (ashift:SI (match_dup 2)
+ (const_int 1)))
+ (set (match_dup 2)
+ (lshiftrt:SI (match_dup 2)
+ (const_int 1)))]
+ "operands[2] = gen_highpart (SImode, operands[0]);")
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
+ ""
+ "#"
+ [(set_attr "type" "multi")
+ (set_attr "length" "4")])
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (abs:SF (match_operand:SF 1 "register_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (ashift:SI (match_dup 2)
+ (const_int 1)))
+ (set (match_dup 2)
+ (lshiftrt:SI (match_dup 2)
+ (const_int 1)))]
+ "operands[2] = gen_highpart (SImode, operands[0]);")
+
+
+;; These patterns are currently commented out as the customer does
+;; not wish the compiler to generate code using instructions which
+;; are not yet open to the public.
+
+;; ;; Move of SImode integer. Currently we can handle register to register moves
+;; ;; and constant into register moves.
+;;
+;; (define_expand "movsicc"
+;; [
+;; (set (match_operand:SI 0 "register_operand" "r,r,r,r")
+;; (if_then_else:SI (match_operand 1 "" "")
+;; (match_operand:SI 2 "conditional_move_operand" "r,r,P,P")
+;; (match_operand:SI 3 "conditional_move_operand" "r,P,r,P")
+;; )
+;; )
+;; ]
+;; ""
+;; "
+;; {
+;; if (! TARGET_M32RX && ! zero_and_one (operands [2], operands [3]))
+;; FAIL;
+;;
+;; /* Generate the comparison that will set the carry flag. */
+;; operands[1] = gen_compare ((int)GET_CODE (operands[1]), m32r_compare_op0, m32r_compare_op1);
+;;
+;; /* If the conditional move is putting either the constant 0 or
+;; the constant 1 into the destination register the RTL to do
+;; this will be optimised away by the combine pass (because
+;; cmp_ne_small_const_insn claims that it can do everything in
+;; one pattern, when this is not really true). Catch this case
+;; here and generate a block to prevent the combiner from
+;; optimising us away. */
+;; if (zero_and_one (operands [2], operands [3]))
+;; emit_insn (gen_blockage());
+;; }")
+;;
+;; ;; Generate the conditional instructions based on how the carry flag is examined.
+;; (define_insn "*movsicc_internal"
+;; [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+;; (if_then_else:SI (match_operand 1 "carry_compare_operand" "")
+;; (match_operand:SI 2 "conditional_move_operand" "r,r,P,P")
+;; (match_operand:SI 3 "conditional_move_operand" "r,P,r,P")
+;; )
+;; )]
+;; "TARGET_M32RX || zero_and_one (operands [2], operands[3])"
+;; "* return emit_cond_move (operands, insn);"
+;; [(set_attr "type" "multi")
+;; (set_attr "length" "8")]
+;; )
+
+;; The movhicc and movqicc patterns are not here because
+;; PROMOTE_MODE() will probably turn any such references into
+;; SImode operations. If they are ever needed then they are
+;; easy to add since they are just duplications of the above
+;; two patterns with SImode replaced by HImode or QImode.
+;;
+;; ;; Conditional Execution
+;; ;; This code is based on the code used for the d10v
+;;
+;; (define_insn "*cond_exec_si_binary_true"
+;; [(if_then_else
+;; (match_operand 3 "conditional_compare_operand" "")
+;; (set (match_operand:SI 0 "register_operand" "=r,r")
+;; (match_operator:SI 4 "binary_parallel_operator"
+;; [(match_operand:SI 1 "register_operand" "0,0")
+;; (match_operand:SI 2 "nonmemory_operand" "r,I")
+;; ]
+;; )
+;; )
+;; (const_int 0)
+;; )]
+;; "TARGET_M32RX"
+;; "* return emit_binary_cond_exec (operands, TRUE);"
+;; [(set_attr "type" "multi")
+;; (set_attr "length" "8")]
+;; )
+;;
+;; (define_insn "*cond_exec_si_binary_false"
+;; [(if_then_else
+;; (match_operand 3 "conditional_compare_operand" "")
+;; (const_int 0)
+;; (set (match_operand:SI 0 "register_operand" "=r,r")
+;; (match_operator:SI 4 "binary_parallel_operator"
+;; [(match_operand:SI 1 "register_operand" "0,0")
+;; (match_operand:SI 2 "nonmemory_operand" "r,I")
+;; ]
+;; )
+;; )
+;; )]
+;; "TARGET_M32RX"
+;; "* return emit_binary_cond_exec (operands, FALSE);"
+;; [(set_attr "type" "multi")
+;; (set_attr "length" "8")]
+;; )
+;;
+;; (define_insn "*cond_exec_si_unary_true"
+;; [(if_then_else
+;; (match_operand 2 "conditional_compare_operand" "")
+;; (set (match_operand:SI 0 "register_operand" "=r")
+;; (match_operator:SI 3 "unary_parallel_operator"
+;; [(match_operand:SI 1 "register_operand" "r")]
+;; )
+;; )
+;; (const_int 0)
+;; )]
+;; "TARGET_M32RX"
+;; "* return emit_unary_cond_exec (operands, TRUE);"
+;; [(set_attr "type" "multi")
+;; (set_attr "length" "8")]
+;; )
+;;
+;; (define_insn "*cond_exec_si_unary_false"
+;; [(if_then_else
+;; (match_operand 2 "conditional_compare_operand" "")
+;; (const_int 0)
+;; (set (match_operand:SI 0 "register_operand" "=r")
+;; (match_operator:SI 3 "unary_parallel_operator"
+;; [(match_operand:SI 1 "register_operand" "r")]
+;; )
+;; )
+;; )]
+;; "TARGET_M32RX"
+;; "* return emit_unary_cond_exec (operands, FALSE);"
+;; [(set_attr "type" "multi")
+;; (set_attr "length" "8")]
+;; )
+;;
+
+;; END CYGNUS LOCAL -- meissner/m32r work
+
+;; Block moves, see m32r.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movstrsi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" ""))
+ (use (match_operand:SI 2 "immediate_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))])]
+ ""
+ "
+{
+ if (operands[0]) /* avoid unused code messages */
+ {
+ m32r_expand_block_move (operands);
+ DONE;
+ }
+}")
+
+;; Insn generated by block moves
+
+(define_insn "movstrsi_internal"
+ [(set (mem:BLK (match_operand:SI 0 "register_operand" "r")) ;; destination
+ (mem:BLK (match_operand:SI 1 "register_operand" "r"))) ;; source
+ (use (match_operand:SI 2 "m32r_block_immediate_operand" "J"));; # bytes to move
+ (set (match_dup 0) (plus:SI (match_dup 0) (minus:SI (match_dup 2) (const_int 4))))
+ (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=&r")) ;; temp 1
+ (clobber (match_scratch:SI 4 "=&r"))] ;; temp 2
+ ""
+ "* return m32r_output_block_move (insn, operands);"
+ [(set_attr "type" "store8")
+ (set_attr "length" "72")]) ;; Maximum
diff --git a/gcc/config/m32r/t-m32r b/gcc/config/m32r/t-m32r
new file mode 100755
index 0000000..1f6e3d7
--- /dev/null
+++ b/gcc/config/m32r/t-m32r
@@ -0,0 +1,70 @@
+# lib1funcs.asm is currently empty.
+CROSS_LIBGCC1 =
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+# Turn off the SDA while compiling libgcc2. There are no headers for it
+# and we want maximal upward compatibility here.
+
+TARGET_LIBGCC2_CFLAGS = -G 0
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+# We need to use -fpic when we are using gcc to compile the routines in
+# initfini.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fpic when compiling the
+# routines in initfini.c.
+# -fpic currently isn't supported for the m32r.
+
+CRTSTUFF_T_CFLAGS =
+
+# .init/.fini section routines
+
+crtinit.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
+ -DCRT_INIT -finhibit-size-directive -fno-inline-functions \
+ -g0 -c $(srcdir)/config/m32r/initfini.c -o crtinit.o
+
+crtfini.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
+ -DCRT_FINI -finhibit-size-directive -fno-inline-functions \
+ -g0 -c $(srcdir)/config/m32r/initfini.c -o crtfini.o
+
+m32rx/crtinit.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
+ -DCRT_INIT -finhibit-size-directive -fno-inline-functions \
+ -g0 -c $(srcdir)/config/m32r/initfini.c -m32rx -o m32rx/crtinit.o
+
+m32rx/crtfini.o: $(srcdir)/config/m32r/initfini.c $(GCC_PASSES) $(CONFIG_H)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(CRTSTUFF_T_CFLAGS) \
+ -DCRT_FINI -finhibit-size-directive -fno-inline-functions \
+ -g0 -c $(srcdir)/config/m32r/initfini.c -m32rx -o m32rx/crtfini.o
+
+# -mmodel={small,medium} requires separate libraries.
+# We don't build libraries for the large model, instead we use the medium
+# libraries. The only difference is that the large model can handle jumps
+# more than 26 signed bits away.
+
+MULTILIB_OPTIONS = mmodel=small/mmodel=medium
+MULTILIB_DIRNAMES = small medium
+MULTILIB_MATCHES = mmodel?medium=mmodel?large
+
+MULTILIB_OPTIONS = mmodel=small/mmodel=medium m32r/m32rx
+MULTILIB_DIRNAMES = small medium m32r m32rx
+
+# Set MULTILIB_EXTRA_OPTS so shipped libraries have small data in .sdata and
+# SHN_M32R_SCOMMON.
+# This is important for objects referenced in system header files.
+MULTILIB_EXTRA_OPTS = msdata=sdata
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/m32r/xm-m32r.h b/gcc/config/m32r/xm-m32r.h
new file mode 100755
index 0000000..57100c8
--- /dev/null
+++ b/gcc/config/m32r/xm-m32r.h
@@ -0,0 +1,47 @@
+/* Configuration for GNU C-compiler for the M32R processor.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Doubles are stored in memory with the high order word first. This
+ matters when cross-compiling. */
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If compiled with Sun CC, the use of alloca requires this #include. */
+#ifndef __GNUC__
+#include "alloca.h"
+#endif