summaryrefslogtreecommitdiff
path: root/gcc/config/convex
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/config/convex')
-rwxr-xr-xgcc/config/convex/convex.c675
-rwxr-xr-xgcc/config/convex/convex.h1503
-rwxr-xr-xgcc/config/convex/convex.md1885
-rwxr-xr-xgcc/config/convex/fixinc.convex416
-rwxr-xr-xgcc/config/convex/x-convex5
-rwxr-xr-xgcc/config/convex/xm-convex.h48
6 files changed, 4532 insertions, 0 deletions
diff --git a/gcc/config/convex/convex.c b/gcc/config/convex/convex.c
new file mode 100755
index 0000000..cd2eb55
--- /dev/null
+++ b/gcc/config/convex/convex.c
@@ -0,0 +1,675 @@
+/* Subroutines for insn-output.c for Convex.
+ Copyright (C) 1988, 1993, 1994, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 1, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "output.h"
+#include "expr.h"
+
+/* Tables used in convex.h */
+
+char regno_ok_for_index_p_base[1 + LAST_VIRTUAL_REGISTER + 1];
+enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER];
+enum reg_class reg_class_from_letter[256];
+
+/* Target cpu index. */
+
+int target_cpu;
+
+/* Boolean to keep track of whether the current section is .text or not.
+ Used by .align handler in convex.h. */
+
+int current_section_is_text;
+
+/* Communication between output_compare and output_condjump. */
+
+static rtx cmp_operand0, cmp_operand1;
+static char cmp_modech;
+
+/* Forwards */
+
+static rtx frame_argblock;
+static int frame_argblock_size;
+static rtx convert_arg_pushes ();
+static void expand_movstr_call ();
+
+/* Here from OVERRIDE_OPTIONS at startup. Initialize constant tables. */
+
+init_convex ()
+{
+ int regno;
+
+ /* Set A and S reg classes. */
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (A_REGNO_P (regno))
+ {
+ regno_ok_for_index_p[regno] = 1;
+ regno_reg_class[regno] = INDEX_REGS;
+ }
+ else
+ {
+ regno_ok_for_index_p[regno] = 0;
+ regno_reg_class[regno] = S_REGS;
+ }
+
+ /* Can't index off the stack pointer, register 0. */
+ regno_ok_for_index_p[STACK_POINTER_REGNUM] = 0;
+ regno_reg_class[STACK_POINTER_REGNUM] = SP_REGS;
+
+ /* Can't index off aliases of the stack pointer. */
+ /* NOTE(review): despite the comment above, the two frame-relative
+ virtual registers are marked indexable (= 1) and only the two
+ stack-pointer-relative ones are disallowed -- confirm intent. */
+ regno_ok_for_index_p[VIRTUAL_INCOMING_ARGS_REGNUM] = 1;
+ regno_ok_for_index_p[VIRTUAL_STACK_VARS_REGNUM] = 1;
+ regno_ok_for_index_p[VIRTUAL_STACK_DYNAMIC_REGNUM] = 0;
+ regno_ok_for_index_p[VIRTUAL_OUTGOING_ARGS_REGNUM] = 0;
+
+ /* Can't index off hard reg -1 == pseudos not assigned */
+ /* NOTE(review): index -1 is in bounds only if regno_ok_for_index_p
+ points one element past regno_ok_for_index_p_base (declared above);
+ presumably arranged by a macro in convex.h -- confirm. */
+ regno_ok_for_index_p[-1] = 0;
+
+ /* Set reg class letters */
+ reg_class_from_letter['a'] = A_REGS;
+ reg_class_from_letter['A'] = INDEX_REGS;
+ reg_class_from_letter['d'] = S_REGS;
+
+ /* Turn off floating point exception enables in the psw. */
+ psw_disable_float ();
+}
+
+psw_disable_float ()
+{
+#if __convex__ && __GNUC__
+ /* Only possible when the compiler itself runs on a Convex and was
+ built with GCC (needs the asm extension below). Read the frame
+ pointer, then walk the chain of frames via word 2 of each frame,
+ clearing bits 0x1000c400 in word 1 -- presumably the saved PSW's
+ floating-point exception-enable bits (TODO confirm against the
+ Convex architecture manual). */
+ register int *p;
+ asm ("mov fp,%0" : "=a" (p));
+ while (p)
+ {
+ p[1] &= ~0x1000c400;
+ p = (int *) p[2];
+ }
+#endif
+}
+
+/* Here to output code for a compare insn. Output nothing, just
+ record the operands and their mode. */
+
+char *
+output_cmp (operand0, operand1, modech)
+ rtx operand0, operand1;
+ char modech;
+{
+ /* Just latch the compare's operands and mode character into the
+ static communication variables; output_condjump emits the actual
+ compare instruction fused with the jump. Returns the empty string
+ so the compare insn itself assembles to nothing. */
+ cmp_operand0 = operand0;
+ cmp_operand1 = operand1;
+ cmp_modech = modech;
+ return "";
+}
+
+/* Output code for a conditional jump. The preceding instruction
+ is necessarily a compare. Output two instructions, for example
+ eq.w a1,a2
+ jbra.t L5
+ for
+ (cmpsi a1 a2)
+ (beq L5)
+ */
+
+char *
+output_condjump (label, cond, jbr_sense)
+ rtx label;
+ char *cond;
+ char jbr_sense;
+{
+ /* LABEL is the jump target; COND is the two-letter compare op name
+ ("eq", "lt", ...); JBR_SENSE is 't' or 'f', the branch sense.
+ Uses cmp_operand0/1 and cmp_modech recorded by output_cmp. */
+ rtx operands[3];
+ char cmp_op[4]; /* holds COND or "neg" plus NUL -- max 3 chars */
+ char buf[80];
+ char jbr_regch;
+
+ strcpy (cmp_op, cond);
+
+ /* [BL] mean the value is being compared against immediate 0.
+ Use neg.x, which produces the same carry that eq.x #0 would if it
+ existed. In this case operands[1] is a scratch register, not a
+ compare operand. */
+
+ if (cmp_modech == 'B' || cmp_modech == 'L')
+ {
+ /* Lowercase the mode character ('B' -> 'b', 'L' -> 'l'). */
+ cmp_modech = cmp_modech - 'A' + 'a';
+ strcpy (cmp_op, "neg");
+ }
+
+ /* [WH] mean the value being compared resulted from "add.[wh] #-1,rk"
+ when rk was nonnegative -- we can omit equality compares against -1
+ or inequality compares against 0. */
+
+ else if (cmp_modech == 'W' || cmp_modech == 'H')
+ {
+ if (! strcmp (cmp_op, "eq") && cmp_operand1 == constm1_rtx)
+ jbr_sense ^= 't' ^ 'f'; /* toggle 't' <-> 'f' */
+ else if (! strcmp (cmp_op, "lt") && cmp_operand1 == const0_rtx)
+ ;
+ else
+ cmp_modech = cmp_modech - 'A' + 'a'; /* can't omit: emit compare */
+ }
+
+ /* Constant must be first; swap operands if necessary.
+ If lt, le, ltu, leu are swapped, change to le, lt, leu, ltu
+ and reverse the sense of the jump. */
+
+ if (! REG_P (cmp_operand1))
+ {
+ operands[0] = cmp_operand1;
+ operands[1] = cmp_operand0;
+ if (cmp_op[0] == 'l')
+ {
+ cmp_op[1] ^= 'e' ^ 't'; /* toggle second letter: lt <-> le */
+ jbr_sense ^= 't' ^ 'f';
+ }
+ }
+ else
+ {
+ operands[0] = cmp_operand0;
+ operands[1] = cmp_operand1;
+ }
+
+ operands[2] = label;
+
+ /* Pick the jump flavor from the register file of the compared reg. */
+ if (S_REG_P (operands[1]))
+ jbr_regch = 's';
+ else if (A_REG_P (operands[1]))
+ jbr_regch = 'a';
+ else
+ abort ();
+
+ /* [WH] with the compare omitted emits only the jump; otherwise emit
+ the compare and the jump as one template. */
+ if (cmp_modech == 'W' || cmp_modech == 'H')
+ sprintf (buf, "jbr%c.%c %%l2", jbr_regch, jbr_sense);
+ else
+ sprintf (buf, "%s.%c %%0,%%1\n\tjbr%c.%c %%l2",
+ cmp_op, cmp_modech, jbr_regch, jbr_sense);
+ output_asm_insn (buf, operands);
+ return "";
+}
+
+/* Return 1 if OP is valid for cmpsf.
+ In IEEE mode, +/- zero compares are not handled by
+ the immediate versions of eq.s and on some machines, lt.s, and le.s.
+ So disallow 0.0 as the immediate operand of xx.s compares in IEEE mode. */
+
+int
+nonmemory_cmpsf_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+#if _IEEE_FLOAT_
+ /* In IEEE mode, reject literal 0.0 so +/-0 compares never use the
+ immediate forms (see the comment above this function). */
+ if (op == CONST0_RTX (SFmode))
+ return 0;
+#endif
+
+ return nonmemory_operand (op, mode);
+}
+
+/* Convex /bin/as does not like unary minus in some contexts.
+ Simplify CONST addresses to remove it. */
+
+rtx
+simplify_for_convex (x)
+ rtx x;
+{
+ /* Note: rewrites X in place (PUT_CODE / XEXP assignment), and
+ recurses to strip CONST wrappers. */
+ switch (GET_CODE (x))
+ {
+ case MINUS:
+ /* (minus a -n) -> (plus a n), removing the unary minus that
+ /bin/as rejects. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) < 0)
+ {
+ PUT_CODE (x, PLUS);
+ XEXP (x, 1) = GEN_INT (- INTVAL (XEXP (x, 1)));
+ }
+ break;
+
+ case CONST:
+ return simplify_for_convex (XEXP (x, 0));
+ }
+
+ return x;
+}
+
+/* Routines to separate CONST_DOUBLEs into component parts. */
+
+int
+const_double_high_int (x)
+ rtx x;
+{
+ /* Return the most-significant 32-bit half of CONST_DOUBLE X.
+ For float modes the halves are fetched swapped relative to the
+ integer case -- presumably floats are stored in the CONST_DOUBLE
+ in the opposite word order (TODO confirm against rtl.h). */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return CONST_DOUBLE_LOW (x);
+ else
+ return CONST_DOUBLE_HIGH (x);
+}
+
+int
+const_double_low_int (x)
+ rtx x;
+{
+ /* Return the least-significant 32-bit half of CONST_DOUBLE X;
+ mirror image of const_double_high_int above. */
+ if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ return CONST_DOUBLE_HIGH (x);
+ else
+ return CONST_DOUBLE_LOW (x);
+}
+
+/* Inline block copy. */
+
+void
+expand_movstr (operands)
+ rtx *operands;
+{
+ /* operands[0] = destination MEM, operands[1] = source MEM,
+ operands[2] = byte count, operands[3] = alignment (CONST_INT).
+ Emits an inline, software-pipelined copy (loads run NREGS pieces
+ ahead of stores); falls back to a memcpy call for variable or
+ large counts. */
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int align = INTVAL (operands[3]);
+ int nregs, maxsize;
+ unsigned len;
+ enum machine_mode mode;
+ rtx reg, load, store, prev_store, prev_store_2;
+ int size;
+
+ /* Decide how many regs to use, depending on load latency, and what
+ size pieces to move, depending on whether machine does unaligned
+ loads and stores efficiently. */
+
+ if (TARGET_C1)
+ {
+ /* ld.l latency is 4, no alignment problems. */
+ nregs = 3, maxsize = 8;
+ }
+ else if (TARGET_C2)
+ {
+ /* loads are latency 2 if we avoid ld.l not at least word aligned. */
+ if (align >= 4)
+ nregs = 2, maxsize = 8;
+ else
+ nregs = 2, maxsize = 4;
+ }
+ else if (TARGET_C34)
+ {
+ /* latency is 4 if aligned, horrible if not. */
+ nregs = 3, maxsize = align;
+ }
+ else if (TARGET_C38)
+ {
+ /* latency is 2 if at least word aligned, 3 or 4 if unaligned. */
+ if (align >= 4)
+ nregs = 2, maxsize = 8;
+ else
+ nregs = 3, maxsize = 8;
+ }
+ else
+ abort ();
+
+ /* Caller is not necessarily prepared for us to fail in this
+ expansion. So fall back by generating memcpy call here. */
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || (len = INTVAL (operands[2])) > (unsigned) 32 * maxsize)
+ {
+ expand_movstr_call (operands);
+ return;
+ }
+
+ reg = 0;
+ prev_store = prev_store_2 = 0;
+
+ while (len > 0)
+ {
+ /* Pick the widest piece allowed by remaining length and maxsize. */
+ if (len >= 8 && maxsize >= 8)
+ mode = DImode;
+ else if (len >= 4 && maxsize >= 4)
+ mode = SImode;
+ else if (len >= 2 && maxsize >= 2)
+ mode = HImode;
+ else
+ mode = QImode;
+
+ /* If no temp pseudo to reuse, or not the right mode, make one */
+ if (! reg || GET_MODE (reg) != mode)
+ reg = gen_reg_rtx (mode);
+
+ /* Get src and dest in the right mode */
+ if (GET_MODE (src) != mode)
+ src = change_address (src, mode, 0),
+ dest = change_address (dest, mode, 0);
+
+ /* Make load and store patterns for this piece */
+ load = gen_rtx (SET, VOIDmode, reg, src);
+ store = gen_rtx (SET, VOIDmode, dest, reg);
+
+ /* Emit the load and the store from last time.
+ When we emit a store, we can reuse its temp reg. */
+ emit_insn (load);
+ if (prev_store)
+ {
+ reg = SET_SRC (prev_store);
+ emit_insn (prev_store);
+ }
+ else
+ reg = 0;
+
+ /* Queue up the store, for next time or the time after that. */
+ if (nregs == 2)
+ prev_store = store;
+ else
+ prev_store = prev_store_2, prev_store_2 = store;
+
+ /* Advance to next piece. */
+ size = GET_MODE_SIZE (mode);
+ src = adj_offsettable_operand (src, size);
+ dest = adj_offsettable_operand (dest, size);
+ len -= size;
+ }
+
+ /* Finally, emit the last stores. */
+ if (prev_store)
+ emit_insn (prev_store);
+ if (prev_store_2)
+ emit_insn (prev_store_2);
+}
+
+static void
+expand_movstr_call (operands)
+ rtx *operands;
+{
+ /* Fallback for expand_movstr: emit a call
+ memcpy (dest-addr, src-addr, count), converting the count to the
+ target's size_t mode. */
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0,
+ VOIDmode, 3,
+ XEXP (operands[0], 0), Pmode,
+ XEXP (operands[1], 0), Pmode,
+ convert_to_mode (TYPE_MODE (sizetype), operands[2],
+ TREE_UNSIGNED (sizetype)),
+ TYPE_MODE (sizetype));
+}
+
+#if _IEEE_FLOAT_
+#define MAX_FLOAT 3.4028234663852886e+38
+#define MIN_FLOAT 1.1754943508222875e-38
+#else
+#define MAX_FLOAT 1.7014117331926443e+38
+#define MIN_FLOAT 2.9387358770557188e-39
+#endif
+
+int
+check_float_value (mode, dp, overflow)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE *dp;
+ int overflow;
+{
+ /* Clamp *DP to the representable single-precision range (bounds set
+ by MAX_FLOAT/MIN_FLOAT above, differing for IEEE vs native float).
+ Return 1 if *DP was modified, 0 if left untouched. OVERFLOW
+ nonzero means the value already overflowed upstream. */
+ REAL_VALUE_TYPE d = *dp;
+
+ if (overflow)
+ {
+ /* NOTE(review): sign is lost here -- a negative overflow is also
+ replaced by +MAX_FLOAT. Confirm whether callers only pass
+ positive overflows. */
+ *dp = MAX_FLOAT;
+ return 1;
+ }
+
+ if (mode == SFmode)
+ {
+ if (d > MAX_FLOAT)
+ {
+ *dp = MAX_FLOAT;
+ return 1;
+ }
+ else if (d < -MAX_FLOAT)
+ {
+ *dp = -MAX_FLOAT;
+ return 1;
+ }
+ else if ((d > 0 && d < MIN_FLOAT) || (d < 0 && d > -MIN_FLOAT))
+ {
+ /* Underflow: flush denormal-range values to zero. */
+ *dp = 0.0;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Output the label at the start of a function.
+ Precede it with the number of formal args so debuggers will have
+ some idea of how many args to print. */
+
+void
+asm_declare_function_name (file, name, decl)
+ FILE *file;
+ char *name;
+ tree decl;
+{
+ tree parms; /* NOTE(review): unused */
+ int nargs = list_length (DECL_ARGUMENTS (decl));
+
+ char *p, c;
+ extern char *version_string;
+ static char vers[4]; /* static => zero-filled, so NUL-terminated */
+ int i;
+
+ /* Collect the first three digit characters of version_string into
+ vers[], skipping non-digits and padding with '0' once the string
+ (or its first word) ends. */
+ p = version_string;
+ for (i = 0; i < 3; ) {
+ c = *p;
+ /* Unsigned-compare idiom: true exactly when c is '0'..'9'. */
+ if (c - '0' < (unsigned) 10)
+ vers[i++] = c;
+ if (c == 0 || c == ' ')
+ vers[i++] = '0'; /* pad; do not advance past the terminator */
+ else
+ p++;
+ }
+ fprintf (file, "\tds.b \"g%s\"\n", vers);
+
+ /* Two-digit formal-arg count for debuggers; 100 or more args is
+ emitted as "+00". */
+ if (nargs < 100)
+ fprintf (file, "\tds.b \"+%02d\\0\"\n", nargs);
+ else
+ fprintf (file, "\tds.b \"+00\\0\"\n");
+
+ ASM_OUTPUT_LABEL (file, name);
+}
+
+/* Print an instruction operand X on file FILE.
+ CODE is the code from the %-spec that requested printing this operand;
+ if `%z3' was used to print operand 3, then CODE is 'z'. */
+/* Convex codes:
+ %u prints a CONST_DOUBLE's high word
+ %v prints a CONST_DOUBLE's low word
+ %z prints a CONST_INT shift count as a multiply operand -- viz. 1 << n.
+ */
+
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ char code;
+{
+ long u[2];
+ REAL_VALUE_TYPE d;
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ case MEM:
+ output_address (XEXP (x, 0));
+ break;
+
+ case CONST_DOUBLE:
+ REAL_VALUE_FROM_CONST_DOUBLE (d, x);
+ switch (GET_MODE (x)) {
+ case DFmode:
+#if 0 /* doesn't work, produces dfloats */
+ REAL_VALUE_TO_TARGET_DOUBLE (d, u);
+#else
+ /* Type-pun the host double into two 32-bit halves. NOTE(review):
+ assumes the host's double layout matches the target's -- only
+ valid for a native or like-formatted host; also assumes int is
+ 32 bits and the union halves are in big-endian word order. */
+ {
+ union { double d; int i[2]; } t;
+ t.d = d;
+ u[0] = t.i[0];
+ u[1] = t.i[1];
+ }
+#endif
+ /* %u/%v select the high/low word; otherwise print decimal.
+ NOTE(review): u[] is long but printed with %#x -- mismatched
+ on hosts where long is wider than int. */
+ if (code == 'u')
+ fprintf (file, "#%#x", u[0]);
+ else if (code == 'v')
+ fprintf (file, "#%#x", u[1]);
+ else
+ outfloat (file, d, "%.17e", "#", "");
+ break;
+ case SFmode:
+ outfloat (file, d, "%.9e", "#", "");
+ break;
+ default:
+ /* Non-float CONST_DOUBLE (e.g. DImode): print the requested half. */
+ if (code == 'u')
+ fprintf (file, "#%d", CONST_DOUBLE_HIGH (x));
+ else
+ fprintf (file, "#%d", CONST_DOUBLE_LOW (x));
+ }
+ break;
+
+ default:
+ if (code == 'z')
+ {
+ /* Shift count printed as the equivalent multiplier, 1 << n. */
+ if (GET_CODE (x) != CONST_INT)
+ abort ();
+ fprintf (file, "#%d", 1 << INTVAL (x));
+ }
+ else
+ {
+ /* Any other constant: immediate-prefix '#' then the constant. */
+ putc ('#', file);
+ output_addr_const (file, x);
+ }
+ }
+}
+
+/* Print a memory operand whose address is X, on file FILE. */
+
+print_operand_address (file, addr)
+ FILE *file;
+ rtx addr;
+{
+ /* Emit ADDR in Convex assembler syntax:
+ (reg) register indirect
+ offset(reg) base + displacement
+ constant absolute
+ with a leading '@' for one extra level of memory indirection. */
+ rtx index = 0;
+ rtx offset = 0;
+
+ if (GET_CODE (addr) == MEM)
+ {
+ fprintf (file, "@");
+ addr = XEXP (addr, 0);
+ }
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ index = addr;
+ break;
+
+ case PLUS:
+ /* Accept the register in either arm of the PLUS; the other arm
+ must be the constant part. */
+ index = XEXP (addr, 0);
+ if (REG_P (index))
+ offset = XEXP (addr, 1);
+ else
+ {
+ offset = XEXP (addr, 0);
+ index = XEXP (addr, 1);
+ if (! REG_P (index))
+ abort ();
+ }
+ break;
+
+ default:
+ offset = addr;
+ break;
+ }
+
+ if (offset)
+ output_addr_const (file, offset);
+
+ if (index)
+ fprintf (file, "(%s)", reg_names[REGNO (index)]);
+}
+
+/* Output a float to FILE, value VALUE, format FMT, preceded by PFX
+ and followed by SFX. */
+
+outfloat (file, value, fmt, pfx, sfx)
+ FILE *file;
+ REAL_VALUE_TYPE value;
+ char *fmt, *pfx, *sfx;
+{
+ /* buf must hold the decimal rendering of VALUE under FMT; 64 chars
+ suffices for the "%.17e"/"%.9e" formats used by print_operand. */
+ char buf[64];
+ fputs (pfx, file);
+ REAL_VALUE_TO_DECIMAL (value, fmt, buf);
+ fputs (buf, file);
+ fputs (sfx, file);
+}
+
+/* Here during RTL generation of return. If we are at the final return
+ in a function, go through the function and replace pushes with stores
+ into a frame arg block. This is similar to what ACCUMULATE_OUTGOING_ARGS
+ does, but we must index off the frame pointer, not the stack pointer,
+ and the calling sequence does not require the arg block to be at the
+ top of the stack. */
+
+replace_arg_pushes ()
+{
+ /* Doesn't work yet. */
+ /* Intended implementation would use the frame_argblock statics and
+ convert_arg_pushes declared at the top of this file -- presumably
+ rewriting pushes into frame-pointer-relative stores as described
+ in the comment above; not yet written. */
+}
+
+/* Output the insns needed to do a call. operands[] are
+ 0 - MEM, the place to call
+ 1 - CONST_INT, the number of bytes in the arg list
+ 2 - CONST_INT, the number of arguments
+ 3 - CONST_INT, the number of bytes to pop
+ 4 - address of the arg list.
+ */
+
+char *
+output_call (insn, operands)
+ rtx insn, *operands;
+{
+ /* Only the case where the arg list sits at the top of the stack is
+ handled: point ap at it. */
+ if (operands[4] == stack_pointer_rtx)
+ output_asm_insn ("mov sp,ap", operands);
+ else
+ abort ();
+
+ /* Standard calling sequence pushes the argument count first. */
+ if (TARGET_ARGCOUNT)
+ output_asm_insn ("pshea %a2", operands);
+
+ output_asm_insn ("calls %0", operands);
+
+ /* Restore ap after the call; 12(fp) presumably holds the caller's
+ saved ap in the Convex frame layout -- TODO confirm. */
+ output_asm_insn ("ld.w 12(fp),ap", operands);
+
+ /* Pop the arg bytes (operands[3]) if any were pushed on the stack. */
+ if (operands[4] == stack_pointer_rtx && operands[3] != const0_rtx)
+ output_asm_insn ("add.w %3,sp", operands);
+
+ return "";
+}
+
+
+/* Here after reloading, before the second scheduling pass. */
+
+emit_ap_optimizations ()
+{
+ /* Removed for now. */
+ /* Hook called after reload, before the second scheduling pass (see
+ comment above); currently intentionally a no-op. */
+}
+
diff --git a/gcc/config/convex/convex.h b/gcc/config/convex/convex.h
new file mode 100755
index 0000000..f455f96
--- /dev/null
+++ b/gcc/config/convex/convex.h
@@ -0,0 +1,1503 @@
+/* Definitions of target machine for GNU compiler. Convex version.
+ Copyright (C) 1988, 1994, 1995, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Standard GCC variables that we reference. */
+
+extern int target_flags;
+
+/* Convex machine-specific flags
+ -mc1 target instruction set, libraries, scheduling
+ -mc2
+ -mc32
+ -mc34
+ -mc38
+ -margcount use standard calling sequence, with arg count word
+ -mno-argcount don't push arg count, depend on symbol table
+ -margcount-nop place arg count in a nop instruction (faster than push)
+ -mvolatile-cache use data cache for volatile mem refs (default)
+ -mvolatile-nocache bypass data cache for volatile mem refs
+ -mlong32 cc- and libc-compatible 32-bit longs
+ -mlong64 64-bit longs
+*/
+
+/* Macro to define tables used to set -mXXX flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 0
+#endif
+
+#define TARGET_SWITCHES \
+ { { "c1", 001 }, \
+ { "c2", 002 }, \
+ { "c32", 004 }, \
+ { "c34", 010 }, \
+ { "c38", 020 }, \
+ { "argcount", 0100 }, \
+ { "argcount-nop", 0200 }, \
+ { "no-argcount", -0300 }, \
+ { "volatile-cache", -0400 }, \
+ { "no-volatile-cache", 0400 }, \
+ { "volatile-nocache", 0400 }, \
+ { "long64", 01000 }, \
+ { "long32", -01000 }, \
+ { "", TARGET_DEFAULT | TARGET_CPU_DEFAULT}}
+
+/* Macros used in the machine description to test the flags. */
+
+#define TARGET_C1 (target_cpu == 0)
+#define TARGET_C2 (target_cpu == 1)
+#define TARGET_C34 (target_cpu == 2)
+#define TARGET_C38 (target_cpu == 3)
+#define TARGET_ARGCOUNT (target_flags & 0100)
+#define TARGET_ARGCOUNT_NOP (target_flags & 0200)
+#define TARGET_LONG64 (target_flags & 01000)
+#define TARGET_VOLATILE_NOCACHE (target_flags & 0400)
+
+/* Map the -mcN switch bits (see TARGET_SWITCHES above: 001 = c1,
+ 002 = c2, 004 = c32, 010 = c34, 020 = c38) onto target_cpu
+ (0 = C1, 1 = C2/C32, 2 = C34, 3 = C38) and initialize the constant
+ tables via init_convex. If any explicit cpu switch differs from the
+ compiled-in default, the default cpu bits are cleared first. Note
+ c2 and c32 (mask 006) share target_cpu == 1, matching TARGET_C2. */
+#define OVERRIDE_OPTIONS \
+{ \
+ init_convex (); \
+ if ((target_flags & 077) != ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 077)) \
+ target_flags &= ~ (TARGET_DEFAULT | TARGET_CPU_DEFAULT); \
+ if (target_flags & 001) \
+ target_cpu = 0; \
+ else if (target_flags & 006) \
+ target_cpu = 1; \
+ else if (target_flags & 010) \
+ target_cpu = 2; \
+ else if (target_flags & 020) \
+ target_cpu = 3; \
+}
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "-Dconvex -Dunix -Asystem(unix) -Acpu(convex) -Amachine(convex)"
+
+/* Print subsidiary information on the compiler version in use. */
+
+#define TARGET_VERSION fprintf (stderr, " (convex)");
+
+/* Target-dependent specs.
+ Some libraries come in c1 and c2+ versions; use the appropriate ones.
+ Make a target-dependent __convex_cxx__ define to relay the target cpu
+ to the program being compiled. */
+
+#if (TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 1
+
+/* C1 default */
+
+#if _IEEE_FLOAT_
+
+#define CPP_SPEC \
+"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-D__convex_c1__}}}} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_IEEE_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#else
+
+#define CPP_SPEC \
+"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-D__convex_c1__}}}} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_CONVEX_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#endif
+
+#define LIB_SPEC \
+"%{!mc2:%{!mc32:%{!mc34:%{!mc38:-lC1%{traditional:_old}%{p:_p}%{pg:_p}}}}} \
+ %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ -lc%{traditional:_old}%{p:_p}%{pg:_p}"
+
+#endif
+
+#if (TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 2
+
+/* C2 default */
+
+#if _IEEE_FLOAT_
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{!mc1:%{!mc32:%{!mc34:%{!mc38:-D__convex_c2__}}}} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_IEEE_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#else
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{!mc1:%{!mc32:%{!mc34:%{!mc38:-D__convex_c2__}}}} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_CONVEX_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#endif
+
+#define LIB_SPEC \
+"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{!mc1:%{!mc32:%{!mc34:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \
+ %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ -lc%{traditional:_old}%{p:_p}%{pg:_p}"
+
+#endif
+
+#if (TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 4
+
+/* C32 default */
+
+#if _IEEE_FLOAT_
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{!mc1:%{!mc2:%{!mc34:%{!mc38:-D__convex_c32__}}}} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_IEEE_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#else
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{!mc1:%{!mc2:%{!mc34:%{!mc38:-D__convex_c32__}}}} \
+ %{mc34:-D__convex_c34__} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_CONVEX_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#endif
+
+#define LIB_SPEC \
+"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{!mc1:%{!mc2:%{!mc34:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \
+ %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ -lc%{traditional:_old}%{p:_p}%{pg:_p}"
+
+#endif
+
+#if (TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 010
+
+/* C34 default */
+
+#if _IEEE_FLOAT_
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc38:-D__convex_c34__}}}} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_IEEE_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#else
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc38:-D__convex_c34__}}}} \
+ %{mc38:-D__convex_c38__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_CONVEX_FLOAT_ \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#endif
+
+#define LIB_SPEC \
+"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \
+ %{mc38:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ -lc%{traditional:_old}%{p:_p}%{pg:_p}"
+
+#endif
+
+#if (TARGET_DEFAULT | TARGET_CPU_DEFAULT) & 020
+
+/* C38 default */
+
+#if _IEEE_FLOAT_
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_IEEE_FLOAT_ \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc34:-D__convex_c38__}}}} \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#else
+
+#define CPP_SPEC \
+"%{mc1:-D__convex_c1__} \
+ %{mc2:-D__convex_c2__} \
+ %{mc32:-D__convex_c32__} \
+ %{mc34:-D__convex_c34__} \
+ %{fno-builtin:-D__NO_INLINE} \
+ -D__NO_INLINE_MATH -D__NO_INLINE_STDLIB \
+ -D_CONVEX_FLOAT_ \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc34:-D__convex_c38__}}}} \
+ %{.S:-P} \
+ %{!traditional:-D__stdc__} \
+ %{!traditional:-D_LONGLONG} \
+ %{!traditional:-Ds64_t=long\\ long -Du64_t=unsigned\\ long\\ long} \
+ %{!ansi:-D_POSIX_SOURCE} \
+ %{!ansi:-D_CONVEX_SOURCE}"
+
+#endif
+
+#define LIB_SPEC \
+"%{mc1:-lC1%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc2:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc32:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}} \
+ %{!mc1:%{!mc2:%{!mc32:%{!mc34:-lC2%{traditional:_old}%{p:_p}%{pg:_p}}}}} \
+ -lc%{traditional:_old}%{p:_p}%{pg:_p}"
+
+#endif
+
+#if _IEEE_FLOAT_
+
+/* ieee default */
+
+#define ASM_SPEC "-fi"
+
+#define LINK_SPEC \
+"-E%{traditional:no}posix \
+ -X \
+ %{F} %{M*} %{y*} \
+ -fi \
+ -A__iob=___ap$iob \
+ -A_use_libc_sema=___ap$use_libc_sema \
+ %{traditional:-A___gcc_cleanup=__cleanup} \
+ %{!traditional:-A___gcc_cleanup=___ap$do_registered_functions} \
+ -L/usr/lib"
+
+#define STARTFILE_SPEC \
+"%{!pg:%{!p:/usr/lib/crt/crt0.o}} \
+ %{!pg:%{p:/usr/lib/crt/mcrt0.o}} \
+ %{pg:/usr/lib/crt/gcrt0.o} \
+ /usr/lib/crt/fpmode_i.o"
+
+#else
+
+/* native default */
+
+#define ASM_SPEC "-fn"
+
+#define LINK_SPEC \
+"-E%{traditional:no}posix \
+ -X \
+ %{F} %{M*} %{y*} \
+ -fn \
+ -A__iob=___ap$iob \
+ -A_use_libc_sema=___ap$use_libc_sema \
+ %{traditional:-A___gcc_cleanup=__cleanup} \
+ %{!traditional:-A___gcc_cleanup=___ap$do_registered_functions} \
+ -L/usr/lib"
+
+#define STARTFILE_SPEC \
+"%{!pg:%{!p:/usr/lib/crt/crt0.o}} \
+ %{!pg:%{p:/usr/lib/crt/mcrt0.o}} \
+ %{pg:/usr/lib/crt/gcrt0.o}"
+
+#endif
+
+/* Use /path/libgcc.a instead of -lgcc, makes bootstrap work more smoothly. */
+
+#define LINK_LIBGCC_SPECIAL_1
+
+/* Since IEEE support was added to gcc, most things seem to like it
+ better if we disable exceptions and check afterward for infinity. */
+
+#if __convex__
+#if _IEEE_FLOAT_
+#define REAL_VALUE_ISNAN(x) 0
+#define REAL_VALUE_ISINF(x) ((*(short *) &(x) & 0x7ff0) == 0x7ff0)
+#else
+#define REAL_VALUE_ISNAN(x) 0
+#define REAL_VALUE_ISINF(x) ((*(short *) &(x) & 0xfff0) == 0x8000)
+#endif
+#endif
+
+/* Target machine storage layout */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 8
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY 64
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+/* beware of doubles in structs -- 64 is incompatible with cc */
+#define BIGGEST_ALIGNMENT 32
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Define sizes of basic C types to conform to ordinary usage -- these
+ types depend on BITS_PER_WORD otherwise. */
+#define CHAR_TYPE_SIZE 8
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE (TARGET_LONG64 ? 64 : 32)
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+/* This prevents cexp.c from depending on LONG_TYPE_SIZE. */
+#define MAX_LONG_TYPE_SIZE 64
+
+/* Declare the standard types used by builtins to match convex stddef.h --
+ with int rather than long. */
+
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+#define FIRST_PSEUDO_REGISTER 16
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ For Convex, these are AP, FP, and SP. */
+#define FIXED_REGISTERS \
+ { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1 }
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS \
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+ For Convex, put S0 (the return register) last. */
+#define REG_ALLOC_ORDER \
+ { 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 0, 8, 14, 15 }
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On Convex, S registers can hold any type, A registers any nonfloat. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ (S_REGNO_P (REGNO) \
+ || (GET_MODE_SIZE (MODE) <= 4 && (MODE) != SFmode))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((GET_MODE_SIZE (MODE1) <= 4 && (MODE1) != SFmode) \
+ == (GET_MODE_SIZE (MODE2) <= 4 && (MODE2) != SFmode))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+#define S0_REGNUM 0
+#define A0_REGNUM 8
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM A0_REGNUM
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM (A0_REGNUM + 7)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 1
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM (A0_REGNUM + 6)
+
+/* Register in which static-chain is passed to a function.
+ Use S0, not an A reg, because this rare use would otherwise prevent
+ an A reg from being available to global-alloc across calls. */
+#define STATIC_CHAIN_REGNUM S0_REGNUM
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM (A0_REGNUM + 1)
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* Convex has classes A (address) and S (scalar).
+ A is further divided into SP_REGS (stack pointer) and INDEX_REGS.
+ SI_REGS is S_REGS + INDEX_REGS -- all the regs except SP. */
+
+enum reg_class {
+ NO_REGS, S_REGS, INDEX_REGS, SP_REGS, A_REGS, SI_REGS,
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Since GENERAL_REGS is the same class as ALL_REGS,
+ don't give it a different class number; just make it an alias. */
+
+#define GENERAL_REGS ALL_REGS
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", "S_REGS", "INDEX_REGS", "SP_REGS", "A_REGS", "SI_REGS", \
+ "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+ { 0, 0x00ff, 0xfe00, 0x0100, 0xff00, 0xfeff, 0xffff }
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) (regno_reg_class[REGNO])
+
+#define S_REGNO_P(REGNO) (((REGNO) - S0_REGNUM) < (unsigned) 8)
+#define A_REGNO_P(REGNO) (((REGNO) - A0_REGNUM) < (unsigned) 8)
+
+#define S_REG_P(X) (REG_P (X) && S_REGNO_P (REGNO (X)))
+#define A_REG_P(X) (REG_P (X) && A_REGNO_P (REGNO (X)))
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS INDEX_REGS
+#define BASE_REG_CLASS INDEX_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+/* a => A_REGS
+ d => S_REGS ('s' is taken)
+ A => INDEX_REGS (i.e., A_REGS except sp) */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ reg_class_from_letter[(unsigned char) (C)]
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C. */
+/* 'I' is used to pass any CONST_INT and reject any CONST_DOUBLE.
+ CONST_DOUBLE integers are handled by G and H constraint chars. */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) 1
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. */
+/* Convex uses G, H:
+ value usable in ld.d (low word 0) or ld.l (high word all sign) */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ (((C) == 'G' && LD_D_P (VALUE)) || \
+ ((C) == 'H' && LD_L_P (VALUE)) || \
+ 0)
+
+#define LD_D_P(X) (const_double_low_int (X) == 0)
+
+#define LD_L_P(X) (const_double_low_int (X) >= 0 \
+ ? const_double_high_int (X) == 0 \
+ : const_double_high_int (X) == -1)
+
+/* Optional extra constraints for this machine.
+ For Convex, 'Q' means that OP is a volatile MEM.
+ For volatile scalars, we use instructions that bypass the data cache. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? (GET_CODE (OP) == MEM && MEM_VOLATILE_P (OP) \
+ && ! TARGET_C1 && TARGET_VOLATILE_NOCACHE) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+/* Put 2-word constants that can't be immediate operands into memory. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((GET_CODE (X) != CONST_DOUBLE \
+ || GET_MODE (X) == SFmode \
+ || LD_L_P (X) || LD_D_P (X)) ? (CLASS) : NO_REGS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) + 7) / 8)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Define this if should default to -fcaller-saves. */
+#define DEFAULT_CALLER_SAVES
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by. */
+#define PUSH_ROUNDING(BYTES) (((BYTES) + 3) & ~3)
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) (SIZE)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (VALTYPE), S0_REGNUM)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, S0_REGNUM)
+
+/* Define this if PCC uses the nonreentrant convention for returning
+ structure and union values. */
+
+#define PCC_STATIC_STRUCT_RETURN
+
+/* 1 if N is a possible register number for a function value.
+ On the Convex, S0 is the only register thus used. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == S0_REGNUM)
+
+/* 1 if N is a possible register number for function argument passing. */
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+/* On Convex, simply count the arguments in case TARGET_ARGCOUNT is set. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+ ((CUM) = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) += 1)
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ Convex: all args go on the stack. But return the arg count
+ as the "next arg register" to be passed to gen_call. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ ((MODE) == VOIDmode ? GEN_INT ((CUM)) : 0)
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+{ \
+ int size = ((SIZE) + 7) & -8; \
+ if (size != 0) \
+ fprintf (FILE, "\tsub.w #%d,sp\n", size); \
+}
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE. */
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+{ \
+ /* Follow function with a zero to stop c34 icache prefetching. */ \
+ fprintf (FILE, "\tds.h 0\n"); \
+}
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On Convex, the code for a trampoline is
+ ld.w #<link>,s0
+ jmp <func> */
+
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf (FILE, "\tld.w #69696969,s0\n"); \
+ fprintf (FILE, "\tjmp 52525252\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 12
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, Pmode, plus_constant (TRAMP, 2)), CXT); \
+ emit_move_insn (gen_rtx (MEM, Pmode, plus_constant (TRAMP, 8)), FNADDR); \
+ emit_call_insn (gen_call_pop (gen_rtx (MEM, QImode, \
+ gen_rtx (SYMBOL_REF, Pmode, \
+ "__enable_execute_stack")), \
+ const0_rtx, const0_rtx, const0_rtx)); \
+}
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tldea LP%d,a1\n\tcallq mcount\n", (LABELNO));
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* Store in the variable DEPTH the initial difference between the
+ frame pointer reg contents and the stack pointer reg contents,
+ as of the start of the function body. This depends on the layout
+ of the fixed parts of the stack frame and on how registers are saved. */
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) \
+{ (DEPTH) = (get_frame_size () + 7) & -8; }
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT 0 */
+/* #define HAVE_POST_DECREMENT 0 */
+
+/* #define HAVE_PRE_DECREMENT 0 */
+/* #define HAVE_PRE_INCREMENT 0 */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(regno) \
+ ((regno) <= LAST_VIRTUAL_REGISTER \
+ ? regno_ok_for_index_p[regno] \
+ : regno_ok_for_index_p[reg_renumber[regno]])
+
+#define REGNO_OK_FOR_BASE_P(regno) REGNO_OK_FOR_INDEX_P (regno)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 1
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+/* For Convex, bounce 2-word constants that can't be immediate operands. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) != CONST_DOUBLE \
+ || GET_MODE (X) == SFmode \
+ || LD_L_P (X) || LD_D_P (X))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) > LAST_VIRTUAL_REGISTER || regno_ok_for_index_p[REGNO (X)])
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) REG_OK_FOR_INDEX_P (X)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ For Convex, valid addresses are
+ indirectable or (MEM indirectable)
+ where indirectable is
+ const, reg, (PLUS reg const)
+
+ We don't use indirection since with insn scheduling, load + indexing
+ is better. */
+
+/* 1 if X is an address that we could indirect through. */
+#define INDIRECTABLE_ADDRESS_P(X) \
+ (CONSTANT_ADDRESS_P (X) \
+ || (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 1)) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 0))))
+
+/* Go to ADDR if X is a valid address. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ register rtx xfoob = (X); \
+ if (INDIRECTABLE_ADDRESS_P (xfoob)) \
+ goto ADDR; \
+ if (GET_CODE (xfoob) == PRE_DEC && XEXP (xfoob, 0) == stack_pointer_rtx) \
+ goto ADDR; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For Convex, nothing needs to be done. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) {}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) {}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Define this if the case instruction drops through after the table
+ when the index is out of range. Don't define it if the case insn
+ jumps to the default label instead. */
+/* #define CASE_DROPS_THROUGH */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* Define this if zero-extension is slow (more than one real instruction). */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS (! TARGET_C2)
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* On Convex, it is as good to call a constant function address as to
+ call an address kept in a register. */
+#define NO_FUNCTION_CSE
+
+/* When a prototype says `char' or `short', really pass an `int'. */
+#define PROMOTE_PROTOTYPES
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ case CONST_INT: \
+ case CONST_DOUBLE: \
+ return 0;
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. */
+
+#define RTX_COSTS(RTX,CODE,OUTER_CODE) \
+ case PLUS: \
+ if (regno_pointer_flag != 0 \
+ && GET_CODE (XEXP (RTX, 0)) == REG \
+ && REGNO_POINTER_FLAG (REGNO (XEXP (RTX, 0))) \
+ && GET_CODE (XEXP (RTX, 1)) == CONST_INT) \
+ return 0; \
+ else break; \
+ case MULT: \
+ return 4 * (char) (0x03060403 >> target_cpu * 8); \
+ case ASHIFT: \
+ case LSHIFTRT: \
+ case ASHIFTRT: \
+ return 4 * (char) (0x03010403 >> target_cpu * 8); \
+ case MEM: \
+ return 5;
+
+/* Compute the cost of an address. This is meant to approximate the size
+ and/or execution delay of an insn using that address. If the cost is
+ approximated by the RTL complexity, including CONST_COSTS above, as
+ is usually the case for CISC machines, this macro should not be defined.
+ For aggressively RISCy machines, only one insn format is allowed, so
+ this macro should be a constant. The value of this macro only matters
+ for valid addresses. */
+
+#define ADDRESS_COST(RTX) 0
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch. */
+
+#define BRANCH_COST 0
+
+/* Adjust the cost of dependences. */
+
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+{ \
+ /* Antidependencies don't block issue. */ \
+ if (REG_NOTE_KIND (LINK) != 0) \
+ (COST) = 0; \
+ /* C38 situations where delay depends on context */ \
+ else if (TARGET_C38 \
+ && GET_CODE (PATTERN (INSN)) == SET \
+ && GET_CODE (PATTERN (DEP)) == SET) \
+ { \
+ enum attr_type insn_type = get_attr_type (INSN); \
+ enum attr_type dep_type = get_attr_type (DEP); \
+ /* index register must be ready one cycle early */ \
+ if (insn_type == TYPE_MLDW || insn_type == TYPE_MLDL \
+ || (insn_type == TYPE_MST \
+ && reg_mentioned_p (SET_DEST (PATTERN (DEP)), \
+ SET_SRC (PATTERN (INSN))))) \
+ (COST) += 1; \
+ /* alu forwarding off alu takes two */ \
+ if (dep_type == TYPE_ALU \
+ && insn_type != TYPE_ALU \
+ && ! (insn_type == TYPE_MST \
+ && SET_DEST (PATTERN (DEP)) == SET_SRC (PATTERN (INSN)))) \
+ (COST) += 1; \
+ } \
+}
+
+/* Convex uses Vax or IEEE floats.
+ Follow the host format. */
+#define TARGET_FLOAT_FORMAT HOST_FLOAT_FORMAT
+
+/* But we must prevent real.c from constructing Vax dfloats. */
+#define REAL_VALUE_ATOF(X,S) atof (X)
+extern double atof();
+
+/* Check a `double' value for validity for a particular machine mode. */
+#define CHECK_FLOAT_VALUE(MODE, D, OVERFLOW) \
+ OVERFLOW = check_float_value (MODE, &D, OVERFLOW)
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h').  No extra ones are needed for Convex. */
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) {}
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#if _IEEE_FLOAT_
+#define ASM_FILE_START(FILE) fprintf (FILE, ";NO_APP\n.fpmode ieee\n")
+#else
+#define ASM_FILE_START(FILE) fprintf (FILE, ";NO_APP\n.fpmode native\n")
+#endif
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ";APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ";NO_APP\n"
+
+/* Alignment with Convex's assembler goes like this:
+ .text can be .aligned up to a halfword.
+ .data and .bss can be .aligned up to a longword.
+ .lcomm is not supported, explicit declarations in .bss must be used instead.
+ We get alignment for word and longword .text data by conventionally
+ using .text 2 for word-aligned data and .text 3 for longword-aligned
+ data. This requires that the data's size be a multiple of its alignment,
+ which seems to be always true. */
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP (current_section_is_text = 1, ".text")
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP (current_section_is_text = 0, ".data")
+
+/* Output before uninitialized data. */
+
+#define BSS_SECTION_ASM_OP (current_section_is_text = 0, ".bss")
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if (current_section_is_text && (LOG) > 1) \
+ fprintf (FILE, ".text %d\n", LOG); \
+ else if (current_section_is_text) \
+ fprintf (FILE, ".text\n.align %d\n", 1 << (LOG)); \
+ else \
+ fprintf (FILE, ".align %d\n", 1 << (LOG))
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{ \
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", \
+ "sp", "a1", "a2", "a3", "a4", "a5", "ap", "fp", \
+}
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO
+
+/* How to renumber registers for dbx and gdb. */
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Do not break .stabs pseudos into continuations. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't use stab extensions until GDB v4 port is available for convex. */
+
+#define DEFAULT_GDB_EXTENSIONS 0
+#define DBX_NO_XREFS
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs (".globl ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* Put case tables in .text 2, where they will be word-aligned */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM)
+
+#define ASM_OUTPUT_CASE_END(FILE,NUM,TABLE) \
+ ASM_OUTPUT_ALIGN (FILE, 1)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
+/* This is how to output an assembler line defining a `double' constant. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ outfloat (FILE, VALUE, "%.17e", "\tds.d ", "\n")
+
+/* This is how to output an assembler line defining a `float' constant. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ outfloat (FILE, VALUE, "%.9e", "\tds.s ", "\n")
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+{ \
+ fprintf (FILE, "\tds.w "); \
+ output_addr_const (FILE, simplify_for_convex (VALUE)); \
+ fprintf (FILE, "\n"); \
+}
+
+/* Likewise for a `long long int' constant. */
+
+#define ASM_OUTPUT_DOUBLE_INT(FILE,VALUE) \
+{ \
+ if (GET_CODE (VALUE) == CONST_DOUBLE) \
+ fprintf (FILE, "\tds.w %d,%d\n", \
+ const_double_high_int (VALUE), const_double_low_int (VALUE)); \
+ else if (GET_CODE (VALUE) == CONST_INT) \
+ { \
+ int val = INTVAL (VALUE); \
+ fprintf (FILE, "\tds.w %d,%d\n", val < 0 ? -1 : 0, val); \
+ } \
+ else \
+ abort (); \
+}
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\tds.h "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\tds.b "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\tds.b %#x\n", (VALUE))
+
+/* This is how to output a string */
+
+#define ASM_OUTPUT_ASCII(FILE,STR,SIZE) do { \
+ int i; \
+ fprintf ((FILE), "\tds.b \""); \
+ for (i = 0; i < (SIZE); i++) { \
+ register int c = (STR)[i] & 0377; \
+ if (c >= ' ' && c < 0177 && c != '\\' && c != '"') \
+ putc (c, (FILE)); \
+ else \
+ fprintf ((FILE), "\\%03o", c);} \
+ fprintf ((FILE), "\"\n");} while (0)
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tpsh.%c %s\n", \
+ S_REGNO_P (REGNO) ? 'l' : 'w', \
+ reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tpop.%c %s\n", \
+ S_REGNO_P (REGNO) ? 'l' : 'w', \
+ reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\tds.w L%d\n", VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+ (not used on Convex) */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\tds.w L%d-L%d\n", VALUE, REL)
+
+/* This is how to output an assembler line
+ that says to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\tds.b %u(0)\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* This says how to output an assembler line
+ to define a local common symbol. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( bss_section (), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ":\tbs.b %u\n", (ROUNDED)))
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* Output an arg count before function entries. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ asm_declare_function_name (FILE, NAME, DECL)
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print an instruction operand X on file FILE.
+ CODE is the code from the %-spec that requested printing this operand;
+ if `%z3' was used to print operand 3, then CODE is 'z'. */
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ print_operand (FILE, X, CODE)
+
+/* Print a memory operand whose address is X, on file FILE. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ print_operand_address (FILE, ADDR)
+
+/* Do not put out GNU stabs for constructors and destructors.
+ ld bounces them. */
+
+#define FASCIST_ASSEMBLER
+
+/* __gcc_cleanup is loader-aliased to __ap$do_registered_functions if we
+ are linking against standard libc, 0 if old (-traditional) libc. */
+
+#define EXIT_BODY \
+{ \
+ extern void __gcc_cleanup (); \
+ if (__gcc_cleanup != _cleanup) \
+ __gcc_cleanup (); \
+ _cleanup (); \
+}
+
+/* Header for convex.c.
+ Here at the end so we can use types defined above. */
+
+extern int target_cpu;
+extern int current_section_is_text;
+extern enum reg_class regno_reg_class[];
+extern enum reg_class reg_class_from_letter[];
+extern char regno_ok_for_index_p_base[];
+#define regno_ok_for_index_p (regno_ok_for_index_p_base + 1)
+
+extern int const_double_low_int ();
+extern int const_double_high_int ();
+extern char *output_cmp ();
+extern char *output_condjump ();
+extern char *output_call ();
+extern void gen_ap_for_call ();
+extern int check_float_value ();
+extern void asm_declare_function_name ();
diff --git a/gcc/config/convex/convex.md b/gcc/config/convex/convex.md
new file mode 100755
index 0000000..cb6f64d
--- /dev/null
+++ b/gcc/config/convex/convex.md
@@ -0,0 +1,1885 @@
+;;- Machine description for GNU compiler, Convex Version
+;; Copyright (C) 1988, 1994, 1995 Free Software Foundation, Inc.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; Attribute specifications
+
+; Target CPU
+(define_attr "cpu" "c1,c32,c34,c38"
+ (const (symbol_ref "(enum attr_cpu) target_cpu")))
+
+;; Instruction classification
+
+(define_attr "type"
+ "alu,xalu,mldw,mldl,mldb,mst,adds,addd,mulw,mull,muls,muld,divw,divl,divs,divd,shfw,shfl,cvts,cvtd"
+ (const_string "alu"))
+
+;; Instruction times
+
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "mldw")) 2 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "mldl")) 4 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "mldw,mldl")) 2 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "mldw,mldl")) 4 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "mldw,mldl")) 2 0)
+
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "mldb")) 9 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "mldb")) 36 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "mldb")) 21 0)
+
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "xalu")) 1 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "xalu")) 1 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "xalu")) 5 0)
+(define_function_unit "mem" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "xalu")) 2 0)
+
+(define_function_unit "add" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "adds,addd")) 3 2)
+(define_function_unit "add" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "adds,addd")) 2 1)
+(define_function_unit "add" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "adds,addd")) 5 2)
+(define_function_unit "add" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "adds,addd")) 2 1)
+
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "mulw,muls")) 3 2)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "mulw,muls")) 4 2)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "mulw,muls")) 6 2)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "mulw,muls")) 3 2)
+
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "mull,muld")) 4 3)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "mull")) 10 7)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "muld")) 5 2)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "mull,muld")) 7 3)
+(define_function_unit "mul" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "mull,muld")) 4 3)
+
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "divw")) 24 24)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "divw")) 44 6)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "divw")) 14 10)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "divw")) 11 10)
+
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "divl")) 41 42)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "divl")) 76 5)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "divl")) 22 18)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "divl")) 19 18)
+
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "divs")) 22 22)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "divs")) 8 6)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "divs")) 13 9)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "divs")) 10 9)
+
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "divd")) 37 38)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "divd")) 12 8)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "divd")) 20 16)
+(define_function_unit "div" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "divd")) 17 16)
+
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "cvts,cvtd")) 4 3)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "cvts")) 9 7)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "cvtd")) 9 6)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "cvts")) 6 2)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c34") (eq_attr "type" "cvtd")) 6 1)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "cvts,cvtd")) 3 1)
+
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c1") (eq_attr "type" "shfw,shfl")) 3 2)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "shfw")) 7 5)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c32") (eq_attr "type" "shfl")) 7 4)
+(define_function_unit "misc" 1 0
+ (and (eq_attr "cpu" "c38") (eq_attr "type" "shfw,shfl")) 3 1)
+
+(define_function_unit "mystery_latch" 1 1
+ (and (eq_attr "type" "!alu,mldw,mldl,adds,addd") (eq_attr "cpu" "c32")) 2 2)
+
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c1")
+; (eq_attr "type" "divw,divl,divs,divd,xalu")) 2 2)
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c1")
+; (eq_attr "type" "!divw,divl,divs,divd,xalu")) 1 1)
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c32")
+; (eq_attr "type" "mull,muld,divl,divd,shfl,cvtd,xalu")) 2 2)
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c32")
+; (eq_attr "type" "!mull,muld,divl,divd,shfl,cvtd,xalu")) 1 1)
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c34")
+; (eq_attr "type" "addd,mull,muld,divl,divd,cvtd,xalu")) 2 2)
+;(define_function_unit "ip" 1 1
+; (and (eq_attr "cpu" "c34")
+; (eq_attr "type" "!addd,mull,muld,divl,divd,cvtd,xalu")) 1 1)
+
+;; Make the first thing a real insn in case of genattrtab bug
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+;; Moves
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "general_operand" "=d,d,d,d,d,<,m")
+ (match_operand:DF 1 "general_operand" "d,Q,m,G,H,d,d"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "@
+ mov %1,%0
+ ldb.d %1,%0
+ ld.d %1,%0
+ ld.d %u1,%0
+ ld.l %v1,%0
+ psh.l %1
+ st.d %1,%0"
+ [(set_attr "type" "alu,mldb,mldl,alu,alu,alu,mst")])
+
+;; This is here so we can load any result of RTL constant folding
+;; but it is not used on constants that can be loaded from memory.
+;; It is never better and can be worse.
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (match_operand:DF 1 "const_double_operand" "F"))]
+ "CONST_DOUBLE_MEM (operands[1]) == const0_rtx"
+ "ld.u %u1,%0\;ld.w %v1,%0"
+ [(set_attr "type" "xalu")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "general_operand" "=d,d,d,d,<,m")
+ (match_operand:SF 1 "general_operand" "d,Q,m,F,d,d"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ mov.s %1,%0
+ ldb.s %1,%0
+ ld.s %1,%0
+ ld.s %1,%0
+ psh.w %1
+ st.s %1,%0"
+ [(set_attr "type" "alu,mldb,mldw,alu,alu,mst")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=d,d,d,d,d,<,m")
+ (match_operand:DI 1 "general_operand" "d,Q,m,G,HI,d,d"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "@
+ mov %1,%0
+ ldb.l %1,%0
+ ld.l %1,%0
+ ld.d %u1,%0
+ ld.l %1,%0
+ psh.l %1
+ st.l %1,%0"
+ [(set_attr "type" "alu,mldb,mldl,alu,alu,alu,mst")])
+
+;; This is here so we can load any result of RTL constant folding
+;; but it is not used on constants that can be loaded from memory.
+;; It is never better and can be worse.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (match_operand:DI 1 "const_double_operand" "F"))]
+ "CONST_DOUBLE_MEM (operands[1]) == const0_rtx"
+ "ld.u %u1,%0\;ld.w %v1,%0"
+ [(set_attr "type" "xalu")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "push_operand" "=<,<")
+ (match_operand:SI 1 "nonmemory_operand" "Ad,i"))]
+ ""
+ "@
+ psh.w %1
+ pshea %a1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "general_operand" "=d,r,d,r,r,m")
+ (match_operand:SI 1 "general_operand" "d,r,Q,m,i,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov.w %1,%0
+ mov %1,%0
+ ldb.w %1,%0
+ ld.w %1,%0
+ ld.w %1,%0
+ st.w %1,%0"
+ [(set_attr "type" "alu,alu,mldb,mldw,alu,mst")])
+
+(define_expand "movstrictsi"
+ [(set (strict_low_part (match_operand:SI 0 "general_operand" ""))
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);")
+
+(define_insn ""
+ [(set (strict_low_part (match_operand:SI 0 "general_operand" "=d,r,d,r,r,m"))
+ (match_operand:SI 1 "general_operand" "d,r,Q,m,i,r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ mov.w %1,%0
+ mov %1,%0
+ ldb.w %1,%0
+ ld.w %1,%0
+ ld.w %1,%0
+ st.w %1,%0"
+ [(set_attr "type" "alu,alu,mldb,mldw,alu,mst")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "general_operand" "=d,r,d,r,r,<,m")
+ (match_operand:HI 1 "general_operand" "d,r,Q,m,i,Ad,r"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ mov.w %1,%0
+ mov %1,%0
+ ldb.h %1,%0
+ ld.h %1,%0
+ ld.w %1,%0
+ psh.w %1
+ st.h %1,%0"
+ [(set_attr "type" "alu,alu,mldb,mldw,alu,alu,mst")])
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "general_operand" "=d,r,d,r,r,<,m")
+ (match_operand:QI 1 "general_operand" "d,r,Q,m,i,Ad,r"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ mov.w %1,%0
+ mov %1,%0
+ ldb.b %1,%0
+ ld.b %1,%0
+ ld.w %1,%0
+ psh.w %1
+ st.b %1,%0"
+ [(set_attr "type" "alu,alu,mldb,mldw,alu,alu,mst")])
+
+;; Expand block moves manually to get code that pipelines the loads.
+
+(define_expand "movstrsi"
+ [(set (match_operand:BLK 0 "memory_operand" "=m")
+ (match_operand:BLK 1 "memory_operand" "m"))
+ (use (match_operand:SI 2 "const_int_operand" "i"))
+ (use (match_operand:SI 3 "const_int_operand" "i"))]
+ ""
+ " expand_movstr (operands); DONE; ")
+
+;; Extension and truncation insns.
+;; Those for integer source operand
+;; are ordered widest source type first.
+
+(define_insn "truncsiqi2"
+ [(set (match_operand:QI 0 "register_operand" "=d,a")
+ (truncate:QI (match_operand:SI 1 "register_operand" "d,a")))]
+ ""
+ "cvtw.b %1,%0")
+
+(define_insn "truncsihi2"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (truncate:HI (match_operand:SI 1 "register_operand" "d,a")))]
+ ""
+ "cvtw.h %1,%0")
+
+(define_insn "trunchiqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (truncate:QI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "")
+
+(define_insn "truncdisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (truncate:SI (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "cvtl.w %1,%0")
+
+(define_insn "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "cvtw.l %1,%0")
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "d,a")))]
+ ""
+ "cvth.w %1,%0")
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "d,a")))]
+ ""
+ "cvtb.w %1,%0")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "d,a")))]
+ ""
+ "cvtb.w %1,%0")
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "d")))]
+ ""
+ "cvts.d %1,%0"
+ [(set_attr "type" "cvts")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "d")))]
+ ""
+ "cvtd.s %1,%0"
+ [(set_attr "type" "cvtd")])
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "and #0xffff,%0")
+
+(define_insn "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "and #0xff,%0")
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "and #0xff,%0")
+
+(define_insn "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "0")))]
+ ""
+ "ld.u #0,%0")
+
+;; Fix-to-float conversion insns.
+;; Note that the ones that start with SImode come first.
+;; That is so that an operand that is a CONST_INT
+;; (and therefore lacks a specific machine mode)
+;; will be recognized as SImode (which is always valid)
+;; rather than as QImode or HImode.
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float:SF (match_operand:SI 1 "register_operand" "d")))]
+ ""
+ "cvtw.s %1,%0"
+ [(set_attr "type" "cvts")])
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float:SF (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "cvtl.s %1,%0"
+ [(set_attr "type" "cvtd")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (float:DF (match_operand:SI 1 "register_operand" "d")))]
+ "! TARGET_C1"
+ "cvtw.d %1,%0"
+ [(set_attr "type" "cvts")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (float:DF (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "cvtl.d %1,%0"
+ [(set_attr "type" "cvtd")])
+
+;; These are a little slower than gcc's normal way of doing unsigned
+;; DI floats (if the DI number is "negative") but they avoid double
+;; rounding and they avoid explicit constants.
+
+(define_expand "floatunsdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (float:DF (match_operand:DI 1 "register_operand" "d")))
+ (set (cc0) (compare:DI (match_dup 3) (match_dup 1)))
+ (set (pc)
+ (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_dup 4))
+ (pc)))
+ (set (match_dup 2) (lshiftrt:DI (match_dup 1) (const_int 1)))
+ (set (match_dup 0) (float:DF (match_dup 2)))
+ (set (match_dup 0) (plus:DF (match_dup 0) (match_dup 0)))
+ (match_dup 4)
+ (set (match_dup 0) (match_dup 0))]
+ ""
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = force_reg (DImode, const0_rtx);
+ operands[4] = gen_label_rtx ();
+}")
+
+(define_expand "floatunsdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (float:SF (match_operand:DI 1 "register_operand" "d")))
+ (set (cc0) (compare:DI (match_dup 3) (match_dup 1)))
+ (set (pc)
+ (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_dup 4))
+ (pc)))
+ (set (match_dup 2) (lshiftrt:DI (match_dup 1) (const_int 1)))
+ (set (match_dup 0) (float:SF (match_dup 2)))
+ (set (match_dup 0) (plus:SF (match_dup 0) (match_dup 0)))
+ (match_dup 4)
+ (set (match_dup 0) (match_dup 0))]
+ ""
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = force_reg (DImode, const0_rtx);
+ operands[4] = gen_label_rtx ();
+}")
+
+;; These patterns are identical to gcc's default action
+;; if DI->DF and DI->SF are not present. There are here
+;; only to prevent SI->*F from promoting to DI->*F.
+
+(define_expand "floatunssidf2"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (set (match_operand:DF 0 "register_operand" "")
+ (float:DF (match_dup 2)))]
+ ""
+ "operands[2] = gen_reg_rtx (DImode);")
+
+(define_expand "floatunssisf2"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (set (match_operand:SF 0 "register_operand" "")
+ (float:SF (match_dup 2)))]
+ ""
+ "operands[2] = gen_reg_rtx (DImode);")
+
+;; Float-to-fix conversion insns.
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "d"))))]
+ ""
+ "cvts.w %1,%0"
+ [(set_attr "type" "cvts")])
+
+(define_insn "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "d"))))]
+ ""
+ "cvts.l %1,%0"
+ [(set_attr "type" "cvts")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "d"))))]
+ ""
+ "cvtd.l %1,%0"
+ [(set_attr "type" "cvtd")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "d"))))]
+ ""
+ "cvtd.l %1,%0"
+ [(set_attr "type" "cvtd")])
+
+;;- All kinds of add instructions.
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (plus:DF (match_operand:DF 1 "register_operand" "%0")
+ (match_operand:DF 2 "register_operand" "d")))]
+ ""
+ "add.d %2,%0"
+ [(set_attr "type" "addd")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (plus:SF (match_operand:SF 1 "register_operand" "%0")
+ (match_operand:SF 2 "nonmemory_operand" "dF")))]
+ ""
+ "add.s %2,%0"
+ [(set_attr "type" "adds")])
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "add.l %2,%0")
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (match_operand:SI 1 "register_operand" "%A")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "operands[1] == frame_pointer_rtx || operands[1] == arg_pointer_rtx"
+ "ldea %a2(%1),%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (match_operand:SI 1 "register_operand" "%a")
+ (match_operand:SI 2 "nonmemory_operand" "ri")))]
+ "operands[1] == stack_pointer_rtx && operands[0] != stack_pointer_rtx"
+ "mov %1,%0\;add.w %2,%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "push_operand" "=<")
+ (plus:SI (match_operand:SI 1 "register_operand" "A")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ "operands[1] != stack_pointer_rtx"
+ "pshea %a2(%1)"
+ [(set_attr "type" "mst")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,a,a")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,A")
+ (match_operand:SI 2 "nonmemory_operand" "di,ri,i")))]
+ "TARGET_C1"
+ "@
+ add.w %2,%0
+ add.w %2,%0
+ ldea %a2(%1),%0")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,a,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,A")
+ (match_operand:SI 2 "nonmemory_operand" "di,ri,i")))]
+ ""
+ "@
+ add.w %2,%0
+ add.w %2,%0
+ ldea %a2(%1),%0")
+
+(define_insn "addhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (plus:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "add.h %2,%0")
+
+(define_insn "addqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,d")
+ (plus:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "d,i")))]
+ ""
+ "@
+ add.b %2,%0
+ add.w %2,%0")
+
+;;- All kinds of subtract instructions.
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (minus:DF (match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "register_operand" "d")))]
+ ""
+ "sub.d %2,%0"
+ [(set_attr "type" "addd")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (minus:SF (match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "nonmemory_operand" "dF")))]
+ ""
+ "sub.s %2,%0"
+ [(set_attr "type" "adds")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "sub.l %2,%0")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a,?d,?a")
+ (minus:SI (match_operand:SI 1 "nonmemory_operand" "0,0,di,ai")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai,0,0")))]
+ ""
+ "@
+ sub.w %2,%0
+ sub.w %2,%0
+ sub.w %1,%0\;neg.w %0,%0
+ sub.w %1,%0\;neg.w %0,%0")
+
+(define_insn "subhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (minus:HI (match_operand:HI 1 "register_operand" "0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "sub.h %2,%0")
+
+(define_insn "subqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,d")
+ (minus:QI (match_operand:QI 1 "register_operand" "0,0")
+ (match_operand:QI 2 "nonmemory_operand" "d,i")))]
+ ""
+ "@
+ sub.b %2,%0
+ sub.w %2,%0")
+
+;;- Multiply instructions.
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (mult:DF (match_operand:DF 1 "register_operand" "%0")
+ (match_operand:DF 2 "register_operand" "d")))]
+ ""
+ "mul.d %2,%0"
+ [(set_attr "type" "muld")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (mult:SF (match_operand:SF 1 "register_operand" "%0")
+ (match_operand:SF 2 "nonmemory_operand" "dF")))]
+ ""
+ "mul.s %2,%0"
+ [(set_attr "type" "muls")])
+
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (mult:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "mul.l %2,%0"
+ [(set_attr "type" "mull")])
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "mul.w %2,%0"
+ [(set_attr "type" "mulw")])
+
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (mult:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "mul.h %2,%0"
+ [(set_attr "type" "mulw")])
+
+(define_insn "mulqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,d")
+ (mult:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "d,i")))]
+ ""
+ "@
+ mul.b %2,%0
+ mul.w %2,%0"
+ [(set_attr "type" "mulw,mulw")])
+
+;;- Divide instructions.
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (div:DF (match_operand:DF 1 "register_operand" "0")
+ (match_operand:DF 2 "register_operand" "d")))]
+ ""
+ "div.d %2,%0"
+ [(set_attr "type" "divd")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (div:SF (match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "nonmemory_operand" "dF")))]
+ ""
+ "div.s %2,%0"
+ [(set_attr "type" "divs")])
+
+(define_insn "divdi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (div:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "div.l %2,%0"
+ [(set_attr "type" "divl")])
+
+(define_expand "udivsi3"
+ [(set (match_dup 3)
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))
+ (set (match_dup 4)
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "")))
+ (set (match_dup 3)
+ (div:DI (match_dup 3) (match_dup 4)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (subreg:SI (match_dup 3) 0))]
+ ""
+ "operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode); ")
+
+(define_insn "udivdi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (udiv:DI (match_operand:DI 1 "register_operand" "d")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "psh.l %2\;psh.l %1\;callq udiv64\;pop.l %0\;add.w #8,sp")
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (div:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "div.w %2,%0"
+ [(set_attr "type" "divw")])
+
+(define_insn "divhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (div:HI (match_operand:HI 1 "register_operand" "0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "div.h %2,%0"
+ [(set_attr "type" "divw")])
+
+(define_insn "divqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (div:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "d")))]
+ ""
+ "div.b %2,%0"
+ [(set_attr "type" "divw")])
+
+;;- Bit clear instructions.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (and:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "" "")))]
+ "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[2]) == -1)"
+ "and %2,%0")
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (and:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "and %2,%0")
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "and %2,%0")
+
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (and:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "and %2,%0")
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,a")
+ (and:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "and %2,%0")
+
+;;- Bit set instructions.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ior:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "" "")))]
+ "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[2]) == 0)"
+ "or %2,%0")
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ior:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "or %2,%0")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "or %2,%0")
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "or %2,%0")
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,a")
+ (ior:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "or %2,%0")
+
+;;- xor instructions.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (xor:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "" "")))]
+ "(GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0)
+ || (GET_CODE (operands[2]) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (operands[2]) == 0)"
+ "xor %2,%0")
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (xor:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "d")))]
+ ""
+ "xor %2,%0")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "xor %2,%0")
+
+;; Bitwise XOR for HImode and QImode. Operand 1 is marked commutative
+;; and tied to the output ("%0"): the result lands in the same register.
+;; Two alternatives select the register bank: D registers with an
+;; immediate ("di") or A registers with an immediate ("ai").
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (xor:HI (match_operand:HI 1 "register_operand" "%0,0")
+ (match_operand:HI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "xor %2,%0")
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=d,a")
+ (xor:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "xor %2,%0")
+
+;; Negation, one pattern per mode. The FP forms carry scheduling
+;; attributes (addd/adds); the integer forms use the default type.
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (neg:DF (match_operand:DF 1 "register_operand" "d")))]
+ ""
+ "neg.d %1,%0"
+ [(set_attr "type" "addd")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (neg:SF (match_operand:SF 1 "register_operand" "d")))]
+ ""
+ "neg.s %1,%0"
+ [(set_attr "type" "adds")])
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (neg:DI (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "neg.l %1,%0")
+
+;; Word/half/byte negation also works in A registers (except QImode,
+;; which is D-register only here).
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (neg:SI (match_operand:SI 1 "register_operand" "d,a")))]
+ ""
+ "neg.w %1,%0")
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (neg:HI (match_operand:HI 1 "register_operand" "d,a")))]
+ ""
+ "neg.h %1,%0")
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (neg:QI (match_operand:QI 1 "register_operand" "d")))]
+ ""
+ "neg.b %1,%0")
+
+;; One's complement (bitwise NOT). A single "not" opcode serves all
+;; widths; DImode is restricted to D registers.
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (not:DI (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "not %1,%0")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (not:SI (match_operand:SI 1 "register_operand" "d,a")))]
+ ""
+ "not %1,%0")
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "register_operand" "=d,a")
+ (not:HI (match_operand:HI 1 "register_operand" "d,a")))]
+ ""
+ "not %1,%0")
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "register_operand" "=d,a")
+ (not:QI (match_operand:QI 1 "register_operand" "d,a")))]
+ ""
+ "not %1,%0")
+
+;;- Shifts
+;;
+;; The extreme profusion of patterns here is due to the different-speed
+;; shifts on different machines, and the C1's lack of word shift S-register
+;; instructions.
+;;
+;; NOTE(review): recog tries define_insns in file order, so the
+;; special-case (cheaper) unnamed patterns below must stay ahead of the
+;; named catch-all ashlsi3 at the end of this section.
+
+;; SImode
+
+;; Arithmetic left 1, 1 cycle on all machines via add
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1)))]
+ ""
+ "add.w %0,%0")
+
+;; C34 general shift is 1 cycle
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ "TARGET_C34"
+ "@
+ shf.w %2,%0
+ shf %2,%0"
+ [(set_attr "type" "shfw,shfw")])
+
+;; else shift left 0..7 is 1 cycle if we use an A register
+
+;; NOTE(review): the unsigned compare below also rejects negative
+;; INTVALs, since they wrap to huge unsigned values.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a,?d")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "ai,di")))]
+ "TARGET_C1 && INTVAL (operands[2]) < (unsigned) 8"
+ "@
+ shf %2,%0
+ shf %2,%0"
+ [(set_attr "type" "alu,shfl")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a,?d")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "ai,di")))]
+ "INTVAL (operands[2]) < (unsigned) 8"
+ "@
+ shf %2,%0
+ shf.w %2,%0"
+ [(set_attr "type" "alu,shfw")])
+
+;; else general left shift
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ "TARGET_C1"
+ "@
+ shf %2,%0
+ shf %2,%0"
+ [(set_attr "type" "shfl,shfw")])
+
+;; but C2 left shift by a constant is faster via multiply
+
+;; NOTE(review): %z2 presumably prints the power-of-two constant
+;; (1 << INTVAL (operands[2])) — confirm the 'z' output modifier in
+;; convex.c's PRINT_OPERAND handling.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "i")))]
+ "TARGET_C2 && INTVAL (operands[2]) < (unsigned) 32"
+ "mul.w %z2,%0"
+ [(set_attr "type" "mulw")])
+
+;; Named catch-all: general left shift for any target.
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=d,a")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "nonmemory_operand" "di,ai")))]
+ ""
+ "@
+ shf.w %2,%0
+ shf %2,%0"
+ [(set_attr "type" "shfw,shfw")])
+
+;; Logical right, general
+;; The hardware wants the negative of the shift count
+;; (a negative count to shf shifts right), so the named expander negates
+;; operand 2 up front and the insn patterns match (neg:SI ...) counts.
+;; In the constant-count forms "#%n2" prints the negated immediate.
+
+(define_expand "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))]
+ ""
+ "operands[2] = negate_rtx (SImode, operands[2]);")
+
+;; C1 lacks word shift S reg
+;; The D-register alternative zeroes the upper half first (ld.u #0)
+;; so the 64-bit S shift behaves as a 32-bit logical shift.
+
+(define_insn ""
+ [(set
+ (match_operand:SI 0 "register_operand" "=a,?d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" "ai,di"))))]
+ "TARGET_C1"
+ "@
+ shf %2,%0
+ ld.u #0,%0\;shf %2,%0"
+ [(set_attr "type" "shfw,shfl")])
+
+;; general case
+
+(define_insn ""
+ [(set
+ (match_operand:SI 0 "register_operand" "=d,a")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" "di,ai"))))]
+ ""
+ "@
+ shf.w %2,%0
+ shf %2,%0"
+ [(set_attr "type" "shfw,shfw")])
+
+;; Patterns without neg produced by constant folding
+
+(define_insn ""
+ [(set
+ (match_operand:SI 0 "register_operand" "=a,?d")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "i,i")))]
+ "TARGET_C1"
+ "@
+ shf #%n2,%0
+ ld.u #0,%0\;shf #%n2,%0"
+ [(set_attr "type" "shfw,shfl")])
+
+(define_insn ""
+ [(set
+ (match_operand:SI 0 "register_operand" "=d,a")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0")
+ (match_operand:SI 2 "immediate_operand" "i,i")))]
+ ""
+ "@
+ shf.w #%n2,%0
+ shf #%n2,%0"
+ [(set_attr "type" "shfw,shfw")])
+
+;; Arithmetic right, general
+;; Sign-extend to 64 bits, then shift that. Works for 0..32.
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))]
+ ""
+ "operands[2] = negate_rtx (SImode, operands[2]);")
+
+;; cvtw.l sign-extends the 32-bit value to 64 bits; the 64-bit right
+;; shift then drags copies of the sign bit into the low word.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d,&d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,d")
+ (neg:SI
+ (match_operand:SI 2 "nonmemory_operand" "di,di"))))]
+ ""
+ "cvtw.l %1,%0\;shf %2,%0"
+ [(set_attr "type" "shfl,shfl")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "d")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+ "cvtw.l %1,%0\;shf #%n2,%0"
+ [(set_attr "type" "shfl")])
+
+;; DImode
+;; Arithmetic left, 1-cycle
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (const_int 1)))]
+ ""
+ "add.l %0,%0")
+
+;; Arithmetic left, general
+
+(define_insn "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:SI 2 "nonmemory_operand" "di")))]
+ ""
+ "shf %2,%0"
+ [(set_attr "type" "shfl")])
+
+;; Can omit zero- or sign-extend if shift is 32 or more.
+;; (A shift of >= 32 pushes the extended upper half entirely out of the
+;; register, so the extension's value never matters.)
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:SI 2 "const_int_operand" "i")))]
+ "INTVAL (operands[2]) >= 32"
+ "shf %2,%0"
+ [(set_attr "type" "shfl")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (ashift:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "0"))
+ (match_operand:SI 2 "const_int_operand" "i")))]
+ "INTVAL (operands[2]) >= 32"
+ "shf %2,%0"
+ [(set_attr "type" "shfl")])
+
+;; Logical right, general
+;; Same negated-count convention as the SImode right shifts above.
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))]
+ ""
+ "operands[2] = negate_rtx (SImode, operands[2]);")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" "di"))))]
+ ""
+ "shf %2,%0"
+ [(set_attr "type" "shfl")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:SI 2 "immediate_operand" "i")))]
+ ""
+ "shf #%n2,%0"
+ [(set_attr "type" "shfl")])
+
+;; Arithmetic right, general
+;; Use
+;; ((a >> b) ^ signbit) - signbit
+;; where signbit is (1 << 63) >> b
+;; Works for 0..63. Does not work for 64; unfortunate but valid.
+;; match_dup 3 is loaded with only bit 63 set:
+;; immed_double_const (0, 1 << 31, DImode) has low word 0, high word
+;; 0x80000000. Shifting it right in lockstep with the operand yields
+;; the sign-propagation mask; xor then subtract sign-extends the result.
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" ""))))
+ (set (match_dup 3) (lshiftrt:DI (match_dup 3) (neg:SI (match_dup 2))))
+ (set (match_dup 0) (xor:DI (match_dup 0) (match_dup 3)))
+ (set (match_dup 0) (minus:DI (match_dup 0) (match_dup 3)))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ switch (INTVAL (operands[2]))
+ {
+ case 32:
+ emit_insn (gen_ashrdi3_32 (operands[0], operands[1]));
+ DONE;
+ }
+
+ operands[2] = negate_rtx (SImode, operands[2]);
+ operands[3] = force_reg (DImode, immed_double_const (0, 1 << 31, DImode));
+}")
+
+;; Arithmetic right 32, a common case that can save a couple of insns.
+;; (Logical shift right 32 brings the high word down, then sign-extend.)
+
+(define_expand "ashrdi3_32"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "")
+ (const_int 32)))
+ (set (match_dup 0)
+ (sign_extend:DI (subreg:SI (match_dup 0) 0)))]
+ ""
+ "")
+
+;; __builtin instructions
+;;
+;; sqrt/sin/cos are only emitted under flag_fast_math and never on the
+;; C1 (presumably the C1 lacks these instructions — the "! TARGET_C1"
+;; conditions below encode that). Local unspec numbering in this file:
+;; unspec 1 == sin, unspec 2 == cos.
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "0")))]
+ "! TARGET_C1 && flag_fast_math"
+ "sqrt.d %0"
+ [(set_attr "type" "divd")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "0")))]
+ "! TARGET_C1 && flag_fast_math"
+ "sqrt.s %0"
+ [(set_attr "type" "divs")])
+
+(define_insn "sindf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 1))]
+ "! TARGET_C1 && flag_fast_math"
+ "sin.d %0")
+
+(define_insn "sinsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 1))]
+ "! TARGET_C1 && flag_fast_math"
+ "sin.s %0")
+
+(define_insn "cosdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "0")] 2))]
+ "! TARGET_C1 && flag_fast_math"
+ "cos.d %0")
+
+(define_insn "cossf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")] 2))]
+ "! TARGET_C1 && flag_fast_math"
+ "cos.s %0")
+
+(define_insn "ftruncdf2"
+ [(set (match_operand:DF 0 "register_operand" "=d")
+ (fix:DF (match_operand:DF 1 "register_operand" "d")))]
+ "! TARGET_C1"
+ "frint.d %1,%0"
+ [(set_attr "type" "cvtd")])
+
+(define_insn "ftruncsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (fix:SF (match_operand:SF 1 "register_operand" "d")))]
+ "! TARGET_C1"
+ "frint.s %1,%0"
+ [(set_attr "type" "cvts")])
+
+;; ffs(x) - 1 via tzc (presumably "trailing zero count": 0..31 for a
+;; set bit, 32 when no bit is set). The template maps the no-bit case
+;; to -1: compare against 32 and conditionally load -1. "%=" expands
+;; to a number unique to this insn instance, giving a local label.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d"))
+ (const_int 1)))]
+ ""
+ "tzc %1,%0\;le.w #32,%0\;jbrs.f L0%=\;ld.w #-1,%0\\nL0%=:")
+
+;; Named expander: compute ffs(x)-1 with the insn above, then add 1.
+(define_expand "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (minus:SI (ffs:SI (match_operand:SI 1 "register_operand" "d"))
+ (const_int 1)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ ""
+ "")
+
+;; Absolute value: clear the sign bit with a mask.
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=d")
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
+ ""
+ "and #0x7fffffff,%0")
+
+;; DF abs masks the 64-bit image with 0x7fffffffffffffff:
+;; immed_double_const (-1, 0x7fffffff) is all-ones low word plus
+;; 0x7fffffff high word, i.e. everything but bit 63.
+(define_expand "absdf2"
+ [(set (subreg:DI (match_operand:DF 0 "register_operand" "=d") 0)
+ (and:DI (subreg:DI (match_operand:DF 1 "register_operand" "d") 0)
+ (match_dup 2)))]
+ ""
+ "operands[2] = force_reg (DImode,
+ immed_double_const (-1, 0x7fffffff, DImode));")
+
+;;- Compares
+;;
+;; All compares funnel through output_cmp (in convex.c); the final
+;; character argument selects the width/kind: 'l'/'w'/'h'/'b' integer,
+;; 'd'/'s' float. NOTE(review): the uppercase letters ('L', 'B', 'W',
+;; 'H') are used by the test-against-zero / decrement patterns, which
+;; pass a scratch register as the second operand — presumably they
+;; select a test rather than a two-operand compare; confirm in convex.c.
+
+(define_insn "cmpdi"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "register_operand" "d")
+ (match_operand:DI 1 "register_operand" "d")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'l');")
+
+(define_insn ""
+ [(set (cc0) (match_operand:DI 0 "register_operand" "d"))
+ (clobber (match_scratch:DI 1 "=d"))]
+ "next_insn_tests_no_inequality (insn)"
+ "* return output_cmp (operands[0], operands[1], 'L');")
+
+(define_insn "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "register_operand" "d,a")
+ (match_operand:SI 1 "nonmemory_operand" "di,ai")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'w');")
+
+(define_insn "cmphi"
+ [(set (cc0)
+ (compare (match_operand:HI 0 "register_operand" "d,a")
+ (match_operand:HI 1 "nonmemory_operand" "di,ai")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'h');")
+
+; cmpqi is intentionally omitted.
+;
+; gcc will sign-extend or zero-extend the operands to the next
+; wider mode, HImode.
+;
+; For reg .cmp. constant, we just go with the halfword immediate
+; instruction. Perhaps the widening insn can be cse'd or combined away.
+; If not, we're still as good as loading a byte constant into a register
+; to do a reg-reg byte compare.
+;
+; The following patterns pick up cases that can use reg .cmp. reg after all.
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (sign_extend:HI (match_operand:QI 0 "register_operand" "d"))
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'b');")
+
+(define_insn ""
+ [(set (cc0)
+ (compare
+ (ashift:HI (subreg:HI (match_operand:QI 0 "register_operand" "d") 0)
+ (const_int 8))
+ (ashift:HI (subreg:HI (match_operand:QI 1 "register_operand" "d") 0)
+ (const_int 8))))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'b');")
+
+(define_insn ""
+ [(set (cc0)
+ (compare (match_operand:QI 0 "register_operand" "d")
+ (match_operand:QI 1 "register_operand" "d")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'b');")
+
+(define_insn ""
+ [(set (cc0) (match_operand:QI 0 "register_operand" "d"))
+ (clobber (match_scratch:QI 1 "=d"))]
+ "next_insn_tests_no_inequality (insn)"
+ "* return output_cmp (operands[0], operands[1], 'B');")
+
+(define_insn ""
+ [(set (cc0) (subreg (match_operand:QI 0 "register_operand" "d") 0))
+ (clobber (match_scratch:QI 1 "=d"))]
+ "next_insn_tests_no_inequality (insn)"
+ "* return output_cmp (operands[0], operands[1], 'B');")
+
+(define_insn ""
+ [(set (cc0)
+ (zero_extend (subreg (match_operand:QI 0 "register_operand" "d") 0)))
+ (clobber (match_scratch:QI 1 "=d"))]
+ "next_insn_tests_no_inequality (insn)"
+ "* return output_cmp (operands[0], operands[1], 'B');")
+
+(define_insn "cmpdf"
+ [(set (cc0)
+ (compare (match_operand:DF 0 "register_operand" "d")
+ (match_operand:DF 1 "register_operand" "d")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 'd');")
+
+(define_insn "cmpsf"
+ [(set (cc0)
+ (compare (match_operand:SF 0 "register_operand" "d")
+ (match_operand:SF 1 "nonmemory_cmpsf_operand" "dF")))]
+ ""
+ "* return output_cmp (operands[0], operands[1], 's');")
+
+;; decrement-and-set-cc0 insns.
+;;
+;; The most important case where we can use the carry bit from an
+;; arithmetic insn to eliminate a redundant compare is the decrement in
+;; constructs like while (n--) and while (--n >= 0).
+;;
+;; We do it with combine patterns instead of NOTICE_UPDATE_CC because
+;; the decrement needs to be kept at the end of the block during scheduling.
+;;
+;; These patterns must have memory alternatives because reload refuses
+;; to do output reloads for an insn that sets cc0 (since it does not
+;; want to clobber cc0 with its moves). Convex moves do not clobber
+;; cc0, but there is no evident way to get reload to know that.
+;;
+;; NOTE(review): the memory alternatives borrow S register s7 as a
+;; scratch, saving and restoring it on the stack (psh.w/pop.w), which
+;; is why output_cmp is told to describe register 7 in those arms.
+
+;; SI: cc0 gets the pre-decrement value; combine matched "test then n--".
+(define_insn ""
+ [(set (cc0)
+ (match_operand:SI 0 "register_operand" "+r,*m"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "next_insn_tests_no_inequality (insn)"
+ "*
+{
+ if (which_alternative == 0)
+ {
+ output_cmp (operands[0], constm1_rtx, 'W');
+ return \"add.w #-1,%0\";
+ }
+ else
+ {
+ output_cmp (gen_rtx (REG, SImode, 7), constm1_rtx, 'W');
+ return \"psh.w s7\;ld.w %0,s7\;add.w #-1,s7\;st.w s7,%0\;pop.w s7\";
+ }
+}")
+
+;; SI: cc0 gets the post-decrement value; only valid when the user of
+;; cc0 carries a REG_NONNEG note (--n >= 0 loops).
+(define_insn ""
+ [(set (cc0)
+ (plus:SI (match_operand:SI 0 "register_operand" "+r,*m")
+ (const_int -1)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ "find_reg_note (next_cc0_user (insn), REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative == 0)
+ {
+ output_cmp (operands[0], const0_rtx, 'W');
+ return \"add.w #-1,%0\";
+ }
+ else
+ {
+ output_cmp (gen_rtx (REG, SImode, 7), const0_rtx, 'W');
+ return \"psh.w s7\;ld.w %0,s7\;add.w #-1,s7\;st.w s7,%0\;pop.w s7\";
+ }
+}")
+
+;; HI versions of the two patterns above.
+(define_insn ""
+ [(set (cc0)
+ (match_operand:HI 0 "register_operand" "+r,*m"))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))]
+ "next_insn_tests_no_inequality (insn)"
+ "*
+{
+ if (which_alternative == 0)
+ {
+ output_cmp (operands[0], constm1_rtx, 'H');
+ return \"add.h #-1,%0\";
+ }
+ else
+ {
+ output_cmp (gen_rtx (REG, HImode, 7), constm1_rtx, 'H');
+ return \"psh.w s7\;ld.h %0,s7\;add.h #-1,s7\;st.h s7,%0\;pop.w s7\";
+ }
+}")
+
+(define_insn ""
+ [(set (cc0)
+ (plus:HI (match_operand:HI 0 "register_operand" "+r,*m")
+ (const_int -1)))
+ (set (match_dup 0)
+ (plus:HI (match_dup 0)
+ (const_int -1)))]
+ "find_reg_note (next_cc0_user (insn), REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative == 0)
+ {
+ output_cmp (operands[0], const0_rtx, 'H');
+ return \"add.h #-1,%0\";
+ }
+ else
+ {
+ output_cmp (gen_rtx (REG, HImode, 7), const0_rtx, 'H');
+ return \"psh.w s7\;ld.h %0,s7\;add.h #-1,s7\;st.h s7,%0\;pop.w s7\";
+ }
+}")
+
+;;- Jumps
+;;
+;; Conditional branches are synthesized from three base conditions
+;; (eq, lt/ltu, le/leu) plus a true/false branch sense passed to
+;; output_condjump: e.g. "bne" branches when eq is false, "bgt" when
+;; le is false, "bge" when lt is false.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jbr %l0")
+
+(define_insn "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"eq\", 't'); ")
+
+(define_insn "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"eq\", 'f'); ")
+
+(define_insn "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"le\", 'f'); ")
+
+(define_insn "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"leu\", 'f'); ")
+
+(define_insn "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"lt\", 't'); ")
+
+(define_insn "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"ltu\", 't'); ")
+
+(define_insn "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"lt\", 'f'); ")
+
+(define_insn "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"ltu\", 'f'); ")
+
+(define_insn "ble"
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"le\", 't'); ")
+
+(define_insn "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "* return output_condjump (operands[0], \"leu\", 't'); ")
+
+;; Reversed-sense branches: the label sits in the else-arm of the
+;; if_then_else (fall through when the condition holds). Same eq/lt/le
+;; bases with the 't'/'f' branch sense flipped.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"eq\", 'f'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"eq\", 't'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"le\", 't'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"leu\", 't'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (lt (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"lt\", 'f'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ltu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"ltu\", 'f'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"lt\", 't'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (geu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"ltu\", 't'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (le (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"le\", 'f'); ")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (leu (cc0)
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "* return output_condjump (operands[0], \"leu\", 'f'); ")
+
+;;- Calls
+;;
+;; NOTE(review): (reg:SI 8) in the expanders is presumably the
+;; stack/argument pointer implicitly adjusted by the popping call —
+;; confirm against the register numbering in convex.h. Operands 1-3 of
+;; call_pop are the argument-size constants the calling convention
+;; records; output_call (convex.c) emits the actual sequence.
+
+(define_expand "call_pop"
+ [(parallel [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")
+ (reg:SI 8)])]
+ ""
+ "")
+
+(define_insn ""
+ [(call (match_operand:QI 0 "memory_operand" "m")
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")
+ (match_operand:SI 4 "" "")]
+ ""
+ "* return output_call (insn, &operands[0]);")
+
+(define_expand "call_value_pop"
+ [(parallel [(set (match_operand 0 "" "=g")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:SI 2 "const_int_operand" "i")))
+ (match_operand:SI 3 "const_int_operand" "i")
+ (match_operand:SI 4 "const_int_operand" "i")
+ (reg:SI 8)])]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand 0 "" "=g")
+ (call (match_operand:QI 1 "memory_operand" "m")
+ (match_operand:SI 2 "const_int_operand" "i")))
+ (match_operand:SI 3 "const_int_operand" "i")
+ (match_operand:SI 4 "const_int_operand" "i")
+ (match_operand:SI 5 "" "")]
+ ""
+ "* return output_call (insn, &operands[1]); ")
+
+;; Call subroutine returning any type.
+;; Emits a plain call, then copies each value the caller's result block
+;; asked for, then a blockage so the copies are not optimized away.
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (gen_call_pop (operands[0], const0_rtx,
+ const0_rtx, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "")
+
+;; replace_arg_pushes is a helper defined in convex.c.
+(define_expand "return"
+ [(return)]
+ ""
+ " replace_arg_pushes (); ")
+
+(define_insn ""
+ [(return)]
+ ""
+ "rtn")
+
+;; emit_ap_optimizations is a helper defined in convex.c; the expander
+;; emits no RTL itself.
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+{
+ emit_ap_optimizations ();
+ DONE;
+}")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp %a0")
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ ""
+ "jmp %a0")
diff --git a/gcc/config/convex/fixinc.convex b/gcc/config/convex/fixinc.convex
new file mode 100755
index 0000000..0dc5f30
--- /dev/null
+++ b/gcc/config/convex/fixinc.convex
@@ -0,0 +1,416 @@
+
+# This is a shell archive. Remove anything before this line,
+# then unpack it by saving it in a file and typing "sh file".
+#
+# Wrapped by on Fri Mar 12 08:41:28 CST 1993
+# Contents: include/ include/limits.h include/math.h include/stddef.h
+# include/stdlib.h
+
+echo mkdir - include
+mkdir include
+chmod u=rwx,g=rwx,o=rx include
+
+echo x - include/limits.h
+sed 's/^@//' > "include/limits.h" <<'@//E*O*F include/limits.h//'
+#ifndef _LIMITS_H
+#define _LIMITS_H
+
+#include_next <limits.h>
+
+/* Minimum and maximum values a `char' can hold. */
+#ifdef __CHAR_UNSIGNED__
+#undef CHAR_MIN
+#define CHAR_MIN 0
+#undef CHAR_MAX
+#define CHAR_MAX 255
+#endif
+
+#endif /* _LIMITS_H */
+@//E*O*F include/limits.h//
+chmod u=rw,g=rw,o=r include/limits.h
+
+echo x - include/math.h
+sed 's/^@//' > "include/math.h" <<'@//E*O*F include/math.h//'
+#ifndef _MATH_H
+#define _MATH_H
+
+#include_next <math.h>
+
+#undef HUGE_VAL
+
+#if _IEEE_FLOAT_
+#define HUGE_VAL 1.79769313486231570e+308
+#else
+#define HUGE_VAL 8.98846567431157854e+307
+#endif
+
+#if __OPTIMIZE__ && ! __NO_INLINE
+
+#define frexp(x,y) __inline_frexp ((x), (y))
+#define ldexp(x,y) __inline_ldexp ((x), (y))
+#define irint(x) __inline_irint (x)
+#define frexpf(x,y) __inline_frexpf ((x), (y))
+#define ldexpf(x,y) __inline_ldexpf ((x), (y))
+#define irintf(x) __inline_irintf (x)
+
+#if __convex_c2__ || __convex_c32__ || __convex_c34__ || __convex_c38__
+
+#define atan(x) __inline_atan (x)
+#define ceil(x) __inline_ceil (x)
+#define cos(x) __inline_cos (x)
+#define exp(x) __inline_exp (x)
+#define floor(x) __inline_floor (x)
+#define log(x) __inline_log (x)
+#define log10(x) __inline_log10 (x)
+#define modf(x,y) __inline_modf ((x), (y))
+#define rint(x) __inline_rint (x)
+#define sin(x) __inline_sin (x)
+#define sqrt(x) __inline_sqrt (x)
+
+#define atanf(x) __inline_atanf (x)
+#define ceilf(x) __inline_ceilf (x)
+#define cosf(x) __inline_cosf (x)
+#define expf(x) __inline_expf (x)
+#define floorf(x) __inline_floorf (x)
+#define logf(x) __inline_logf (x)
+#define log10f(x) __inline_log10f (x)
+#define modff(x,y) __inline_modff ((x), (y))
+#define rintf(x) __inline_rintf (x)
+#define sinf(x) __inline_sinf (x)
+#define sqrtf(x) __inline_sqrtf (x)
+
+#endif /* __convex_c[23*]__ */
+
+#endif /* __OPTIMIZE__ */
+
+static __inline__ __const__ double __inline_atan (double x)
+{
+ double z;
+ __asm__ ("atan.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_atanf (float x)
+{
+ float z;
+ __asm__ ("atan.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_cos (double x)
+{
+ double z;
+ __asm__ ("cos.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_cosf (float x)
+{
+ float z;
+ __asm__ ("cos.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_exp (double x)
+{
+ double z;
+ __asm__ ("exp.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_expf (float x)
+{
+ float z;
+ __asm__ ("exp.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_log (double x)
+{
+ double z;
+ __asm__ ("ln.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_logf (float x)
+{
+ float z;
+ __asm__ ("ln.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_sin (double x)
+{
+ double z;
+ __asm__ ("sin.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_sinf (float x)
+{
+ float z;
+ __asm__ ("sin.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_sqrt (double x)
+{
+ double z;
+ __asm__ ("sqrt.d %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ float __inline_sqrtf (float x)
+{
+ float z;
+ __asm__ ("sqrt.s %0" : "=d" (z) : "0" (x));
+ return z;
+}
+
+static __inline__ __const__ double __inline_ceil (double x)
+{
+ double z;
+ __asm__ ("frint.d %1,%0" : "=d" (z) : "d" (x));
+ if (z < x) z += 1.0;
+ return z;
+}
+
+static __inline__ __const__ float __inline_ceilf (float x)
+{
+ float z;
+ __asm__ ("frint.s %1,%0" : "=d" (z) : "d" (x));
+ if (z < x) z += 1.0F;
+ return z;
+}
+
+static __inline__ __const__ double __inline_floor (double x)
+{
+ double z;
+ __asm__ ("frint.d %1,%0" : "=d" (z) : "d" (x));
+ if (z > x) z -= 1.0;
+ return z;
+}
+
+static __inline__ __const__ float __inline_floorf (float x)
+{
+ float z;
+ __asm__ ("frint.s %1,%0" : "=d" (z) : "d" (x));
+ if (z > x) z -= 1.0F;
+ return z;
+}
+
+static __inline__ __const__ double __inline_log10 (double x)
+{
+ return 0.43429448190325182765 * __inline_log (x);
+}
+
+static __inline__ __const__ float __inline_log10f (float x)
+{
+ return 0.43429448190325182765F * __inline_logf (x);
+}
+
+static __inline__ double __inline_modf (double x, double *np)
+{
+ double intpart;
+ __asm__ ("frint.d %1,%0" : "=d" (intpart) : "d" (x));
+ *np = intpart;
+ return x - intpart;
+}
+
+static __inline__ float __inline_modff (float x, float *np)
+{
+ float intpart;
+ __asm__ ("frint.s %1,%0" : "=d" (intpart) : "d" (x));
+ *np = intpart;
+ return x - intpart;
+}
+
+static __inline__ double __inline_frexp (double x, int *np)
+{
+ union u { double d; unsigned long long ll; } u;
+ if ((u.d = x) == 0)
+ *np = 0;
+ else
+ {
+#if _IEEE_FLOAT_
+ *np = ((u.ll >> 52) & 03777) - 01776;
+ u.ll = (u.ll & 0x800fffffffffffffLL) | 0x3fe0000000000000LL;
+#else
+ *np = ((u.ll >> 52) & 03777) - 02000;
+ u.ll = (u.ll & 0x800fffffffffffffLL) | 0x4000000000000000LL;
+#endif
+ }
+ return u.d;
+}
+
+static __inline__ float __inline_frexpf (float x, int *np)
+{
+ union u { float f; unsigned int i; } u;
+ if ((u.f = x) == 0)
+ *np = 0;
+ else
+ {
+#if _IEEE_FLOAT_
+ *np = ((u.i >> 23) & 0377) - 0176;
+ u.i = (u.i & 0x807fffff) | 0x3f000000;
+#else
+ *np = ((u.i >> 23) & 0377) - 0200;
+ u.i = (u.i & 0x807fffff) | 0x40000000;
+#endif
+ }
+ return u.f;
+}
+
+static __inline__ double __inline_ldexp (double x, int n)
+{
+ extern int errno;
+ union { double d; long long ll; unsigned sexp : 12; } u;
+ if ((u.d = x) != 0)
+ {
+ int exp = n + (u.sexp & 03777);
+ long long nn = (long long) n << 52;
+#if _IEEE_FLOAT_
+ if (exp <= 0)
+ u.ll &= 0x8000000000000000LL, errno = 34;
+ else if (exp > 03776)
+ u.ll = u.ll & 0x8000000000000000LL | 0x7fefffffffffffffLL, errno = 34;
+#else
+ if (exp <= 0)
+ u.ll = 0, errno = 34;
+ else if (exp > 03777)
+ u.ll |= 0x7fffffffffffffffLL, errno = 34;
+#endif
+ else
+ u.ll += nn;
+ }
+ return u.d;
+}
+
+static __inline__ float __inline_ldexpf (float x, int n)
+{
+ extern int errno;
+ union { float f; int i; unsigned sexp : 9; } u;
+ if ((u.f = x) != 0)
+ {
+ int exp = n + (u.sexp & 0377);
+ int nn = n << 23;
+#if _IEEE_FLOAT_
+ if (exp <= 0)
+ u.i &= 0x80000000, errno = 34;
+ else if (exp > 0376)
+ u.i = u.i & 0x80000000 | 0x7f7fffff, errno = 34;
+#else
+ if (exp <= 0)
+ u.i = 0, errno = 34;
+ else if (exp > 0377)
+ u.i |= 0x7fffffff, errno = 34;
+#endif
+ else
+ u.i += nn;
+ }
+ return u.f;
+}
+
+static __inline__ __const__ double __inline_rint (double x)
+{
+ double z;
+ union { double d; unsigned long long ll; } u;
+ u.d = x;
+#if _IEEE_FLOAT_
+ u.ll = (u.ll & 0x8000000000000000LL) | 0x3fe0000000000000LL;
+#else
+ u.ll = (u.ll & 0x8000000000000000LL) | 0x4000000000000000LL;
+#endif
+ __asm__ ("frint.d %1,%0" : "=d" (z) : "d" (x + u.d));
+ return z;
+}
+
+static __inline__ __const__ float __inline_rintf (float x)
+{
+ float z;
+ union { float f; unsigned int i; } u;
+ u.f = x;
+#if _IEEE_FLOAT_
+ u.i = (u.i & 0x80000000) | 0x3f000000;
+#else
+ u.i = (u.i & 0x80000000) | 0x40000000;
+#endif
+ __asm__ ("frint.s %1,%0" : "=d" (z) : "d" (x + u.f));
+ return z;
+}
+
+static __inline__ __const__ int __inline_irint (double x)
+{
+ union { double d; unsigned long long ll; } u;
+ u.d = x;
+#if _IEEE_FLOAT_
+ u.ll = (u.ll & 0x8000000000000000LL) | 0x3fe0000000000000LL;
+#else
+ u.ll = (u.ll & 0x8000000000000000LL) | 0x4000000000000000LL;
+#endif
+ return x + u.d;
+}
+
+static __inline__ __const__ int __inline_irintf (float x)
+{
+ union { float f; unsigned int i; } u;
+ u.f = x;
+#if _IEEE_FLOAT_
+ u.i = (u.i & 0x80000000) | 0x3f000000;
+#else
+ u.i = (u.i & 0x80000000) | 0x40000000;
+#endif
+ return x + u.f;
+}
+
+#endif /* _MATH_H */
+@//E*O*F include/math.h//
+chmod u=rw,g=rw,o=r include/math.h
+
+echo x - include/stddef.h
+sed 's/^@//' > "include/stddef.h" <<'@//E*O*F include/stddef.h//'
+#ifndef _STDDEF_H
+#define _STDDEF_H
+
+#ifndef __WCHAR_T
+#define __WCHAR_T
+
+#ifdef __GNUG__
+/* In C++, wchar_t is a distinct basic type,
+ and we can expect __wchar_t to be defined by cc1plus. */
+typedef __wchar_t wchar_t;
+#else
+/* In C, cpp tells us which type to make an alias for. */
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+
+#endif /* __WCHAR_T */
+
+#include_next <stddef.h>
+
+#endif /* _STDDEF_H */
+@//E*O*F include/stddef.h//
+chmod u=rw,g=rw,o=r include/stddef.h
+
+echo x - include/stdlib.h
+sed 's/^@//' > "include/stdlib.h" <<'@//E*O*F include/stdlib.h//'
+#ifndef _STDLIB_H
+#define _STDLIB_H
+
+#if _CONVEX_SOURCE
+
+#define alloca __non_builtin_alloca
+#include_next <stdlib.h>
+#undef alloca
+
+#else
+
+#include_next <stdlib.h>
+
+#endif /* _CONVEX_SOURCE */
+
+#endif /* _STDLIB_H */
+@//E*O*F include/stdlib.h//
+chmod u=rw,g=rw,o=r include/stdlib.h
+
+exit 0
diff --git a/gcc/config/convex/x-convex b/gcc/config/convex/x-convex
new file mode 100755
index 0000000..8029ac5
--- /dev/null
+++ b/gcc/config/convex/x-convex
@@ -0,0 +1,5 @@
+# ld can make exe's c2-only if this lib is searched even though not loaded
+CCLIBFLAGS = -tm c1
+
+# Use -pcc to avoid surprises.
+CC = cc -pcc
diff --git a/gcc/config/convex/xm-convex.h b/gcc/config/convex/xm-convex.h
new file mode 100755
index 0000000..aaaa3b1
--- /dev/null
+++ b/gcc/config/convex/xm-convex.h
@@ -0,0 +1,48 @@
+/* Configuration for GNU C-compiler for Convex.
+ Copyright (C) 1989, 1993, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* This describes the machine the compiler is hosted on.
+ Fixed widths of the host's fundamental integer types, used by GCC's
+ host-side arithmetic (e.g. constant folding). */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'.
+ NOTE(review): 33 is presumably an arbitrary nonzero code chosen to be
+ distinguishable from ordinary tool failures. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* Convex ships /tmp as a separate file system - thus it
+ usually has more free space than /usr/tmp */
+
+#define P_tmpdir "/tmp/"
+
+/* Convex uses Vax or IEEE floats.
+ Both formats have Vax semantics. */
+
+#define HOST_FLOAT_FORMAT VAX_FLOAT_FORMAT