summaryrefslogtreecommitdiff
path: root/gcc/config/vax
diff options
context:
space:
mode:
authorYamaArashi <shadow962@live.com>2016-01-06 01:47:28 -0800
committerYamaArashi <shadow962@live.com>2016-01-06 01:47:28 -0800
commitbe8b04496302184c6e8f04d6179f9c3afc50aeb6 (patch)
tree726e2468c0c07add773c0dbd86ab6386844259ae /gcc/config/vax
initial commit
Diffstat (limited to 'gcc/config/vax')
-rwxr-xr-xgcc/config/vax/netbsd.h24
-rwxr-xr-xgcc/config/vax/ultrix.h12
-rwxr-xr-xgcc/config/vax/vax.c837
-rwxr-xr-xgcc/config/vax/vax.h1317
-rwxr-xr-xgcc/config/vax/vax.md2136
-rwxr-xr-xgcc/config/vax/vaxv.h70
-rwxr-xr-xgcc/config/vax/vms.h369
-rwxr-xr-xgcc/config/vax/x-vax3
-rwxr-xr-xgcc/config/vax/xm-vax.h45
-rwxr-xr-xgcc/config/vax/xm-vaxv.h3
-rwxr-xr-xgcc/config/vax/xm-vms.h206
11 files changed, 5022 insertions, 0 deletions
diff --git a/gcc/config/vax/netbsd.h b/gcc/config/vax/netbsd.h
new file mode 100755
index 0000000..23f3ff5
--- /dev/null
+++ b/gcc/config/vax/netbsd.h
@@ -0,0 +1,24 @@
/* Target overrides for VAX running NetBSD.  Each #undef/#define pair
   replaces a default provided elsewhere in the tm.h include chain.  */

#undef CPP_PREDEFINES
#define CPP_PREDEFINES "-Dunix -Dvax -D__NetBSD__ -Asystem(unix) -Asystem(NetBSD) -Acpu(vax) -Amachine(vax)"

/* Make gcc agree with <machine/ansi.h> */

#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"

#undef PTRDIFF_TYPE
#define PTRDIFF_TYPE "int"

#undef WCHAR_TYPE
#define WCHAR_TYPE "int"

/* wchar_t is a signed type here, so explicitly turn the flag off.  */
#undef WCHAR_UNSIGNED
#define WCHAR_UNSIGNED 0

#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32

/* Until they use ELF or something that handles dwarf2 unwinds
   and initialization stuff better.  */
#undef DWARF2_UNWIND_INFO
+
diff --git a/gcc/config/vax/ultrix.h b/gcc/config/vax/ultrix.h
new file mode 100755
index 0000000..3a14419
--- /dev/null
+++ b/gcc/config/vax/ultrix.h
@@ -0,0 +1,12 @@
/* Target overrides for VAX running Ultrix.  */

#undef CPP_PREDEFINES
#define CPP_PREDEFINES " -Dvax -Dunix -Dultrix -Dbsd4_2 -D__vax -D__unix -D__ultrix -D__bsd4_2 -Asystem(unix) -Asystem(bsd) -Acpu(vax) -Amachine(vax)"

/* These are as defined in /usr/include/sys/stdtypes.h.
   These values are for ultrix 4.2 on the vax.  */
#define SIZE_TYPE "unsigned int"
#define PTRDIFF_TYPE "int"
#define WCHAR_TYPE "unsigned int"
#define WCHAR_TYPE_SIZE 32

/* True for Ultrix 4.3 and later and possibly earlier.  */
#define HAVE_ATEXIT
diff --git a/gcc/config/vax/vax.c b/gcc/config/vax/vax.c
new file mode 100755
index 0000000..bac442a
--- /dev/null
+++ b/gcc/config/vax/vax.c
@@ -0,0 +1,837 @@
+/* Subroutines for insn-output.c for Vax.
+ Copyright (C) 1987, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include <stdio.h>
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#ifdef VMS_TARGET
+#include "tree.h"
+#endif
+
/* Split up to three DImode (quadword) OPERANDS into SImode halves.
   On return, LOW[i] refers to the low longword (subword 0) and
   OPERANDS[i] is rewritten to refer to the high longword (subword 1).
   N is unused.
   NOTE(review): the comment that previously stood here ("like
   nonimmediate_operand with a restriction on the type of MEM")
   described a different function.  */

void
split_quadword_operands (operands, low, n)
     rtx *operands, *low;
     int n;
{
  int i;

  /* Nothing split yet.  */
  low[0] = low[1] = low[2] = 0;
  for (i = 0; i < 3; i++)
    {
      if (low[i])
	/* it's already been figured out */;
      else if (GET_CODE (operands[i]) == MEM
	       && (GET_CODE (XEXP (operands[i], 0)) == POST_INC))
	{
	  /* (mem (post_inc R)): reuse the autoincrement address for the
	     low half; the increment itself advances to the high half.  */
	  rtx addr = XEXP (operands[i], 0);
	  operands[i] = low[i] = gen_rtx (MEM, SImode, addr);
	  /* NOTE(review): assumes alternative 0 ties operand 1 to
	     operand 0 -- confirm against the insn pattern in vax.md.  */
	  if (which_alternative == 0 && i == 0)
	    {
	      addr = XEXP (operands[i], 0);
	      operands[i+1] = low[i+1] = gen_rtx (MEM, SImode, addr);
	    }
	}
      else
	{
	  /* General case: subword 0 is the low part, subword 1 the high.  */
	  low[i] = operand_subword (operands[i], 0, 0, DImode);
	  operands[i] = operand_subword (operands[i], 1, 0, DImode);
	}
    }
}
+
/* Print to FILE the VAX assembler syntax for the address rtx ADDR.
   Handles indirection (`*'), register deferred, autodecrement,
   autoincrement, and displacement/indexed PLUS forms.
   (K&R-era implicit int return type; no value is ever returned.)  */

print_operand_address (file, addr)
     FILE *file;
     register rtx addr;
{
  /* NOTE(review): reg2 is declared but never used.  */
  register rtx reg1, reg2, breg, ireg;
  rtx offset;

 retry:
  switch (GET_CODE (addr))
    {
    case MEM:
      /* Indirect: print `*' and decode the inner address.  */
      fprintf (file, "*");
      addr = XEXP (addr, 0);
      goto retry;

    case REG:
      /* Register deferred: (Rn).  */
      fprintf (file, "(%s)", reg_names[REGNO (addr)]);
      break;

    case PRE_DEC:
      /* Autodecrement: -(Rn).  */
      fprintf (file, "-(%s)", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case POST_INC:
      /* Autoincrement: (Rn)+.  */
      fprintf (file, "(%s)+", reg_names[REGNO (XEXP (addr, 0))]);
      break;

    case PLUS:
      /* There can be either two or three things added here.  One must be a
	 REG.  One can be either a REG or a MULT of a REG and an appropriate
	 constant, and the third can only be a constant or a MEM.

	 We get these two or three things and put the constant or MEM in
	 OFFSET, the MULT or REG in IREG, and the REG in BREG.  If we have
	 a register and can't tell yet if it is a base or index register,
	 put it into REG1.  */

      reg1 = 0; ireg = 0; breg = 0; offset = 0;

      /* Peel off and classify the first summand.  */
      if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
	  || GET_CODE (XEXP (addr, 0)) == MEM)
	{
	  offset = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
	       || GET_CODE (XEXP (addr, 1)) == MEM)
	{
	  offset = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (GET_CODE (XEXP (addr, 1)) == MULT)
	{
	  ireg = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  ireg = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	{
	  reg1 = XEXP (addr, 1);
	  addr = XEXP (addr, 0);
	}
      else if (GET_CODE (XEXP (addr, 0)) == REG)
	{
	  reg1 = XEXP (addr, 0);
	  addr = XEXP (addr, 1);
	}
      else
	abort ();

      /* ADDR is now the remaining summand: a lone term or a nested PLUS
	 carrying the other two of three summands.  */
      if (GET_CODE (addr) == REG)
	{
	  if (reg1)
	    ireg = addr;
	  else
	    reg1 = addr;
	}
      else if (GET_CODE (addr) == MULT)
	ireg = addr;
      else if (GET_CODE (addr) == PLUS)
	{
	  /* Classify both halves of the inner PLUS the same way.  */
	  if (CONSTANT_ADDRESS_P (XEXP (addr, 0))
	      || GET_CODE (XEXP (addr, 0)) == MEM)
	    {
	      if (offset)
		{
		  if (GET_CODE (offset) == CONST_INT)
		    offset = plus_constant (XEXP (addr, 0), INTVAL (offset));
		  else if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
		    offset = plus_constant (offset, INTVAL (XEXP (addr, 0)));
		  else
		    abort ();
		}
	      /* NOTE(review): this assignment discards the plus_constant
		 sum computed just above -- looks suspicious; verify
		 against a later GCC revision of this file.  */
	      offset = XEXP (addr, 0);
	    }
	  else if (GET_CODE (XEXP (addr, 0)) == REG)
	    {
	      if (reg1)
		ireg = reg1, breg = XEXP (addr, 0), reg1 = 0;
	      else
		reg1 = XEXP (addr, 0);
	    }
	  else if (GET_CODE (XEXP (addr, 0)) == MULT)
	    {
	      if (ireg)
		abort ();
	      ireg = XEXP (addr, 0);
	    }
	  else
	    abort ();

	  if (CONSTANT_ADDRESS_P (XEXP (addr, 1))
	      || GET_CODE (XEXP (addr, 1)) == MEM)
	    {
	      if (offset)
		{
		  if (GET_CODE (offset) == CONST_INT)
		    offset = plus_constant (XEXP (addr, 1), INTVAL (offset));
		  else if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
		    offset = plus_constant (offset, INTVAL (XEXP (addr, 1)));
		  else
		    abort ();
		}
	      /* NOTE(review): same discarded-sum pattern as above.  */
	      offset = XEXP (addr, 1);
	    }
	  else if (GET_CODE (XEXP (addr, 1)) == REG)
	    {
	      if (reg1)
		ireg = reg1, breg = XEXP (addr, 1), reg1 = 0;
	      else
		reg1 = XEXP (addr, 1);
	    }
	  else if (GET_CODE (XEXP (addr, 1)) == MULT)
	    {
	      if (ireg)
		abort ();
	      ireg = XEXP (addr, 1);
	    }
	  else
	    abort ();
	}
      else
	abort ();

      /* If REG1 is non-zero, figure out if it is a base or index register.  */
      if (reg1)
	{
	  /* It must be the index if the base slot is taken, or if the
	     offset is a MEM (the base is then needed for indirection).  */
	  if (breg != 0 || (offset && GET_CODE (offset) == MEM))
	    {
	      if (ireg)
		abort ();
	      ireg = reg1;
	    }
	  else
	    breg = reg1;
	}

      /* Emit displacement, base, then index: disp(breg)[ireg].  */
      if (offset != 0)
	output_address (offset);

      if (breg != 0)
	fprintf (file, "(%s)", reg_names[REGNO (breg)]);

      if (ireg != 0)
	{
	  /* Print the bare index register; the MULT scale factor is
	     dropped here -- NOTE(review): presumably the scale is implied
	     by the operand size in VAX indexed mode; confirm.  */
	  if (GET_CODE (ireg) == MULT)
	    ireg = XEXP (ireg, 0);
	  if (GET_CODE (ireg) != REG)
	    abort ();
	  fprintf (file, "[%s]", reg_names[REGNO (ireg)]);
	}
      break;

    default:
      /* A constant or symbolic address: print it directly.  */
      output_addr_const (file, addr);
    }
}
+
+char *
+rev_cond_name (op)
+ rtx op;
+{
+ switch (GET_CODE (op))
+ {
+ case EQ:
+ return "neq";
+ case NE:
+ return "eql";
+ case LT:
+ return "geq";
+ case LE:
+ return "gtr";
+ case GT:
+ return "leq";
+ case GE:
+ return "lss";
+ case LTU:
+ return "gequ";
+ case LEU:
+ return "gtru";
+ case GTU:
+ return "lequ";
+ case GEU:
+ return "lssu";
+
+ default:
+ abort ();
+ }
+}
+
+int
+vax_float_literal(c)
+ register rtx c;
+{
+ register enum machine_mode mode;
+ int i;
+ union {double d; int i[2];} val;
+
+ if (GET_CODE (c) != CONST_DOUBLE)
+ return 0;
+
+ mode = GET_MODE (c);
+
+ if (c == const_tiny_rtx[(int) mode][0]
+ || c == const_tiny_rtx[(int) mode][1]
+ || c == const_tiny_rtx[(int) mode][2])
+ return 1;
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+
+ val.i[0] = CONST_DOUBLE_LOW (c);
+ val.i[1] = CONST_DOUBLE_HIGH (c);
+
+ for (i = 0; i < 7; i ++)
+ if (val.d == 1 << i || val.d == 1 / (1 << i))
+ return 1;
+#endif
+ return 0;
+}
+
+
/* Return the cost in cycles of a memory address, relative to register
   indirect.

   Each of the following adds the indicated number of cycles:

	1 - symbolic address
	1 - pre-decrement
	1 - indexing and/or offset(register)
	2 - indirect */


int vax_address_cost (addr)
     register rtx addr;
{
  /* Feature flags accumulated while walking the address tree.  */
  int reg = 0, indexed = 0, indir = 0, offset = 0, predec = 0;
  rtx plus_op0 = 0, plus_op1 = 0;
 restart:
  switch (GET_CODE (addr))
    {
    case PRE_DEC:
      predec = 1;
      /* fall through: the predecrement also involves a register */
    case REG:
    case SUBREG:
    case POST_INC:
      reg = 1;
      break;
    case MULT:
      indexed = 1;	/* 2 on VAX 2 */
      break;
    case CONST_INT:
      /* byte offsets cost nothing (on a VAX 2, they cost 1 cycle) */
      if (offset == 0)
	offset = (unsigned)(INTVAL(addr)+128) > 256;
      break;
    case CONST:
    case SYMBOL_REF:
      offset = 1;	/* 2 on VAX 2 */
      break;
    case LABEL_REF:	/* this is probably a byte offset from the pc */
      if (offset == 0)
	offset = 1;
      break;
    case PLUS:
      /* Stash one summand for later and keep walking the other; a
	 nested PLUS supplies the third summand.  */
      if (plus_op0)
	plus_op1 = XEXP (addr, 0);
      else
	plus_op0 = XEXP (addr, 0);
      addr = XEXP (addr, 1);
      goto restart;
    case MEM:
      indir = 2;	/* 3 on VAX 2 */
      addr = XEXP (addr, 0);
      goto restart;
    }
  /* Any other rtx code contributes nothing.  */

  /* Up to 3 things can be added in an address.  They are stored in
     plus_op0, plus_op1, and addr.  */

  if (plus_op0)
    {
      addr = plus_op0;
      plus_op0 = 0;
      goto restart;
    }
  if (plus_op1)
    {
      addr = plus_op1;
      plus_op1 = 0;
      goto restart;
    }
  /* Indexing and register+offset can both be used (except on a VAX 2)
     without increasing execution time over either one alone.  */
  if (reg && indexed && offset)
    return reg + indir + offset + predec;
  return reg + indexed + indir + offset + predec;
}
+
+
+/* Cost of an expression on a VAX. This version has costs tuned for the
+ CVAX chip (found in the VAX 3 series) with comments for variations on
+ other models. */
+
+int
+vax_rtx_cost (x)
+ register rtx x;
+{
+ register enum rtx_code code = GET_CODE (x);
+ enum machine_mode mode = GET_MODE (x);
+ register int c;
+ int i = 0; /* may be modified in switch */
+ char *fmt = GET_RTX_FORMAT (code); /* may be modified in switch */
+
+ switch (code)
+ {
+ case POST_INC:
+ return 2;
+ case PRE_DEC:
+ return 3;
+ case MULT:
+ switch (mode)
+ {
+ case DFmode:
+ c = 16; /* 4 on VAX 9000 */
+ break;
+ case SFmode:
+ c = 9; /* 4 on VAX 9000, 12 on VAX 2 */
+ break;
+ case DImode:
+ c = 16; /* 6 on VAX 9000, 28 on VAX 2 */
+ break;
+ case SImode:
+ case HImode:
+ case QImode:
+ c = 10; /* 3-4 on VAX 9000, 20-28 on VAX 2 */
+ break;
+ }
+ break;
+ case UDIV:
+ c = 17;
+ break;
+ case DIV:
+ if (mode == DImode)
+ c = 30; /* highly variable */
+ else if (mode == DFmode)
+ /* divide takes 28 cycles if the result is not zero, 13 otherwise */
+ c = 24;
+ else
+ c = 11; /* 25 on VAX 2 */
+ break;
+ case MOD:
+ c = 23;
+ break;
+ case UMOD:
+ c = 29;
+ break;
+ case FLOAT:
+ c = 6 + (mode == DFmode) + (GET_MODE (XEXP (x, 0)) != SImode);
+ /* 4 on VAX 9000 */
+ break;
+ case FIX:
+ c = 7; /* 17 on VAX 2 */
+ break;
+ case ASHIFT:
+ case LSHIFTRT:
+ case ASHIFTRT:
+ if (mode == DImode)
+ c = 12;
+ else
+ c = 10; /* 6 on VAX 9000 */
+ break;
+ case ROTATE:
+ case ROTATERT:
+ c = 6; /* 5 on VAX 2, 4 on VAX 9000 */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ fmt = "e"; /* all constant rotate counts are short */
+ break;
+ case PLUS:
+ /* Check for small negative integer operand: subl2 can be used with
+ a short positive constant instead. */
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT)
+ if ((unsigned)(INTVAL (XEXP (x, 1)) + 63) < 127)
+ fmt = "e";
+ case MINUS:
+ c = (mode == DFmode) ? 13 : 8; /* 6/8 on VAX 9000, 16/15 on VAX 2 */
+ case IOR:
+ case XOR:
+ c = 3;
+ break;
+ case AND:
+ /* AND is special because the first operand is complemented. */
+ c = 3;
+ if (GET_CODE (XEXP (x, 0)) == CONST_INT)
+ {
+ if ((unsigned)~INTVAL (XEXP (x, 0)) > 63)
+ c = 4;
+ fmt = "e";
+ i = 1;
+ }
+ break;
+ case NEG:
+ if (mode == DFmode)
+ return 9;
+ else if (mode == SFmode)
+ return 6;
+ else if (mode == DImode)
+ return 4;
+ case NOT:
+ return 2;
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ c = 15;
+ break;
+ case MEM:
+ if (mode == DImode || mode == DFmode)
+ c = 5; /* 7 on VAX 2 */
+ else
+ c = 3; /* 4 on VAX 2 */
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == REG || GET_CODE (x) == POST_INC)
+ return c;
+ return c + vax_address_cost (x);
+ default:
+ c = 3;
+ break;
+ }
+
+
+ /* Now look inside the expression. Operands which are not registers or
+ short constants add to the cost.
+
+ FMT and I may have been adjusted in the switch above for instructions
+ which require special handling */
+
+ while (*fmt++ == 'e')
+ {
+ register rtx op = XEXP (x, i++);
+ code = GET_CODE (op);
+
+ /* A NOT is likely to be found as the first operand of an AND
+ (in which case the relevant cost is of the operand inside
+ the not) and not likely to be found anywhere else. */
+ if (code == NOT)
+ op = XEXP (op, 0), code = GET_CODE (op);
+
+ switch (code)
+ {
+ case CONST_INT:
+ if ((unsigned)INTVAL (op) > 63 && GET_MODE (x) != QImode)
+ c += 1; /* 2 on VAX 2 */
+ break;
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ c += 1; /* 2 on VAX 2 */
+ break;
+ case CONST_DOUBLE:
+ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT)
+ {
+ /* Registers are faster than floating point constants -- even
+ those constants which can be encoded in a single byte. */
+ if (vax_float_literal (op))
+ c++;
+ else
+ c += (GET_MODE (x) == DFmode) ? 3 : 2;
+ }
+ else
+ {
+ if (CONST_DOUBLE_HIGH (op) != 0
+ || (unsigned)CONST_DOUBLE_LOW (op) > 63)
+ c += 2;
+ }
+ break;
+ case MEM:
+ c += 1; /* 2 on VAX 2 */
+ if (GET_CODE (XEXP (op, 0)) != REG)
+ c += vax_address_cost (XEXP (op, 0));
+ break;
+ case REG:
+ case SUBREG:
+ break;
+ default:
+ c += 1;
+ break;
+ }
+ }
+ return c;
+}
+
/* Check a `double' value for validity for a particular machine mode.
   (The check is done by check_float_value, below; these tables hold
   the range limits it compares against.)  */

/* Extreme representable values as decimal strings:
   [0] largest positive, [1] largest negative,
   [2] smallest positive, [3] smallest negative.  */
static char *float_strings[] =
{
  "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
  "-1.70141173319264430e+38",
  "2.93873587705571877e-39", /* 2^-128 */
  "-2.93873587705571877e-39"
};

/* FLOAT_STRINGS parsed into REAL_VALUE_TYPEs; filled in lazily by
   check_float_value on its first call.  */
static REAL_VALUE_TYPE float_values[4];

/* Nonzero once FLOAT_VALUES has been initialized.  */
static int inited_float_values = 0;
+
+
/* Force the value at *D into the range representable for MODE.

   With OVERFLOW nonzero, *D is unconditionally replaced by the largest
   positive value and 1 is returned.  NOTE(review): the sign of the
   original value is not consulted here -- confirm that is intended.

   Otherwise, for SFmode only: out-of-range values are clamped to the
   extreme of the same sign, and nonzero values smaller in magnitude
   than the smallest representable float are flushed to zero; returns 1
   whenever *D was rewritten.  Other modes (including DFmode) are
   accepted unchanged.  Returns 0 if *D was left untouched.  */

int
check_float_value (mode, d, overflow)
     enum machine_mode mode;
     REAL_VALUE_TYPE *d;
     int overflow;
{
  /* Parse the limit strings once, on first use.  */
  if (inited_float_values == 0)
    {
      int i;
      for (i = 0; i < 4; i++)
	{
	  float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
	}

      inited_float_values = 1;
    }

  if (overflow)
    {
      /* bcopy (src, dst, n): replace *D with the maximum value.  */
      bcopy ((char *) &float_values[0], (char *) d, sizeof (REAL_VALUE_TYPE));
      return 1;
    }

  if ((mode) == SFmode)
    {
      REAL_VALUE_TYPE r;
      bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
      if (REAL_VALUES_LESS (float_values[0], r))
	{
	  /* r > +max: clamp to +max.  */
	  bcopy ((char *) &float_values[0], (char *) d,
		 sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (r, float_values[1]))
	{
	  /* r < -max: clamp to -max.  */
	  bcopy ((char *) &float_values[1], (char*) d,
		 sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (dconst0, r)
	       && REAL_VALUES_LESS (r, float_values[2]))
	{
	  /* 0 < r < smallest positive: underflow to zero.  */
	  bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
      else if (REAL_VALUES_LESS (r, dconst0)
	       && REAL_VALUES_LESS (float_values[3], r))
	{
	  /* smallest negative < r < 0: underflow to zero.  */
	  bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
	  return 1;
	}
    }

  return 0;
}
+
+#ifdef VMS_TARGET
+/* Additional support code for VMS target. */
+
/* Linked list of all externals that are to be emitted when optimizing
   for the global pointer if they haven't been declared by the end of
   the program with an appropriate .comm or initialization.  */

static
struct extern_list {
  struct extern_list *next;	/* next external */
  char *name;			/* name of the external */
  int size;			/* external's actual size */
  int in_const;			/* section type flag */
} *extern_head = 0,		/* symbols known to be defined */
  *pending_head = 0;		/* symbols referenced but not yet defined */
+
/* Check whether NAME is already on the external definition list.  If not,
   add it to either that list or the pending definition list.

   DECL is the declaration NAME belongs to (consulted only for size and
   section when PENDING).  PENDING nonzero records a mere reference; a
   later call with PENDING zero marks the symbol as defined and moves it
   from the pending list to the declared list.  */

void
vms_check_external (decl, name, pending)
     tree decl;
     char *name;
     int pending;
{
  register struct extern_list *p, *p0;

  /* Already known to be defined: nothing to do.  */
  for (p = extern_head; p; p = p->next)
    if (!strcmp (p->name, name))
      return;

  /* P0 trails P so the node can be unlinked from the pending list.  */
  for (p = pending_head, p0 = 0; p; p0 = p, p = p->next)
    if (!strcmp (p->name, name))
      {
	if (pending)
	  return;

	/* Was pending, but has now been defined; move it to other list.  */
	if (p == pending_head)
	  pending_head = p->next;
	else
	  p0->next = p->next;
	p->next = extern_head;
	extern_head = p;
	return;
      }

  /* Not previously seen; create a new list entry.  */
  p = (struct extern_list *)permalloc ((long) sizeof (struct extern_list));
  p->name = name;

  if (pending)
    {
      /* Save the size and section type and link to `pending' list.  */
      p->size = (DECL_SIZE (decl) == 0) ? 0 :
	TREE_INT_CST_LOW (size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
				      size_int (BITS_PER_UNIT)));
      p->in_const = (TREE_READONLY (decl) && ! TREE_THIS_VOLATILE (decl));

      p->next = pending_head;
      pending_head = p;
    }
  else
    {
      /* Size and section type don't matter; link to `declared' list.  */
      p->size = p->in_const = 0;	/* arbitrary init */

      p->next = extern_head;
      extern_head = p;
    }
  return;
}
+
/* Emit a .comm declaration into FILE (in the const or data section as
   recorded) for every external still on the pending list, moving each
   entry to the declared list as it is output.  */

void
vms_flush_pending_externals (file)
     FILE *file;
{
  register struct extern_list *p;

  while (pending_head)
    {
      /* Move next pending declaration to the "done" list.  */
      p = pending_head;
      pending_head = p->next;
      p->next = extern_head;
      extern_head = p;

      /* Now output the actual declaration.  */
      if (p->in_const)
	const_section ();
      else
	data_section ();
      fputs (".comm ", file);
      assemble_name (file, p->name);
      fprintf (file, ",%d\n", p->size);
    }
}
+#endif /* VMS_TARGET */
+
+#ifdef VMS
+/* Additional support code for VMS host. */
+
+#ifdef QSORT_WORKAROUND
+ /*
+ Do not use VAXCRTL's qsort() due to a severe bug: once you've
+ sorted something which has a size that's an exact multiple of 4
+ and is longword aligned, you cannot safely sort anything which
+ is either not a multiple of 4 in size or not longword aligned.
+ A static "move-by-longword" optimization flag inside qsort() is
+ never reset. This is known to affect VMS V4.6 through VMS V5.5-1,
+ and was finally fixed in VMS V5.5-2.
+
+ In this work-around an insertion sort is used for simplicity.
+ The qsort code from glibc should probably be used instead.
+ */
/* qsort replacement with the same interface: straight insertion sort.
   ARRAY holds COUNT elements of SIZE bytes each, ordered by the
   strcmp-style function COMPARE.  Two loops are specialized for the
   short- and long-sized elements; the general loop moves bytes with
   memcpy via an alloca'd one-element buffer.  */
void
not_qsort (array, count, size, compare)
     void *array;
     unsigned count, size;
     int (*compare)();
{

  if (size == sizeof (short))
    {
      register int i;
      register short *next, *prev;
      short tmp, *base = array;

      for (next = base, i = count - 1; i > 0; i--)
	{
	  prev = next++;
	  if ((*compare)(next, prev) < 0)
	    {
	      /* *NEXT is out of order: shift larger elements up one
		 slot each until its insertion point is found.  */
	      tmp = *next;
	      do *(prev + 1) = *prev;
	      while (--prev >= base ? (*compare)(&tmp, prev) < 0 : 0);
	      *(prev + 1) = tmp;
	    }
	}
    }
  else if (size == sizeof (long))
    {
      /* Same algorithm, specialized for long-sized elements.  */
      register int i;
      register long *next, *prev;
      long tmp, *base = array;

      for (next = base, i = count - 1; i > 0; i--)
	{
	  prev = next++;
	  if ((*compare)(next, prev) < 0)
	    {
	      tmp = *next;
	      do *(prev + 1) = *prev;
	      while (--prev >= base ? (*compare)(&tmp, prev) < 0 : 0);
	      *(prev + 1) = tmp;
	    }
	}
    }
  else	/* arbitrary size */
    {
      register int i;
      register char *next, *prev, *tmp = alloca (size), *base = array;

      for (next = base, i = count - 1; i > 0; i--)
	{ /* count-1 forward iterations */
	  prev = next, next += size;		/* increment front pointer */
	  if ((*compare)(next, prev) < 0)
	    { /* found element out of order; move others up then re-insert */
	      memcpy (tmp, next, size);		/* save smaller element */
	      do { memcpy (prev + size, prev, size); /* move larger elem. up */
		   prev -= size;		/* decrement back pointer */
	      } while (prev >= base ? (*compare)(tmp, prev) < 0 : 0);
	      memcpy (prev + size, tmp, size);	/* restore small element */
	    }
	}
#ifdef USE_C_ALLOCA
      /* Release the C-alloca temporary buffer.  */
      alloca (0);
#endif
    }

  return;
}
+#endif /* QSORT_WORKAROUND */
+
+#endif /* VMS */
diff --git a/gcc/config/vax/vax.h b/gcc/config/vax/vax.h
new file mode 100755
index 0000000..ad8fb7c
--- /dev/null
+++ b/gcc/config/vax/vax.h
@@ -0,0 +1,1317 @@
+/* Definitions of target machine for GNU compiler. Vax version.
+ Copyright (C) 1987, 88, 91, 93-96, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "-Dvax -D__vax__ -Dunix -Asystem(unix) -Asystem(bsd) -Acpu(vax) -Amachine(vax)"
+
+/* If using g-format floating point, alter math.h. */
+
+#define CPP_SPEC "%{mg:-DGFLOAT}"
+
+/* Choose proper libraries depending on float format.
+ Note that there are no profiling libraries for g-format.
+ Also use -lg for the sake of dbx. */
+
+#define LIB_SPEC "%{g:-lg}\
+ %{mg:%{lm:-lmg} -lcg \
+ %{p:%eprofiling not supported with -mg\n}\
+ %{pg:%eprofiling not supported with -mg\n}}\
+ %{!mg:%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+
+/* Print subsidiary information on the compiler version in use. */
+
+#ifndef TARGET_NAME /* A more specific value might be supplied via -D. */
+#define TARGET_NAME "vax"
+#endif
+#define TARGET_VERSION fprintf (stderr, " (%s)", TARGET_NAME)
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* Macros used in the machine description to test the flags. */
+
+/* Nonzero if compiling code that Unix assembler can assemble. */
+#define TARGET_UNIX_ASM (target_flags & 1)
+
+/* Nonzero if compiling with VAX-11 "C" style structure alignment */
+#define TARGET_VAXC_ALIGNMENT (target_flags & 2)
+
+/* Nonzero if compiling with `G'-format floating point */
+#define TARGET_G_FLOAT (target_flags & 4)
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { {"unix", 1}, \
+ {"gnu", -1}, \
+ {"vaxc-alignment", 2}, \
+ {"g", 4}, \
+ {"g-float", 4}, \
+ {"d", -4}, \
+ {"d-float", -4}, \
+ { "", TARGET_DEFAULT}}
+
+/* Default target_flags if no switches specified. */
+
+#ifndef TARGET_DEFAULT
+#define TARGET_DEFAULT 1
+#endif
+
+/* Target machine storage layout */
+
+/* Define for software floating point emulation of VAX format
+ when cross compiling from a non-VAX host. */
+/* #define REAL_ARITHMETIC */
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields.
+ This is not true on the vax. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is not true on the vax. */
+#define BYTES_BIG_ENDIAN 0
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+/* This is not true on the vax. */
+#define WORDS_BIG_ENDIAN 0
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 16
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS (! TARGET_VAXC_ALIGNMENT)
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* No structure field wants to be aligned rounder than this. */
+#define BIGGEST_FIELD_ALIGNMENT (TARGET_VAXC_ALIGNMENT ? 8 : 32)
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Let's keep the stack somewhat aligned. */
+#define STACK_BOUNDARY 32
+
+/* The table of an ADDR_DIFF_VEC must be contiguous with the case
+ opcode, it is part of the case instruction. */
+#define ADDR_VEC_ALIGN(ADDR_VEC) 0
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers. */
+#define FIRST_PSEUDO_REGISTER 16
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+ On the vax, these are the AP, FP, SP and PC. */
+#define FIXED_REGISTERS {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS {1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+ On the vax, all registers are one word long. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ On the vax, all registers can hold all modes. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* Vax pc is overloaded on a register. */
+#define PC_REGNUM 15
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 14
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 13
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 1
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 12
+
+/* Register in which static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 0
+
+/* Register in which address to store a structure value
+ is passed to a function. */
+#define STRUCT_VALUE_REGNUM 1
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The vax has only one kind of registers, so NO_REGS and ALL_REGS
+ are the only classes. */
+
+enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Since GENERAL_REGS is the same class as ALL_REGS,
+ don't give it a different class number; just make it an alias. */
+
+#define GENERAL_REGS ALL_REGS
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ {"NO_REGS", "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+/* NO_REGS is empty; 0xffff covers all 16 hard registers
+ (r0-r11, ap, fp, sp, pc -- see REGISTER_NAMES below). */
+
+#define REG_CLASS_CONTENTS {0, 0xffff}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) ALL_REGS
+
+/* The class value for index registers, and the one for base regs. */
+
+#define INDEX_REG_CLASS ALL_REGS
+#define BASE_REG_CLASS ALL_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+/* The VAX defines no machine-specific register-class letters,
+ so every letter maps to NO_REGS. */
+
+#define REG_CLASS_FROM_LETTER(C) NO_REGS
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ `I' is the constant zero. */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (VALUE) == 0 \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself.
+
+ `G' is a floating-point zero. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? ((VALUE) == CONST0_RTX (DFmode) \
+ || (VALUE) == CONST0_RTX (SFmode)) \
+ : 0)
+
+/* Optional extra constraints for this machine.
+
+ For the VAX, `Q' means that OP is a MEM that does not have a mode-dependent
+ address. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' \
+ ? GET_CODE (OP) == MEM && ! mode_dependent_address_p (XEXP (OP, 0)) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) (CLASS)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On the vax, this is always the size of MODE in words,
+ since all registers are the same size. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if longjmp restores from saved registers
+ rather than from what setjmp saved. */
+#define LONGJMP_RESTORE_FROM_STACK
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* Given an rtx for the address of a frame,
+ return an rtx for the address of the word in the frame
+ that holds the dynamic chain--the previous frame's address. */
+/* In the frame built by the CALLS instruction, the saved FP lives
+ 12 bytes into the frame. */
+#define DYNAMIC_CHAIN_ADDRESS(frame) \
+gen_rtx (PLUS, Pmode, frame, GEN_INT (12))
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On the vax, -(sp) pushes only the bytes of the operands. */
+#define PUSH_ROUNDING(BYTES) (BYTES)
+
+/* Offset of first parameter from the argument pointer register value. */
+/* The longword at 0(ap) is the argument count pushed by CALLS;
+ the arguments themselves start at 4(ap). */
+#define FIRST_PARM_OFFSET(FNDECL) 4
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the Vax, the RET insn always pops all the args for any function. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) (SIZE)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+/* On the Vax the return value is in R0 regardless. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+/* On the Vax the return value is in R0 regardless. */
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, 0)
+
+/* Define this if PCC uses the nonreentrant convention for returning
+ structure and union values. */
+
+#define PCC_STATIC_STRUCT_RETURN
+
+/* 1 if N is a possible register number for a function value.
+ On the Vax, R0 is the only register thus used. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0)
+
+/* 1 if N is a possible register number for function argument passing.
+ On the Vax, no registers are used in this way. */
+
+#define FUNCTION_ARG_REGNO_P(N) 0
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the vax, this is a single integer, which is a number of bytes
+ of arguments scanned so far. */
+
+#define CUMULATIVE_ARGS int
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On the vax, the offset starts at 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+ ((CUM) = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+/* Every argument occupies a whole number of longwords: round its size
+ up to a multiple of 4 bytes. */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) += ((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + 3) & ~3 \
+ : (int_size_in_bytes (TYPE) + 3) & ~3))
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+/* On the vax all args are pushed. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) 0
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate,
+ adjusted by STARTING_FRAME_OFFSET to accommodate vms.h.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+/* The .word emitted first is the CALLS entry mask of call-saved
+ registers to preserve. VAX short literals encode only 0..63, so
+ small frames use subl2 with a literal operand; larger frames use
+ movab with a negative displacement instead. */
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+{ register int regno; \
+ register int mask = 0; \
+ register int size = SIZE - STARTING_FRAME_OFFSET; \
+ extern char call_used_regs[]; \
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
+ if (regs_ever_live[regno] && !call_used_regs[regno]) \
+ mask |= 1 << regno; \
+ fprintf (FILE, "\t.word 0x%x\n", mask); \
+ MAYBE_VMS_FUNCTION_PROLOGUE(FILE) \
+ if ((size) >= 64) fprintf (FILE, "\tmovab %d(sp),sp\n", -size);\
+ else if (size) fprintf (FILE, "\tsubl2 $%d,sp\n", (size)); }
+
+/* vms.h redefines this. */
+#define MAYBE_VMS_FUNCTION_PROLOGUE(FILE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tmovab LP%d,r0\n\tjsb mcount\n", (LABELNO));
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\ttstl LPBX0\n\tjneq LPI%d\n\tpushal LPBX0\n\tcalls $1,__bb_init_func\nLPI%d:\n", \
+ LABELNO, LABELNO);
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. This is a real pain in the
+ sphincter on a VAX, since we do not want to change any of the bits in the
+ processor status word. The way it is done here, it is pushed onto the stack
+ before any flags have changed, and then the stack is fixed up to account for
+ the fact that the instruction to restore the flags only reads a word.
+ It may seem a bit clumsy, but at least it works.
+*/
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+ fprintf (FILE, "\tmovpsl -(sp)\n\tmovw (sp),2(sp)\n\taddl2 $2,sp\n\taddl2 $1,LPBX2+%d\n\tbicpsw $255\n\tbispsw (sp)+\n", \
+ 4 * BLOCKNO)
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 1
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE. */
+
+/* #define FUNCTION_EPILOGUE(FILE, SIZE) */
+
+/* Store in the variable DEPTH the initial difference between the
+ frame pointer reg contents and the stack pointer reg contents,
+ as of the start of the function body. This depends on the layout
+ of the fixed parts of the stack frame and on how registers are saved.
+
+ On the Vax, FRAME_POINTER_REQUIRED is always 1, so the definition of this
+ macro doesn't matter. But it must be defined. */
+
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0;
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts. */
+
+/* On the vax, the trampoline contains an entry mask and two instructions:
+ .word NN
+ movl $STATIC,r0 (store the function's static chain)
+ jmp *$FUNCTION (jump to function code at address FUNCTION) */
+
+/* 0x8fd0 encodes `movl' (opcode 0xd0) with an immediate operand
+ specifier (0x8f); 0x9f17 encodes `jmp' (0x17) with an absolute
+ deferred operand (0x9f). Shorts are emitted little-endian, and the
+ zero INTs are the immediate/address slots patched in at run time. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ ASM_OUTPUT_SHORT (FILE, const0_rtx); \
+ ASM_OUTPUT_SHORT (FILE, GEN_INT (0x8fd0)); \
+ ASM_OUTPUT_INT (FILE, const0_rtx); \
+ ASM_OUTPUT_BYTE (FILE, 0x50+STATIC_CHAIN_REGNUM); \
+ ASM_OUTPUT_SHORT (FILE, GEN_INT (0x9f17)); \
+ ASM_OUTPUT_INT (FILE, const0_rtx); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE 15
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+/* We copy the register-mask from the function's pure code
+ to the start of the trampoline. */
+/* NOTE(review): the movpsl/pushal/rei asm appears to serve as a
+ serializing sequence so the freshly written trampoline code is
+ fetched correctly -- confirm against the architecture manual. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_insn (gen_rtx (ASM_INPUT, VOIDmode, \
+ "movpsl -(sp)\n\tpushal 1(pc)\n\trei")); \
+ emit_move_insn (gen_rtx (MEM, HImode, TRAMP), \
+ gen_rtx (MEM, HImode, FNADDR)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 4)), CXT);\
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 11)), \
+ plus_constant (FNADDR, 2)); \
+}
+
+/* Byte offset of return address in a stack frame. The "saved PC" field
+ is in element [4] when treating the frame as an array of longwords. */
+
+#define RETURN_ADDRESS_OFFSET (4 * UNITS_PER_WORD) /* 16 */
+
+/* A C expression whose value is RTL representing the value of the return
+ address for the frame COUNT steps up from the current frame.
+ FRAMEADDR is already the frame pointer of the COUNT frame, so we
+ can ignore COUNT. */
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT == 0) \
+ ? gen_rtx (MEM, Pmode, plus_constant (FRAME, RETURN_ADDRESS_OFFSET)) \
+ : (rtx) 0)
+
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_POST_INCREMENT 1
+/* #define HAVE_POST_DECREMENT 0 */
+
+#define HAVE_PRE_DECREMENT 1
+/* #define HAVE_PRE_INCREMENT 0 */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(regno) \
+((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+#define REGNO_OK_FOR_BASE_P(regno) \
+((regno) < FIRST_PSEUDO_REGISTER || reg_renumber[regno] >= 0)
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+/* One base register plus one index register. */
+#define MAX_REGS_PER_ADDRESS 2
+
+/* 1 if X is an rtx for a constant that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 1
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) 1
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS,
+ except for CONSTANT_ADDRESS_P which is actually machine-independent. */
+
+#ifdef NO_EXTERNAL_INDIRECT_ADDRESS
+
+/* Zero if this contains a (CONST (PLUS (SYMBOL_REF) (...))) and the
+ symbol in the SYMBOL_REF is an external symbol. */
+
+#define INDIRECTABLE_CONSTANT_P(X) \
+ (! (GET_CODE ((X)) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == SYMBOL_REF \
+ && SYMBOL_REF_FLAG (XEXP (XEXP ((X), 0), 0))))
+
+/* Re-definition of CONSTANT_ADDRESS_P, which is true only when there
+ are no SYMBOL_REFs for external symbols present. */
+
+#define INDIRECTABLE_CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == SYMBOL_REF && !SYMBOL_REF_FLAG (X)) \
+ || (GET_CODE (X) == CONST && INDIRECTABLE_CONSTANT_P(X)) \
+ || GET_CODE (X) == CONST_INT)
+
+
+/* Non-zero if X is an address which can be indirected. External symbols
+ could be in a sharable image library, so we disallow those. */
+
+#define INDIRECTABLE_ADDRESS_P(X) \
+ (INDIRECTABLE_CONSTANT_ADDRESS_P (X) \
+ || (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 1))))
+
+#else /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+#define INDIRECTABLE_CONSTANT_ADDRESS_P(X) CONSTANT_ADDRESS_P(X)
+
+/* Non-zero if X is an address which can be indirected. */
+#define INDIRECTABLE_ADDRESS_P(X) \
+ (CONSTANT_ADDRESS_P (X) \
+ || (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && CONSTANT_ADDRESS_P (XEXP (X, 1))))
+
+#endif /* not NO_EXTERNAL_INDIRECT_ADDRESS */
+
+/* Go to ADDR if X is a valid address not using indexing.
+ (This much is the easy part.) */
+/* NOTE(review): during reload, a pseudo known to be equivalent to a
+ memory location may be substituted by it, so that equivalent address
+ must itself be indirectable -- hence the reg_equiv_mem check;
+ confirm against reload1.c. */
+#define GO_IF_NONINDEXED_ADDRESS(X, ADDR) \
+{ register rtx xfoob = (X); \
+ if (GET_CODE (xfoob) == REG) \
+ { \
+ extern rtx *reg_equiv_mem; \
+ if (! reload_in_progress \
+ || reg_equiv_mem[REGNO (xfoob)] == 0 \
+ || INDIRECTABLE_ADDRESS_P (reg_equiv_mem[REGNO (xfoob)])) \
+ goto ADDR; \
+ } \
+ if (CONSTANT_ADDRESS_P (xfoob)) goto ADDR; \
+ if (INDIRECTABLE_ADDRESS_P (xfoob)) goto ADDR; \
+ xfoob = XEXP (X, 0); \
+ if (GET_CODE (X) == MEM && INDIRECTABLE_ADDRESS_P (xfoob)) \
+ goto ADDR; \
+ if ((GET_CODE (X) == PRE_DEC || GET_CODE (X) == POST_INC) \
+ && GET_CODE (xfoob) == REG && REG_OK_FOR_BASE_P (xfoob)) \
+ goto ADDR; }
+
+/* 1 if PROD is either a reg times size of mode MODE
+ or just a reg, if MODE is just one byte.
+ This macro's expansion uses the temporary variables xfoo0 and xfoo1
+ that must be declared in the surrounding context. */
+#define INDEX_TERM_P(PROD, MODE) \
+(GET_MODE_SIZE (MODE) == 1 \
+ ? (GET_CODE (PROD) == REG && REG_OK_FOR_BASE_P (PROD)) \
+ : (GET_CODE (PROD) == MULT \
+ && \
+ (xfoo0 = XEXP (PROD, 0), xfoo1 = XEXP (PROD, 1), \
+ ((GET_CODE (xfoo0) == CONST_INT \
+ && INTVAL (xfoo0) == GET_MODE_SIZE (MODE) \
+ && GET_CODE (xfoo1) == REG \
+ && REG_OK_FOR_INDEX_P (xfoo1)) \
+ || \
+ (GET_CODE (xfoo1) == CONST_INT \
+ && INTVAL (xfoo1) == GET_MODE_SIZE (MODE) \
+ && GET_CODE (xfoo0) == REG \
+ && REG_OK_FOR_INDEX_P (xfoo0))))))
+
+/* Go to ADDR if X is the sum of a register
+ and a valid index term for mode MODE. */
+#define GO_IF_REG_PLUS_INDEX(X, MODE, ADDR) \
+{ register rtx xfooa; \
+ if (GET_CODE (X) == PLUS) \
+ { if (GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0)) \
+ && (xfooa = XEXP (X, 1), \
+ INDEX_TERM_P (xfooa, MODE))) \
+ goto ADDR; \
+ if (GET_CODE (XEXP (X, 1)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 1)) \
+ && (xfooa = XEXP (X, 0), \
+ INDEX_TERM_P (xfooa, MODE))) \
+ goto ADDR; } }
+
+/* Accept either a nonindexed address or a base/offset combined with
+ one index term, with the index-sum on either side of the PLUS. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ register rtx xfoo, xfoo0, xfoo1; \
+ GO_IF_NONINDEXED_ADDRESS (X, ADDR); \
+ if (GET_CODE (X) == PLUS) \
+ { /* Handle <address>[index] represented with index-sum outermost */\
+ xfoo = XEXP (X, 0); \
+ if (INDEX_TERM_P (xfoo, MODE)) \
+ { GO_IF_NONINDEXED_ADDRESS (XEXP (X, 1), ADDR); } \
+ xfoo = XEXP (X, 1); \
+ if (INDEX_TERM_P (xfoo, MODE)) \
+ { GO_IF_NONINDEXED_ADDRESS (XEXP (X, 0), ADDR); } \
+ /* Handle offset(reg)[index] with offset added outermost */ \
+ if (INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 0))) \
+ { if (GET_CODE (XEXP (X, 1)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 1))) \
+ goto ADDR; \
+ GO_IF_REG_PLUS_INDEX (XEXP (X, 1), MODE, ADDR); } \
+ if (INDIRECTABLE_CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ { if (GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_BASE_P (XEXP (X, 0))) \
+ goto ADDR; \
+ GO_IF_REG_PLUS_INDEX (XEXP (X, 0), MODE, ADDR); } } }
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ For the vax, nothing needs to be done. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) {}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the VAX, the predecrement and postincrement address depend thus
+ (the amount of decrement or increment being the length of the operand)
+ and all indexed address depend thus (because the index scale factor
+ is the length of the operand). */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+ { if (GET_CODE (ADDR) == POST_INC || GET_CODE (ADDR) == PRE_DEC) \
+ goto LABEL; \
+ if (GET_CODE (ADDR) == PLUS) \
+ { if (CONSTANT_ADDRESS_P (XEXP (ADDR, 0)) \
+ && GET_CODE (XEXP (ADDR, 1)) == REG); \
+ else if (CONSTANT_ADDRESS_P (XEXP (ADDR, 1)) \
+ && GET_CODE (XEXP (ADDR, 0)) == REG); \
+ else goto LABEL; }}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+/* Dispatch-table entries are 16-bit offsets from the table
+ (ASM_OUTPUT_ADDR_DIFF_ELT below emits .word differences). */
+#define CASE_VECTOR_MODE HImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this if the case instruction drops through after the table
+ when the index is out of range. Don't define it if the case insn
+ jumps to the default label instead. */
+#define CASE_DROPS_THROUGH
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+#define FIXUNS_TRUNC_LIKE_FIX_TRUNC
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+/* Define this if zero-extension is slow (more than one real instruction). */
+/* #define SLOW_ZERO_EXTEND */
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS 0
+
+/* Define if shifts truncate the shift count
+ which implies one can omit a sign-extension or zero-extension
+ of a shift count. */
+/* #define SHIFT_COUNT_TRUNCATED */
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE QImode
+
+/* This machine doesn't use IEEE floats. */
+
+#define TARGET_FLOAT_FORMAT VAX_FLOAT_FORMAT
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+/* On a VAX, constants from 0..63 are cheap because they can use the
+ 1 byte literal constant format. compare to -1 should be made cheap
+ so that decrement-and-branch insns can be formed more easily (if
+ the value -1 is copied to a register some decrement-and-branch patterns
+ will not match). */
+
+/* Note the CONST_INT case deliberately falls through to the CONST case:
+ any CONST_INT not matched by a return above costs 3. */
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ if (INTVAL (RTX) == 0) return 0; \
+ if ((OUTER_CODE) == AND) \
+ return ((unsigned) ~INTVAL (RTX) <= 077) ? 1 : 2; \
+ if ((unsigned) INTVAL (RTX) <= 077) return 1; \
+ if ((OUTER_CODE) == COMPARE && INTVAL (RTX) == -1) \
+ return 1; \
+ if ((OUTER_CODE) == PLUS && (unsigned) -INTVAL (RTX) <= 077)\
+ return 1; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 3; \
+ case CONST_DOUBLE: \
+ if (GET_MODE_CLASS (GET_MODE (RTX)) == MODE_FLOAT) \
+ return vax_float_literal (RTX) ? 5 : 8; \
+ else \
+ return (((CONST_DOUBLE_HIGH (RTX) == 0 \
+ && (unsigned) CONST_DOUBLE_LOW (RTX) < 64) \
+ || ((OUTER_CODE) == PLUS \
+ && CONST_DOUBLE_HIGH (RTX) == -1 \
+ && (unsigned)-CONST_DOUBLE_LOW (RTX) < 64)) \
+ ? 2 : 5);
+
+/* Non-constant operations are costed by vax_rtx_cost in vax.c. */
+#define RTX_COSTS(RTX,CODE,OUTER_CODE) case FIX: case FLOAT: \
+ case MULT: case DIV: case UDIV: case MOD: case UMOD: \
+ case ASHIFT: case LSHIFTRT: case ASHIFTRT: \
+ case ROTATE: case ROTATERT: case PLUS: case MINUS: case IOR: \
+ case XOR: case AND: case NEG: case NOT: case ZERO_EXTRACT: \
+ case SIGN_EXTRACT: case MEM: return vax_rtx_cost(RTX)
+
+#define ADDRESS_COST(RTX) (1 + (GET_CODE (RTX) == REG ? 0 : vax_address_cost(RTX)))
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Branches are extremely cheap on the VAX while the shift insns often
+ used to replace branches can be expensive. */
+
+#define BRANCH_COST 0
+
+/*
+ * We can use the BSD C library routines for the libgcc calls that are
+ * still generated, since that's what they boil down to anyways.
+ */
+
+#define UDIVSI3_LIBCALL "*udiv"
+#define UMODSI3_LIBCALL "*urem"
+
+/* Check a `double' value for validity for a particular machine mode. */
+
+/* note that it is very hard to accidentally create a number that fits in a
+ double but not in a float, since their ranges are almost the same */
+
+/* check_float_value is presumably defined in vax.c; it rewrites D in
+ place when the value does not fit the VAX format -- confirm there. */
+#define CHECK_FLOAT_VALUE(MODE, D, OVERFLOW) \
+ ((OVERFLOW) = check_float_value (MODE, &D, OVERFLOW))
+
+/* For future reference:
+ D Float: 9 bit, sign magnitude, excess 128 binary exponent
+ normalized 56 bit fraction, redundant bit not represented
+ approximately 16 decimal digits of precision
+
+ The values to use if we trust decimal to binary conversions:
+#define MAX_D_FLOAT 1.7014118346046923e+38
+#define MIN_D_FLOAT .29387358770557188e-38
+
+ G float: 12 bit, sign magnitude, excess 1024 binary exponent
+ normalized 53 bit fraction, redundant bit not represented
+ approximately 15 decimal digits precision
+
+ The values to use if we trust decimal to binary conversions:
+#define MAX_G_FLOAT .898846567431157e+308
+#define MIN_G_FLOAT .556268464626800e-308
+*/
+
+/* Tell final.c how to eliminate redundant test instructions. */
+
+/* Here we define machine-dependent flags and fields in cc_status
+ (see `conditions.h'). No extra ones are needed for the vax. */
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+/* Record the SET's destination and source so a following test against
+ zero can be deleted; calls invalidate the tracked state, as do
+ PARALLELs whose first SET targets the PC (aob/sob loops). */
+#define NOTICE_UPDATE_CC(EXP, INSN) \
+{ if (GET_CODE (EXP) == SET) \
+ { if (GET_CODE (SET_SRC (EXP)) == CALL) \
+ CC_STATUS_INIT; \
+ else if (GET_CODE (SET_DEST (EXP)) != ZERO_EXTRACT \
+ && GET_CODE (SET_DEST (EXP)) != PC) \
+ { cc_status.flags = 0; \
+ cc_status.value1 = SET_DEST (EXP); \
+ cc_status.value2 = SET_SRC (EXP); } } \
+ else if (GET_CODE (EXP) == PARALLEL \
+ && GET_CODE (XVECEXP (EXP, 0, 0)) == SET) \
+ { \
+ if (GET_CODE (SET_SRC (XVECEXP (EXP, 0, 0))) == CALL) \
+ CC_STATUS_INIT; \
+ else if (GET_CODE (SET_DEST (XVECEXP (EXP, 0, 0))) != PC) \
+ { cc_status.flags = 0; \
+ cc_status.value1 = SET_DEST (XVECEXP (EXP, 0, 0)); \
+ cc_status.value2 = SET_SRC (XVECEXP (EXP, 0, 0)); } \
+ else \
+ /* PARALLELs whose first element sets the PC are aob, \
+ sob insns. They do change the cc's. */ \
+ CC_STATUS_INIT; } \
+ else CC_STATUS_INIT; \
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == REG \
+ && cc_status.value2 \
+ && reg_overlap_mentioned_p (cc_status.value1, cc_status.value2)) \
+ cc_status.value2 = 0; \
+ if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM \
+ && cc_status.value2 \
+ && GET_CODE (cc_status.value2) == MEM) \
+ cc_status.value2 = 0; }
+/* Actual condition, one line up, should be that value2's address
+ depends on value1, but that is too much of a pain. */
+
+#define OUTPUT_JUMP(NORMAL, FLOAT, NO_OV) \
+{ if (cc_status.flags & CC_NO_OVERFLOW) \
+ return NO_OV; \
+ return NORMAL; }
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(FILE) fprintf (FILE, "#NO_APP\n");
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON "#APP\n"
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", \
+ "r9", "r10", "r11", "ap", "fp", "sp", "pc"}
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO
+
+/* How to renumber registers for dbx and gdb.
+ Vax needs no change in the numeration. */
+
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Do not break .stabs pseudos into continuations. */
+
+#define DBX_CONTIN_LENGTH 0
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't use the `xsfoo;' construct in DBX output; this system
+ doesn't support it. */
+
+#define DBX_NO_XREFS
+
+/* Output the .stabs for a C `static' variable in the data section. */
+#define DBX_STATIC_STAB_DATA_SECTION
+
+/* Vax specific: which type character is used for type double? */
+
+#define ASM_DOUBLE_CHAR (TARGET_G_FLOAT ? 'g' : 'd')
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs (".globl ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* The prefix to add to user-visible assembler symbols. */
+
+/* BSD a.out convention: C-level names get a leading underscore. */
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+/* The leading `*' tells assemble_name to emit the name verbatim,
+ without prepending USER_LABEL_PREFIX. */
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s%d", PREFIX, NUM)
+
+/* This is how to output an assembler line defining a `double' constant.
+ It is .dfloat or .gfloat, depending.
+
+ A do {...} while (0) macro must NOT end in a semicolon: with the old
+ trailing `;', an invocation `ASM_OUTPUT_DOUBLE (f, v);' inside an
+ unbraced if/else expanded to two statements and detached the else. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+do { char dstr[30]; \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20e", dstr); \
+ fprintf (FILE, "\t.%cfloat 0%c%s\n", ASM_DOUBLE_CHAR, \
+ ASM_DOUBLE_CHAR, dstr); \
+ } while (0)
+
+/* This is how to output an assembler line defining a `float' constant.
+ (No trailing semicolon after while (0) -- see above.) */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ do { char dstr[30]; \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20e", dstr); \
+ fprintf (FILE, "\t.float 0f%s\n", dstr); } while (0)
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t.long "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\t.word "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\t.byte "), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t.byte 0x%x\n", (VALUE))
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tpushl %s\n", reg_names[REGNO])
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tmovl (sp)+,%s\n", reg_names[REGNO])
+
+/* This is how to output an element of a case-vector that is absolute.
+   (The Vax does not use such vectors,
+   but we must define this macro anyway.)  */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+  fprintf (FILE, "\t.long L%d\n", VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+   Emitted as a 16-bit difference of two local labels.  */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+  fprintf (FILE, "\t.word L%d-L%d\n", VALUE, REL)
+
+/* This is how to output an assembler line
+   that says to advance the location counter
+   to a multiple of 2**LOG bytes.  LOG (the exponent, not the byte
+   count) is passed directly to the .align directive.  */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+  fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* This is how to output an assembler line
+   that says to advance the location counter by SIZE bytes.  */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+  fprintf (FILE, "\t.space %u\n", (SIZE))
+
+/* This says how to output an assembler line
+   to define a global common symbol.  Note that the raw SIZE argument
+   is unused; the alignment-rounded ROUNDED size is what gets emitted.  */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".comm ", (FILE)), \
+  assemble_name ((FILE), (NAME)), \
+  fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* This says how to output an assembler line
+   to define a local common symbol.  As above, ROUNDED is used and
+   SIZE is ignored.  */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+( fputs (".lcomm ", (FILE)), \
+  assemble_name ((FILE), (NAME)), \
+  fprintf ((FILE), ",%u\n", (ROUNDED)))
+
+/* Store in OUTPUT a string (made with alloca) containing
+   an assembler-name for a local static variable named NAME.
+   LABELNO is an integer which is different for each call.
+   NOTE(review): the alloca is performed in the *caller's* frame,
+   so OUTPUT is only valid until the caller returns.  */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+  sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* When debugging, we want to output an extra dummy label so that gas
+   can distinguish between D_float and G_float prior to processing the
+   .stabs directive identifying type double.  The label name encodes
+   ASM_DOUBLE_CHAR ('d' or 'g').  */
+
+#define ASM_IDENTIFY_LANGUAGE(FILE) \
+  do { \
+  output_lang_identify (FILE); \
+  if (write_symbols == DBX_DEBUG) \
+    fprintf (FILE, "___vax_%c_doubles:\n", ASM_DOUBLE_CHAR); \
+  } while (0)
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+   Used for C++ multiple inheritance.
+	.word 0x0ffc	#conservative entry mask: ^m<r2..r11>
+	addl2 $DELTA, 4(ap)	#adjust first argument
+	jmp FUNCTION+2	#jump beyond FUNCTION's entry mask
+ */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+  fprintf (FILE, "\t.word 0x0ffc\n"); \
+  fprintf (FILE, "\taddl2 $%d,4(ap)\n", DELTA); \
+  fprintf (FILE, "\tjmp "); \
+  assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+  fprintf (FILE, "+2\n"); \
+} while (0)
+
+/* Define the parentheses used to group arithmetic operations
+   in assembler code.  */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences (target
+   character-set values, given here in octal).  */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print an instruction operand X on file FILE.
+   CODE is the code from the %-spec that requested printing this operand;
+   if `%z3' was used to print operand 3, then CODE is 'z'.
+
+VAX operand formatting codes:
+
+ letter	   print
+   C	reverse branch condition
+   D	64-bit immediate operand
+   B	the low 8 bits of the complement of a constant operand
+   H	the low 16 bits of the complement of a constant operand
+   M	a mask for the N highest bits of a word
+   N	the complement of a constant integer operand
+   P	constant operand plus 1
+   R	32 - constant operand
+   b	the low 8 bits of a negated constant operand
+   h	the low 16 bits of a negated constant operand
+   #	'd' or 'g' depending on whether dfloat or gfloat is used  */
+
+/* The purpose of D is to get around a quirk or bug in vax assembler
+   whereby -1 in a 64-bit immediate operand means 0x00000000ffffffff,
+   which is not a 64-bit minus one.  */
+
+/* Only '#' is a pure punctuation code (an operand-less %-spec).  */
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+  ((CODE) == '#')
+
+/* Dispatch on CODE (see the table above); constant-int codes print a
+   transformed immediate, otherwise fall through to register / memory /
+   float-constant / generic-constant output.
+   Fixed: the 'h' and 'M' cases formerly referenced a lowercase `x',
+   which compiled only because the expansion site's variable happened
+   to be named x; they now use the macro argument X.  */
+#define PRINT_OPERAND(FILE, X, CODE) \
+{ extern char *rev_cond_name (); \
+  if (CODE == '#') fputc (ASM_DOUBLE_CHAR, FILE); \
+  else if (CODE == 'C') \
+    fputs (rev_cond_name (X), FILE); \
+  else if (CODE == 'D' && GET_CODE (X) == CONST_INT && INTVAL (X) < 0) \
+    fprintf (FILE, "$0xffffffff%08x", INTVAL (X)); \
+  else if (CODE == 'P' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", INTVAL (X) + 1); \
+  else if (CODE == 'N' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", ~ INTVAL (X)); \
+  /* rotl instruction cannot deal with negative arguments. */ \
+  else if (CODE == 'R' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", 32 - INTVAL (X)); \
+  else if (CODE == 'H' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", 0xffff & ~ INTVAL (X)); \
+  else if (CODE == 'h' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", (short) - INTVAL (X)); \
+  else if (CODE == 'B' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", 0xff & ~ INTVAL (X)); \
+  else if (CODE == 'b' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", 0xff & - INTVAL (X)); \
+  else if (CODE == 'M' && GET_CODE (X) == CONST_INT) \
+    fprintf (FILE, "$%d", ~((1 << INTVAL (X)) - 1)); \
+  else if (GET_CODE (X) == REG) \
+    fprintf (FILE, "%s", reg_names[REGNO (X)]); \
+  else if (GET_CODE (X) == MEM) \
+    output_address (XEXP (X, 0)); \
+  else if (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == SFmode) \
+    { REAL_VALUE_TYPE r; char dstr[30]; \
+      REAL_VALUE_FROM_CONST_DOUBLE (r, X); \
+      REAL_VALUE_TO_DECIMAL (r, "%.20e", dstr); \
+      fprintf (FILE, "$0f%s", dstr); } \
+  else if (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == DFmode) \
+    { REAL_VALUE_TYPE r; char dstr[30]; \
+      REAL_VALUE_FROM_CONST_DOUBLE (r, X); \
+      REAL_VALUE_TO_DECIMAL (r, "%.20e", dstr); \
+      fprintf (FILE, "$0%c%s", ASM_DOUBLE_CHAR, dstr); } \
+  else { putc ('$', FILE); output_addr_const (FILE, X); }}
+
+/* Print a memory operand whose address is X, on file FILE.
+   This uses a function in output-vax.c.  */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+  print_operand_address (FILE, ADDR)
diff --git a/gcc/config/vax/vax.md b/gcc/config/vax/vax.md
new file mode 100755
index 0000000..4ca4668
--- /dev/null
+++ b/gcc/config/vax/vax.md
@@ -0,0 +1,2136 @@
+;;- Machine description for GNU compiler, Vax Version
+;; Copyright (C) 1987, 88, 91, 94-96, 1998 Free Software Foundation, Inc.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+
+;;- Instruction patterns. When multiple patterns apply,
+;;- the first one in the file is chosen.
+;;-
+;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+;;-
+;;- cpp macro #define NOTICE_UPDATE_CC in file tm.h handles condition code
+;;- updates for most instructions.
+
+;; We don't want to allow a constant operand for test insns because
+;; (set (cc0) (const_int foo)) has no mode information. Such insns will
+;; be folded while optimizing anyway.
+
+;; Test patterns: set the condition codes (cc0) from a single operand.
+
+(define_insn "tstsi"
+  [(set (cc0)
+       (match_operand:SI 0 "nonimmediate_operand" "g"))]
+  ""
+  "tstl %0")
+
+(define_insn "tsthi"
+  [(set (cc0)
+       (match_operand:HI 0 "nonimmediate_operand" "g"))]
+  ""
+  "tstw %0")
+
+(define_insn "tstqi"
+  [(set (cc0)
+       (match_operand:QI 0 "nonimmediate_operand" "g"))]
+  ""
+  "tstb %0")
+
+(define_insn "tstdf"
+  [(set (cc0)
+       (match_operand:DF 0 "general_operand" "gF"))]
+  ""
+  "tst%# %0")
+
+(define_insn "tstsf"
+  [(set (cc0)
+       (match_operand:SF 0 "general_operand" "gF"))]
+  ""
+  "tstf %0")
+
+;; Two-operand compares.  For the float variants, when operand 1 matches
+;; constraint `G' (first alternative) the compare collapses to a tst.
+
+(define_insn "cmpsi"
+  [(set (cc0)
+       (compare (match_operand:SI 0 "nonimmediate_operand" "g")
+                (match_operand:SI 1 "general_operand" "g")))]
+  ""
+  "cmpl %0,%1")
+
+(define_insn "cmphi"
+  [(set (cc0)
+       (compare (match_operand:HI 0 "nonimmediate_operand" "g")
+                (match_operand:HI 1 "general_operand" "g")))]
+  ""
+  "cmpw %0,%1")
+
+(define_insn "cmpqi"
+  [(set (cc0)
+       (compare (match_operand:QI 0 "nonimmediate_operand" "g")
+                (match_operand:QI 1 "general_operand" "g")))]
+  ""
+  "cmpb %0,%1")
+
+(define_insn "cmpdf"
+  [(set (cc0)
+       (compare (match_operand:DF 0 "general_operand" "gF,gF")
+                (match_operand:DF 1 "general_operand" "G,gF")))]
+  ""
+  "@
+   tst%# %0
+   cmp%# %0,%1")
+
+(define_insn "cmpsf"
+  [(set (cc0)
+       (compare (match_operand:SF 0 "general_operand" "gF,gF")
+                (match_operand:SF 1 "general_operand" "G,gF")))]
+  ""
+  "@
+   tstf %0
+   cmpf %0,%1")
+
+;; Unnamed patterns: set cc0 from an AND without storing the result,
+;; using the VAX `bit' (bit-test) instructions.
+
+(define_insn ""
+  [(set (cc0)
+       (and:SI (match_operand:SI 0 "general_operand" "g")
+               (match_operand:SI 1 "general_operand" "g")))]
+  ""
+  "bitl %0,%1")
+
+(define_insn ""
+  [(set (cc0)
+       (and:HI (match_operand:HI 0 "general_operand" "g")
+               (match_operand:HI 1 "general_operand" "g")))]
+  ""
+  "bitw %0,%1")
+
+(define_insn ""
+  [(set (cc0)
+       (and:QI (match_operand:QI 0 "general_operand" "g")
+               (match_operand:QI 1 "general_operand" "g")))]
+  ""
+  "bitb %0,%1")
+
+;; The vax has no sltu or sgeu patterns, but does have two-operand
+;; add/subtract with carry. This is still better than the alternative.
+;; Since the cc0-using insn cannot be separated from the cc0-setting insn,
+;; and the two are created independently, we can't just use a define_expand
+;; to try to optimize this. (The "movl" and "clrl" insns alter the cc0
+;; flags, but leave the carry flag alone, but that can't easily be expressed.)
+;;
+;; Several two-operator combinations could be added to make slightly more
+;; optimal code, but they'd have to cover all combinations of plus and minus
+;; using match_dup. If you want to do this, I'd suggest changing the "sgeu"
+;; pattern to something like (minus (const_int 1) (ltu ...)), so fewer
+;; patterns need to be recognized.
+;; -- Ken Raeburn (Raeburn@Watch.COM) 24 August 1991.
+
+;; sltu/sgeu materialize the carry flag via two-operand add/subtract
+;; with carry (see the rationale comment above).
+
+(define_insn "sltu"
+  [(set (match_operand:SI 0 "general_operand" "=ro")
+       (ltu (cc0) (const_int 0)))]
+  ""
+  "clrl %0\;adwc $0,%0")
+
+(define_insn "sgeu"
+  [(set (match_operand:SI 0 "general_operand" "=ro")
+       (geu (cc0) (const_int 0)))]
+  ""
+  "movl $1,%0\;sbwc $0,%0")
+
+;; Float and quadword moves; the first alternative (constraint G / I,
+;; the constant zero) uses the shorter clr instruction.
+
+(define_insn "movdf"
+  [(set (match_operand:DF 0 "general_operand" "=g,g")
+       (match_operand:DF 1 "general_operand" "G,gF"))]
+  ""
+  "@
+   clr%# %0
+   mov%# %1,%0")
+
+(define_insn "movsf"
+  [(set (match_operand:SF 0 "general_operand" "=g,g")
+       (match_operand:SF 1 "general_operand" "G,gF"))]
+  ""
+  "@
+   clrf %0
+   movf %1,%0")
+
+;; Some vaxes don't support this instruction.
+;;(define_insn "movti"
+;;  [(set (match_operand:TI 0 "general_operand" "=g")
+;;     (match_operand:TI 1 "general_operand" "g"))]
+;;  ""
+;;  "movh %1,%0")
+
+(define_insn "movdi"
+  [(set (match_operand:DI 0 "general_operand" "=g,g")
+       (match_operand:DI 1 "general_operand" "I,g"))]
+  ""
+  "@
+   clrq %0
+   movq %D1,%0")
+
+;; The VAX move instructions have space-time tradeoffs. On a microVAX
+;; register-register mov instructions take 3 bytes and 2 CPU cycles. clrl
+;; takes 2 bytes and 3 cycles. mov from constant to register takes 2 cycles
+;; if the constant is smaller than 4 bytes, 3 cycles for a longword
+;; constant. movz, mneg, and mcom are as fast as mov, so movzwl is faster
+;; than movl for positive constants that fit in 16 bits but not 6 bits. cvt
+;; instructions take 4 cycles. inc takes 3 cycles. The machine description
+;; is willing to trade 1 byte for 1 cycle (clrl instead of movl $0; cvtwl
+;; instead of movl).
+
+;; Cycle counts for other models may vary (on a VAX 750 they are similar,
+;; but on a VAX 9000 most move and add instructions with one constant
+;; operand take 1 cycle).
+
+;; Loads of constants between 64 and 128 used to be done with
+;; "addl3 $63,#,dst" but this is slower than movzbl and takes as much space.
+
+;; movsi: picks the cheapest of incl / pushab / movab / clrl / mcoml /
+;; movzbl / cvtbl / movzwl / cvtwl / pushl / movl depending on the
+;; source operand, per the space/time notes above.
+
+(define_insn "movsi"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (match_operand:SI 1 "general_operand" "g"))]
+  ""
+  "*
+{
+  rtx link;
+  /* A store of 1 into a reg known to hold 0 can become an increment.  */
+  if (operands[1] == const1_rtx
+      && (link = find_reg_note (insn, REG_WAS_0, 0))
+      /* Make sure the insn that stored the 0 is still present.  */
+      && ! INSN_DELETED_P (XEXP (link, 0))
+      && GET_CODE (XEXP (link, 0)) != NOTE
+      /* Make sure cross jumping didn't happen here.  */
+      && no_labels_between_p (XEXP (link, 0), insn)
+      /* Make sure the reg hasn't been clobbered.  */
+      && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+    return \"incl %0\";
+  if (GET_CODE (operands[1]) == SYMBOL_REF || GET_CODE (operands[1]) == CONST)
+    {
+      if (push_operand (operands[0], SImode))
+       return \"pushab %a1\";
+      return \"movab %a1,%0\";
+    }
+  if (operands[1] == const0_rtx)
+    return \"clrl %0\";
+  /* Constants >= 64 may be loaded more cheaply by complement,
+     zero-extend, or sign-extend of a shorter literal.  */
+  if (GET_CODE (operands[1]) == CONST_INT
+      && (unsigned) INTVAL (operands[1]) >= 64)
+    {
+      int i = INTVAL (operands[1]);
+      if ((unsigned)(~i) < 64)
+       return \"mcoml %N1,%0\";
+      if ((unsigned)i < 0x100)
+       return \"movzbl %1,%0\";
+      if (i >= -0x80 && i < 0)
+       return \"cvtbl %1,%0\";
+      if ((unsigned)i < 0x10000)
+       return \"movzwl %1,%0\";
+      if (i >= -0x8000 && i < 0)
+       return \"cvtwl %1,%0\";
+    }
+  if (push_operand (operands[0], SImode))
+    return \"pushl %1\";
+  return \"movl %1,%0\";
+}")
+
+;; Halfword and byte moves, with the same constant-load tricks as movsi
+;; where they apply.  The movstrict variants store into only the low
+;; part of a register.
+
+(define_insn "movhi"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (match_operand:HI 1 "general_operand" "g"))]
+  ""
+  "*
+{
+  rtx link;
+  if (operands[1] == const1_rtx
+      && (link = find_reg_note (insn, REG_WAS_0, 0))
+      /* Make sure the insn that stored the 0 is still present.  */
+      && ! INSN_DELETED_P (XEXP (link, 0))
+      && GET_CODE (XEXP (link, 0)) != NOTE
+      /* Make sure cross jumping didn't happen here.  */
+      && no_labels_between_p (XEXP (link, 0), insn)
+      /* Make sure the reg hasn't been clobbered.  */
+      && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+    return \"incw %0\";
+
+  if (GET_CODE (operands[1]) == CONST_INT)
+    {
+      int i = INTVAL (operands[1]);
+      if (i == 0)
+       return \"clrw %0\";
+      else if ((unsigned int)i < 64)
+       return \"movw %1,%0\";
+      else if ((unsigned int)~i < 64)
+       return \"mcomw %H1,%0\";
+      else if ((unsigned int)i < 256)
+       return \"movzbw %1,%0\";
+    }
+  return \"movw %1,%0\";
+}")
+
+(define_insn "movstricthi"
+  [(set (strict_low_part (match_operand:HI 0 "register_operand" "=g"))
+       (match_operand:HI 1 "general_operand" "g"))]
+  ""
+  "*
+{
+  if (GET_CODE (operands[1]) == CONST_INT)
+    {
+      int i = INTVAL (operands[1]);
+      if (i == 0)
+       return \"clrw %0\";
+      else if ((unsigned int)i < 64)
+       return \"movw %1,%0\";
+      else if ((unsigned int)~i < 64)
+       return \"mcomw %H1,%0\";
+      else if ((unsigned int)i < 256)
+       return \"movzbw %1,%0\";
+    }
+  return \"movw %1,%0\";
+}")
+
+(define_insn "movqi"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (match_operand:QI 1 "general_operand" "g"))]
+  ""
+  "*
+{
+  rtx link;
+  if (operands[1] == const1_rtx
+      && (link = find_reg_note (insn, REG_WAS_0, 0))
+      /* Make sure the insn that stored the 0 is still present.  */
+      && ! INSN_DELETED_P (XEXP (link, 0))
+      && GET_CODE (XEXP (link, 0)) != NOTE
+      /* Make sure cross jumping didn't happen here.  */
+      && no_labels_between_p (XEXP (link, 0), insn)
+      /* Make sure the reg hasn't been clobbered.  */
+      && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+    return \"incb %0\";
+
+  if (GET_CODE (operands[1]) == CONST_INT)
+    {
+      int i = INTVAL (operands[1]);
+      if (i == 0)
+       return \"clrb %0\";
+      else if ((unsigned int)~i < 64)
+       return \"mcomb %B1,%0\";
+    }
+  return \"movb %1,%0\";
+}")
+
+(define_insn "movstrictqi"
+  [(set (strict_low_part (match_operand:QI 0 "register_operand" "=g"))
+       (match_operand:QI 1 "general_operand" "g"))]
+  ""
+  "*
+{
+  if (GET_CODE (operands[1]) == CONST_INT)
+    {
+      int i = INTVAL (operands[1]);
+      if (i == 0)
+       return \"clrb %0\";
+      else if ((unsigned int)~i < 64)
+       return \"mcomb %B1,%0\";
+    }
+  return \"movb %1,%0\";
+}")
+
+;; This is here to accept 4 arguments and pass the first 3 along
+;; to the movstrhi1 pattern that really does the work.
+;; (Operand 3, the alignment, is dropped: movc3 does not need it.)
+(define_expand "movstrhi"
+  [(set (match_operand:BLK 0 "general_operand" "=g")
+       (match_operand:BLK 1 "general_operand" "g"))
+   (use (match_operand:HI 2 "general_operand" "g"))
+   (match_operand 3 "" "")]
+  ""
+  "
+  emit_insn (gen_movstrhi1 (operands[0], operands[1], operands[2]));
+  DONE;
+")
+
+;; The definition of this insn does not really explain what it does,
+;; but it should suffice
+;; that anything generated as this insn will be recognized as one
+;; and that it won't successfully combine with anything.
+;; (movc3 clobbers r0-r5, hence the six explicit clobbers.)
+(define_insn "movstrhi1"
+  [(set (match_operand:BLK 0 "general_operand" "=g")
+       (match_operand:BLK 1 "general_operand" "g"))
+   (use (match_operand:HI 2 "general_operand" "g"))
+   (clobber (reg:SI 0))
+   (clobber (reg:SI 1))
+   (clobber (reg:SI 2))
+   (clobber (reg:SI 3))
+   (clobber (reg:SI 4))
+   (clobber (reg:SI 5))]
+  ""
+  "movc3 %2,%1,%0")
+
+;; Extension and truncation insns.
+;; Integer narrowing/widening maps directly onto cvt/movz instructions.
+
+(define_insn "truncsiqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (truncate:QI (match_operand:SI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtlb %1,%0")
+
+(define_insn "truncsihi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (truncate:HI (match_operand:SI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtlw %1,%0")
+
+(define_insn "trunchiqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (truncate:QI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtwb %1,%0")
+
+(define_insn "extendhisi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtwl %1,%0")
+
+(define_insn "extendqihi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtbw %1,%0")
+
+(define_insn "extendqisi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtbl %1,%0")
+
+;; %# expands to ASM_DOUBLE_CHAR ('d' or 'g'), selecting the
+;; double-float flavor in the cvt mnemonic.
+
+(define_insn "extendsfdf2"
+  [(set (match_operand:DF 0 "general_operand" "=g")
+       (float_extend:DF (match_operand:SF 1 "general_operand" "gF")))]
+  ""
+  "cvtf%# %1,%0")
+
+(define_insn "truncdfsf2"
+  [(set (match_operand:SF 0 "general_operand" "=g")
+       (float_truncate:SF (match_operand:DF 1 "general_operand" "gF")))]
+  ""
+  "cvt%#f %1,%0")
+
+(define_insn "zero_extendhisi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "g")))]
+  ""
+  "movzwl %1,%0")
+
+(define_insn "zero_extendqihi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "movzbw %1,%0")
+
+(define_insn "zero_extendqisi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "movzbl %1,%0")
+
+;; Fix-to-float conversion insns.
+
+(define_insn "floatsisf2"
+  [(set (match_operand:SF 0 "general_operand" "=g")
+       (float:SF (match_operand:SI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtlf %1,%0")
+
+(define_insn "floatsidf2"
+  [(set (match_operand:DF 0 "general_operand" "=g")
+       (float:DF (match_operand:SI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtl%# %1,%0")
+
+(define_insn "floathisf2"
+  [(set (match_operand:SF 0 "general_operand" "=g")
+       (float:SF (match_operand:HI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtwf %1,%0")
+
+(define_insn "floathidf2"
+  [(set (match_operand:DF 0 "general_operand" "=g")
+       (float:DF (match_operand:HI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtw%# %1,%0")
+
+(define_insn "floatqisf2"
+  [(set (match_operand:SF 0 "general_operand" "=g")
+       (float:SF (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtbf %1,%0")
+
+(define_insn "floatqidf2"
+  [(set (match_operand:DF 0 "general_operand" "=g")
+       (float:DF (match_operand:QI 1 "nonimmediate_operand" "g")))]
+  ""
+  "cvtb%# %1,%0")
+
+;; Float-to-fix conversion insns.
+
+(define_insn "fix_truncsfqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (fix:QI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+  ""
+  "cvtfb %1,%0")
+
+(define_insn "fix_truncsfhi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (fix:HI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+  ""
+  "cvtfw %1,%0")
+
+(define_insn "fix_truncsfsi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (fix:SI (fix:SF (match_operand:SF 1 "general_operand" "gF"))))]
+  ""
+  "cvtfl %1,%0")
+
+(define_insn "fix_truncdfqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (fix:QI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+  ""
+  "cvt%#b %1,%0")
+
+(define_insn "fix_truncdfhi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (fix:HI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+  ""
+  "cvt%#w %1,%0")
+
+(define_insn "fix_truncdfsi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (fix:SI (fix:DF (match_operand:DF 1 "general_operand" "gF"))))]
+  ""
+  "cvt%#l %1,%0")
+
+;;- All kinds of add instructions.
+;; Three alternatives: destination matches operand 1, destination
+;; matches operand 2 (commutative), or the full 3-operand form.
+
+(define_insn "adddf3"
+  [(set (match_operand:DF 0 "general_operand" "=g,g,g")
+       (plus:DF (match_operand:DF 1 "general_operand" "0,gF,gF")
+                (match_operand:DF 2 "general_operand" "gF,0,gF")))]
+  ""
+  "@
+   add%#2 %2,%0
+   add%#2 %1,%0
+   add%#3 %1,%2,%0")
+
+(define_insn "addsf3"
+  [(set (match_operand:SF 0 "general_operand" "=g,g,g")
+       (plus:SF (match_operand:SF 1 "general_operand" "0,gF,gF")
+                (match_operand:SF 2 "general_operand" "gF,0,gF")))]
+  ""
+  "@
+   addf2 %2,%0
+   addf2 %1,%0
+   addf3 %1,%2,%0")
+
+/* The space-time-opcode tradeoffs for addition vary by model of VAX.
+
+ On a VAX 3 "movab (r1)[r2],r3" is faster than "addl3 r1,r2,r3",
+ but it not faster on other models.
+
+ "movab #(r1),r2" is usually shorter than "addl3 #,r1,r2", and is
+ faster on a VAX 3, but some VAXes (e.g. VAX 9000) will stall if
+ a register is used in an address too soon after it is set.
+ Compromise by using movab only when it is shorter than the add
+ or the base register in the address is one of sp, ap, and fp,
+ which are not modified very often. */
+
+
+;; addsi3: applies the movab/pushab tradeoffs described in the comment
+;; above, plus inc/dec and subtract-of-small-literal rewrites.
+
+(define_insn "addsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+       (plus:SI (match_operand:SI 1 "general_operand" "g")
+                (match_operand:SI 2 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (rtx_equal_p (operands[0], operands[1]))
+    {
+      if (operands[2] == const1_rtx)
+       return \"incl %0\";
+      if (operands[2] == constm1_rtx)
+       return \"decl %0\";
+      /* A negative literal that fits in 6 bits is cheaper subtracted.  */
+      if (GET_CODE (operands[2]) == CONST_INT
+         && (unsigned) (- INTVAL (operands[2])) < 64)
+       return \"subl2 $%n2,%0\";
+      if (GET_CODE (operands[2]) == CONST_INT
+         && (unsigned) INTVAL (operands[2]) >= 64
+         && GET_CODE (operands[1]) == REG
+         && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
+              || REGNO (operands[1]) > 11))
+       return \"movab %c2(%1),%0\";
+      return \"addl2 %2,%0\";
+    }
+  if (rtx_equal_p (operands[0], operands[2]))
+    return \"addl2 %1,%0\";
+
+  if (GET_CODE (operands[2]) == CONST_INT
+      && INTVAL (operands[2]) < 32767
+      && INTVAL (operands[2]) > -32768
+      && GET_CODE (operands[1]) == REG
+      && push_operand (operands[0], SImode))
+    return \"pushab %c2(%1)\";
+
+  if (GET_CODE (operands[2]) == CONST_INT
+      && (unsigned) (- INTVAL (operands[2])) < 64)
+    return \"subl3 $%n2,%1,%0\";
+
+  if (GET_CODE (operands[2]) == CONST_INT
+      && (unsigned) INTVAL (operands[2]) >= 64
+      && GET_CODE (operands[1]) == REG
+      && ((INTVAL (operands[2]) < 32767 && INTVAL (operands[2]) > -32768)
+          || REGNO (operands[1]) > 11))
+    return \"movab %c2(%1),%0\";
+
+  /* Add this if using gcc on a VAX 3xxx:
+  if (REG_P (operands[1]) && REG_P (operands[2]))
+    return \"movab (%1)[%2],%0\";
+  */
+  return \"addl3 %1,%2,%0\";
+}")
+
+(define_insn "addhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+       (plus:HI (match_operand:HI 1 "general_operand" "g")
+                (match_operand:HI 2 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (rtx_equal_p (operands[0], operands[1]))
+    {
+      if (operands[2] == const1_rtx)
+       return \"incw %0\";
+      if (operands[2] == constm1_rtx)
+       return \"decw %0\";
+      if (GET_CODE (operands[2]) == CONST_INT
+         && (unsigned) (- INTVAL (operands[2])) < 64)
+       return \"subw2 $%n2,%0\";
+      return \"addw2 %2,%0\";
+    }
+  if (rtx_equal_p (operands[0], operands[2]))
+    return \"addw2 %1,%0\";
+  if (GET_CODE (operands[2]) == CONST_INT
+      && (unsigned) (- INTVAL (operands[2])) < 64)
+    return \"subw3 $%n2,%1,%0\";
+  return \"addw3 %1,%2,%0\";
+}")
+
+(define_insn "addqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+       (plus:QI (match_operand:QI 1 "general_operand" "g")
+                (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (rtx_equal_p (operands[0], operands[1]))
+    {
+      if (operands[2] == const1_rtx)
+       return \"incb %0\";
+      if (operands[2] == constm1_rtx)
+       return \"decb %0\";
+      if (GET_CODE (operands[2]) == CONST_INT
+         && (unsigned) (- INTVAL (operands[2])) < 64)
+       return \"subb2 $%n2,%0\";
+      return \"addb2 %2,%0\";
+    }
+  if (rtx_equal_p (operands[0], operands[2]))
+    return \"addb2 %1,%0\";
+  if (GET_CODE (operands[2]) == CONST_INT
+      && (unsigned) (- INTVAL (operands[2])) < 64)
+    return \"subb3 $%n2,%1,%0\";
+  return \"addb3 %1,%2,%0\";
+}")
+
+;; The add-with-carry (adwc) instruction only accepts two operands.
+;; The quadword is split into low/high halves: add the low words first,
+;; then add the high words with adwc (or fall back to plain addsi3 when
+;; no carry can be produced).
+(define_insn "adddi3"
+  [(set (match_operand:DI 0 "general_operand" "=ro>,ro>")
+       (plus:DI (match_operand:DI 1 "general_operand" "%0,ro>")
+                (match_operand:DI 2 "general_operand" "Fro,F")))]
+  ""
+  "*
+{
+  rtx low[3];
+  char *pattern;
+  int carry = 1;
+
+  split_quadword_operands (operands, low, 3);
+  /* Add low parts.  */
+  if (rtx_equal_p (operands[0], operands[1]))
+    {
+      if (low[2] == const0_rtx)
+       /* Should examine operand, punt if not POST_INC.  */
+       pattern = \"tstl %0\", carry = 0;
+      else if (low[2] == const1_rtx)
+       pattern = \"incl %0\";
+      else
+       pattern = \"addl2 %2,%0\";
+    }
+  else
+    {
+      if (low[2] == const0_rtx)
+       pattern = \"movl %1,%0\", carry = 0;
+      else
+       pattern = \"addl3 %2,%1,%0\";
+    }
+  if (pattern)
+    output_asm_insn (pattern, low);
+  if (!carry)
+    /* If CARRY is 0, we don't have any carry value to worry about.  */
+    return OUT_FCN (CODE_FOR_addsi3) (operands, insn);
+  /* %0 = C + %1 + %2 */
+  if (!rtx_equal_p (operands[0], operands[1]))
+    output_asm_insn ((operands[1] == const0_rtx
+                     ? \"clrl %0\"
+                     : \"movl %1,%0\"), operands);
+  return \"adwc %2,%0\";
+}")
+
+;;- All kinds of subtract instructions.
+;; Subtraction is not commutative, so only two alternatives:
+;; in-place (destination matches operand 1) and 3-operand.
+
+(define_insn "subdf3"
+  [(set (match_operand:DF 0 "general_operand" "=g,g")
+       (minus:DF (match_operand:DF 1 "general_operand" "0,gF")
+                 (match_operand:DF 2 "general_operand" "gF,gF")))]
+  ""
+  "@
+   sub%#2 %2,%0
+   sub%#3 %2,%1,%0")
+
+(define_insn "subsf3"
+  [(set (match_operand:SF 0 "general_operand" "=g,g")
+       (minus:SF (match_operand:SF 1 "general_operand" "0,gF")
+                 (match_operand:SF 2 "general_operand" "gF,gF")))]
+  ""
+  "@
+   subf2 %2,%0
+   subf3 %2,%1,%0")
+
+(define_insn "subsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g,g")
+       (minus:SI (match_operand:SI 1 "general_operand" "0,g")
+                 (match_operand:SI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   subl2 %2,%0
+   subl3 %2,%1,%0")
+
+(define_insn "subhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g,g")
+       (minus:HI (match_operand:HI 1 "general_operand" "0,g")
+                 (match_operand:HI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   subw2 %2,%0
+   subw3 %2,%1,%0")
+
+(define_insn "subqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g,g")
+       (minus:QI (match_operand:QI 1 "general_operand" "0,g")
+                 (match_operand:QI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   subb2 %2,%0
+   subb3 %2,%1,%0")
+
+;; The subtract-with-carry (sbwc) instruction only takes two operands.
+;; Mirror of adddi3: subtract the low halves first, then the high
+;; halves with sbwc (or plain subsi3 when no borrow is possible).
+(define_insn "subdi3"
+  [(set (match_operand:DI 0 "general_operand" "=or>,or>")
+       (minus:DI (match_operand:DI 1 "general_operand" "0,or>")
+                 (match_operand:DI 2 "general_operand" "For,F")))]
+  ""
+  "*
+{
+  rtx low[3];
+  char *pattern;
+  int carry = 1;
+
+  split_quadword_operands (operands, low, 3);
+  /* Subtract low parts.  */
+  if (rtx_equal_p (operands[0], operands[1]))
+    {
+      if (low[2] == const0_rtx)
+       pattern = 0, carry = 0;
+      else if (low[2] == constm1_rtx)
+       pattern = \"decl %0\";
+      else
+       pattern = \"subl2 %2,%0\";
+    }
+  else
+    {
+      if (low[2] == constm1_rtx)
+       pattern = \"decl %0\";
+      else if (low[2] == const0_rtx)
+       pattern = OUT_FCN (CODE_FOR_movsi) (low, insn), carry = 0;
+      else
+       pattern = \"subl3 %2,%1,%0\";
+    }
+  if (pattern)
+    output_asm_insn (pattern, low);
+  if (carry)
+    {
+      if (!rtx_equal_p (operands[0], operands[1]))
+       return \"movl %1,%0\;sbwc %2,%0\";
+      return \"sbwc %2,%0\";
+      /* %0 = %2 - %1 - C */
+    }
+  return OUT_FCN (CODE_FOR_subsi3) (operands, insn);
+}")
+
+;;- Multiply instructions.
+;; Same three-alternative scheme as the add patterns (multiplication
+;; is commutative, so the destination may match either input).
+
+(define_insn "muldf3"
+  [(set (match_operand:DF 0 "general_operand" "=g,g,g")
+       (mult:DF (match_operand:DF 1 "general_operand" "0,gF,gF")
+                (match_operand:DF 2 "general_operand" "gF,0,gF")))]
+  ""
+  "@
+   mul%#2 %2,%0
+   mul%#2 %1,%0
+   mul%#3 %1,%2,%0")
+
+(define_insn "mulsf3"
+  [(set (match_operand:SF 0 "general_operand" "=g,g,g")
+       (mult:SF (match_operand:SF 1 "general_operand" "0,gF,gF")
+                (match_operand:SF 2 "general_operand" "gF,0,gF")))]
+  ""
+  "@
+   mulf2 %2,%0
+   mulf2 %1,%0
+   mulf3 %1,%2,%0")
+
+(define_insn "mulsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+       (mult:SI (match_operand:SI 1 "general_operand" "0,g,g")
+                (match_operand:SI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   mull2 %2,%0
+   mull2 %1,%0
+   mull3 %1,%2,%0")
+
+;; Fixed: operand 0's constraint read "=g,g," -- the third alternative
+;; was empty although operands 1/2 and the output template have three
+;; alternatives.  It must be "=g,g,g" to match the mulw3 form.
+(define_insn "mulhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+       (mult:HI (match_operand:HI 1 "general_operand" "0,g,g")
+                (match_operand:HI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   mulw2 %2,%0
+   mulw2 %1,%0
+   mulw3 %1,%2,%0")
+
+(define_insn "mulqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+       (mult:QI (match_operand:QI 1 "general_operand" "0,g,g")
+                (match_operand:QI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   mulb2 %2,%0
+   mulb2 %1,%0
+   mulb3 %1,%2,%0")
+
+;; Widening 32x32->64 multiply via emul (the third emul operand is the
+;; 32-bit addend; $0 when there is nothing to add).
+
+(define_insn "mulsidi3"
+  [(set (match_operand:DI 0 "general_operand" "=g")
+       (mult:DI (sign_extend:DI
+                 (match_operand:SI 1 "nonimmediate_operand" "g"))
+                (sign_extend:DI
+                 (match_operand:SI 2 "nonimmediate_operand" "g"))))]
+  ""
+  "emul %1,%2,$0,%0")
+
+(define_insn ""
+  [(set (match_operand:DI 0 "general_operand" "=g")
+       (plus:DI
+        (mult:DI (sign_extend:DI
+                  (match_operand:SI 1 "nonimmediate_operand" "g"))
+                 (sign_extend:DI
+                  (match_operand:SI 2 "nonimmediate_operand" "g")))
+        (sign_extend:DI (match_operand:SI 3 "nonimmediate_operand" "g"))))]
+  ""
+  "emul %1,%2,%3,%0")
+
+;; 'F' constraint means type CONST_DOUBLE.  The condition checks that
+;; the 64-bit addend is really a sign-extended 32-bit value, which is
+;; what emul's addend operand requires.
+(define_insn ""
+  [(set (match_operand:DI 0 "general_operand" "=g")
+       (plus:DI
+        (mult:DI (sign_extend:DI
+                  (match_operand:SI 1 "nonimmediate_operand" "g"))
+                 (sign_extend:DI
+                  (match_operand:SI 2 "nonimmediate_operand" "g")))
+        (match_operand:DI 3 "immediate_operand" "F")))]
+  "GET_CODE (operands[3]) == CONST_DOUBLE
+    && CONST_DOUBLE_HIGH (operands[3]) == (CONST_DOUBLE_LOW (operands[3]) >> 31)"
+  "*
+{
+  if (CONST_DOUBLE_HIGH (operands[3]))
+    operands[3] = GEN_INT (CONST_DOUBLE_LOW (operands[3]));
+  return \"emul %1,%2,%3,%0\";
+}")
+
+;;- Divide instructions.
+;; Two alternatives each: in-place (destination matches the dividend)
+;; and the 3-operand form.
+
+(define_insn "divdf3"
+  [(set (match_operand:DF 0 "general_operand" "=g,g")
+       (div:DF (match_operand:DF 1 "general_operand" "0,gF")
+               (match_operand:DF 2 "general_operand" "gF,gF")))]
+  ""
+  "@
+   div%#2 %2,%0
+   div%#3 %2,%1,%0")
+
+(define_insn "divsf3"
+  [(set (match_operand:SF 0 "general_operand" "=g,g")
+       (div:SF (match_operand:SF 1 "general_operand" "0,gF")
+               (match_operand:SF 2 "general_operand" "gF,gF")))]
+  ""
+  "@
+   divf2 %2,%0
+   divf3 %2,%1,%0")
+
+(define_insn "divsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g,g")
+       (div:SI (match_operand:SI 1 "general_operand" "0,g")
+               (match_operand:SI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   divl2 %2,%0
+   divl3 %2,%1,%0")
+
+(define_insn "divhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g,g")
+       (div:HI (match_operand:HI 1 "general_operand" "0,g")
+               (match_operand:HI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   divw2 %2,%0
+   divw3 %2,%1,%0")
+
+(define_insn "divqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g,g")
+       (div:QI (match_operand:QI 1 "general_operand" "0,g")
+               (match_operand:QI 2 "general_operand" "g,g")))]
+  ""
+  "@
+   divb2 %2,%0
+   divb3 %2,%1,%0")
+
+;This is left out because it is very slow;
+;we are better off programming around the "lack" of this insn.
+;(define_insn "divmoddisi4"
+;  [(set (match_operand:SI 0 "general_operand" "=g")
+;      (div:SI (match_operand:DI 1 "general_operand" "g")
+;              (match_operand:SI 2 "general_operand" "g")))
+;   (set (match_operand:SI 3 "general_operand" "=g")
+;      (mod:SI (match_operand:DI 1 "general_operand" "g")
+;              (match_operand:SI 2 "general_operand" "g")))]
+;  ""
+;  "ediv %2,%1,%0,%3")
+
+;; Bit-and on the vax is done with a clear-bits insn.
+;; The expanders below rewrite (and X Y) as (and (not X') Y') so the bic
+;; patterns can match; a constant operand is complemented at compile time,
+;; a non-constant one via an explicit one's-complement insn.
+(define_expand "andsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(and:SI (not:SI (match_operand:SI 1 "general_operand" "g"))
+		(match_operand:SI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  rtx op1 = operands[1];
+
+  /* If there is a constant argument, complement that one. */
+  if (GET_CODE (operands[2]) == CONST_INT && GET_CODE (op1) != CONST_INT)
+    {
+      operands[1] = operands[2];
+      operands[2] = op1;
+      op1 = operands[1];
+    }
+
+  if (GET_CODE (op1) == CONST_INT)
+    operands[1] = GEN_INT (~INTVAL (op1));
+  else
+    operands[1] = expand_unop (SImode, one_cmpl_optab, op1, 0, 1);
+}")
+
+(define_expand "andhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+	(and:HI (not:HI (match_operand:HI 1 "general_operand" "g"))
+		(match_operand:HI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  rtx op1 = operands[1];
+
+  /* If there is a constant argument, complement that one. */
+  if (GET_CODE (operands[2]) == CONST_INT && GET_CODE (op1) != CONST_INT)
+    {
+      operands[1] = operands[2];
+      operands[2] = op1;
+      op1 = operands[1];
+    }
+
+  /* Mask the complement down to 16 bits for HImode.  */
+  if (GET_CODE (op1) == CONST_INT)
+    operands[1] = GEN_INT (65535 & ~INTVAL (op1));
+  else
+    operands[1] = expand_unop (HImode, one_cmpl_optab, op1, 0, 1);
+}")
+
+(define_expand "andqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+	(and:QI (not:QI (match_operand:QI 1 "general_operand" "g"))
+		(match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  rtx op1 = operands[1];
+
+  /* If there is a constant argument, complement that one. */
+  if (GET_CODE (operands[2]) == CONST_INT && GET_CODE (op1) != CONST_INT)
+    {
+      operands[1] = operands[2];
+      operands[2] = op1;
+      op1 = operands[1];
+    }
+
+  /* Mask the complement down to 8 bits for QImode.  */
+  if (GET_CODE (op1) == CONST_INT)
+    operands[1] = GEN_INT (255 & ~INTVAL (op1));
+  else
+    operands[1] = expand_unop (QImode, one_cmpl_optab, op1, 0, 1);
+}")
+
+;; The bic patterns the expanders above produce.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g,g")
+	(and:SI (not:SI (match_operand:SI 1 "general_operand" "g,g"))
+		(match_operand:SI 2 "general_operand" "0,g")))]
+  ""
+  "@
+   bicl2 %1,%0
+   bicl3 %1,%2,%0")
+
+(define_insn ""
+  [(set (match_operand:HI 0 "general_operand" "=g,g")
+	(and:HI (not:HI (match_operand:HI 1 "general_operand" "g,g"))
+		(match_operand:HI 2 "general_operand" "0,g")))]
+  ""
+  "@
+   bicw2 %1,%0
+   bicw3 %1,%2,%0")
+
+(define_insn ""
+  [(set (match_operand:QI 0 "general_operand" "=g,g")
+	(and:QI (not:QI (match_operand:QI 1 "general_operand" "g,g"))
+		(match_operand:QI 2 "general_operand" "0,g")))]
+  ""
+  "@
+   bicb2 %1,%0
+   bicb3 %1,%2,%0")
+
+;; The following used to be needed because constant propagation can
+;; create them starting from the bic insn patterns above.  This is no
+;; longer a problem.  However, having these patterns allows optimization
+;; opportunities in combine.c.
+;; %N/%H/%B print the one's complement of the constant in the
+;; appropriate width, matching bic's clear-mask semantics.
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g,g")
+	(and:SI (match_operand:SI 1 "general_operand" "0,g")
+		(match_operand:SI 2 "const_int_operand" "n,n")))]
+  ""
+  "@
+   bicl2 %N2,%0
+   bicl3 %N2,%1,%0")
+
+(define_insn ""
+  [(set (match_operand:HI 0 "general_operand" "=g,g")
+	(and:HI (match_operand:HI 1 "general_operand" "0,g")
+		(match_operand:HI 2 "const_int_operand" "n,n")))]
+  ""
+  "@
+   bicw2 %H2,%0
+   bicw3 %H2,%1,%0")
+
+(define_insn ""
+  [(set (match_operand:QI 0 "general_operand" "=g,g")
+	(and:QI (match_operand:QI 1 "general_operand" "0,g")
+		(match_operand:QI 2 "const_int_operand" "n,n")))]
+  ""
+  "@
+   bicb2 %B2,%0
+   bicb3 %B2,%1,%0")
+
+;;- Bit set instructions.
+;; Three alternatives each: dst == src1, dst == src2 (commutative), and
+;; the general 3-operand form.
+
+(define_insn "iorsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+	(ior:SI (match_operand:SI 1 "general_operand" "0,g,g")
+		(match_operand:SI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   bisl2 %2,%0
+   bisl2 %1,%0
+   bisl3 %2,%1,%0")
+
+(define_insn "iorhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+	(ior:HI (match_operand:HI 1 "general_operand" "0,g,g")
+		(match_operand:HI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   bisw2 %2,%0
+   bisw2 %1,%0
+   bisw3 %2,%1,%0")
+
+(define_insn "iorqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+	(ior:QI (match_operand:QI 1 "general_operand" "0,g,g")
+		(match_operand:QI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   bisb2 %2,%0
+   bisb2 %1,%0
+   bisb3 %2,%1,%0")
+
+;;- xor instructions.
+
+(define_insn "xorsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g,g,g")
+	(xor:SI (match_operand:SI 1 "general_operand" "0,g,g")
+		(match_operand:SI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   xorl2 %2,%0
+   xorl2 %1,%0
+   xorl3 %2,%1,%0")
+
+(define_insn "xorhi3"
+  [(set (match_operand:HI 0 "general_operand" "=g,g,g")
+	(xor:HI (match_operand:HI 1 "general_operand" "0,g,g")
+		(match_operand:HI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   xorw2 %2,%0
+   xorw2 %1,%0
+   xorw3 %2,%1,%0")
+
+(define_insn "xorqi3"
+  [(set (match_operand:QI 0 "general_operand" "=g,g,g")
+	(xor:QI (match_operand:QI 1 "general_operand" "0,g,g")
+		(match_operand:QI 2 "general_operand" "g,0,g")))]
+  ""
+  "@
+   xorb2 %2,%0
+   xorb2 %1,%0
+   xorb3 %2,%1,%0")
+
+;;- Negate (mneg) and one's-complement (mcom) instructions.
+(define_insn "negdf2"
+  [(set (match_operand:DF 0 "general_operand" "=g")
+	(neg:DF (match_operand:DF 1 "general_operand" "gF")))]
+  ""
+  "mneg%# %1,%0")
+
+(define_insn "negsf2"
+  [(set (match_operand:SF 0 "general_operand" "=g")
+	(neg:SF (match_operand:SF 1 "general_operand" "gF")))]
+  ""
+  "mnegf %1,%0")
+
+(define_insn "negsi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(neg:SI (match_operand:SI 1 "general_operand" "g")))]
+  ""
+  "mnegl %1,%0")
+
+(define_insn "neghi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+	(neg:HI (match_operand:HI 1 "general_operand" "g")))]
+  ""
+  "mnegw %1,%0")
+
+(define_insn "negqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+	(neg:QI (match_operand:QI 1 "general_operand" "g")))]
+  ""
+  "mnegb %1,%0")
+
+(define_insn "one_cmplsi2"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(not:SI (match_operand:SI 1 "general_operand" "g")))]
+  ""
+  "mcoml %1,%0")
+
+(define_insn "one_cmplhi2"
+  [(set (match_operand:HI 0 "general_operand" "=g")
+	(not:HI (match_operand:HI 1 "general_operand" "g")))]
+  ""
+  "mcomw %1,%0")
+
+(define_insn "one_cmplqi2"
+  [(set (match_operand:QI 0 "general_operand" "=g")
+	(not:QI (match_operand:QI 1 "general_operand" "g")))]
+  ""
+  "mcomb %1,%0")
+
+;; Arithmetic right shift on the vax works by negating the shift count,
+;; then emitting a right shift with the shift count negated.  This means
+;; that all actual shift counts in the RTL will be positive.  This
+;; prevents converting shifts to ZERO_EXTRACTs with negative positions,
+;; which isn't valid.
+(define_expand "ashrsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+		     (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  if (GET_CODE (operands[2]) != CONST_INT)
+    operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]))
;
+}")
+
+;; Constant-count right shift: ashl with the count printed negated (%n).
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+		     (match_operand:QI 2 "const_int_operand" "n")))]
+  ""
+  "ashl $%n2,%1,%0")
+
+;; Variable-count right shift: the NEG wrapper from the expander above.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+		     (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+  ""
+  "ashl %2,%1,%0")
+
+(define_insn "ashlsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(ashift:SI (match_operand:SI 1 "general_operand" "g")
+		   (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "*
+{
+  /* Small constant shifts of a register have cheaper equivalents:
+     x<<1 is an add; x<<2 / x<<3 use indexed address arithmetic, where
+     the index is scaled by the operand size (longword = 4, quad = 8).  */
+  if (operands[2] == const1_rtx && rtx_equal_p (operands[0], operands[1]))
+    return \"addl2 %0,%0\";
+  if (GET_CODE (operands[1]) == REG
+      && GET_CODE (operands[2]) == CONST_INT)
+    {
+      int i = INTVAL (operands[2]);
+      if (i == 1)
+	return \"addl3 %1,%1,%0\";
+      if (i == 2)
+	return \"moval 0[%1],%0\";
+      if (i == 3)
+	return \"movad 0[%1],%0\";
+    }
+  return \"ashl %2,%1,%0\";
+}")
+
+;; Arithmetic right shift on the vax works by negating the shift count.
+(define_expand "ashrdi3"
+  [(set (match_operand:DI 0 "general_operand" "=g")
+	(ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+		     (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn "ashldi3"
+  [(set (match_operand:DI 0 "general_operand" "=g")
+	(ashift:DI (match_operand:DI 1 "general_operand" "g")
+		   (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "ashq %2,%1,%0")
+
+(define_insn ""
+  [(set (match_operand:DI 0 "general_operand" "=g")
+	(ashiftrt:DI (match_operand:DI 1 "general_operand" "g")
+		     (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+  ""
+  "ashq %2,%1,%0")
+
+;; We used to have expand_shift handle logical right shifts by using extzv,
+;; but this makes it very difficult to do lshrdi3.  Since the VAX is the
+;; only machine with this kludge, it's better to just do this with a
+;; define_expand and remove that case from expand_shift.
+
+;; Logical right shift x >> c == extract the top (32 - c) bits at position c.
+(define_expand "lshrsi3"
+  [(set (match_dup 3)
+	(minus:QI (const_int 32)
+		  (match_dup 4)))
+   (set (match_operand:SI 0 "general_operand" "=g")
+	(zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+			 (match_dup 3)
+			 (match_operand:SI 2 "register_operand" "g")))]
+  ""
+  "
+{
+  operands[3] = gen_reg_rtx (QImode);
+  operands[4] = gen_lowpart (QImode, operands[2]);
+}")
+
+;; Rotate right on the vax works by negating the shift count.
+(define_expand "rotrsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(rotatert:SI (match_operand:SI 1 "general_operand" "g")
+		     (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "
+{
+  if (GET_CODE (operands[2]) != CONST_INT)
+    operands[2] = gen_rtx (NEG, QImode, negate_rtx (QImode, operands[2]));
+}")
+
+(define_insn "rotlsi3"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(rotate:SI (match_operand:SI 1 "general_operand" "g")
+		   (match_operand:QI 2 "general_operand" "g")))]
+  ""
+  "rotl %2,%1,%0")
+
+;; Constant rotate right: %R prints 32 minus the count.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(rotatert:SI (match_operand:SI 1 "general_operand" "g")
+		     (match_operand:QI 2 "const_int_operand" "n")))]
+  ""
+  "rotl %R2,%1,%0")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(rotatert:SI (match_operand:SI 1 "general_operand" "g")
+		     (neg:QI (match_operand:QI 2 "general_operand" "g"))))]
+  ""
+  "rotl %2,%1,%0")
+
+;This insn is probably slower than a multiply and an add.
+;(define_insn ""
+; [(set (match_operand:SI 0 "general_operand" "=g")
+; (mult:SI (plus:SI (match_operand:SI 1 "general_operand" "g")
+; (match_operand:SI 2 "general_operand" "g"))
+; (match_operand:SI 3 "general_operand" "g")))]
+; ""
+; "index %1,$0x80000000,$0x7fffffff,%3,%2,%0")
+
+;; Special cases of bit-field insns which we should
+;; recognize in preference to the general case.
+;; These handle aligned 8-bit and 16-bit fields,
+;; which can usually be done with move instructions.
+
+;; Store into an aligned byte/word field.
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+ro")
+			 (match_operand:QI 1 "const_int_operand" "n")
+			 (match_operand:SI 2 "const_int_operand" "n"))
+	(match_operand:SI 3 "general_operand" "g"))]
+  "(INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
+   && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
+   && (GET_CODE (operands[0]) == REG
+       || ! mode_dependent_address_p (XEXP (operands[0], 0)))"
+  "*
+{
+  /* A register destination with a nonzero bit position still needs insv;
+     a memory destination is re-addressed at the byte offset instead.  */
+  if (REG_P (operands[0]))
+    {
+      if (INTVAL (operands[2]) != 0)
+	return \"insv %3,%2,%1,%0\";
+    }
+  else
+    operands[0]
+      = adj_offsettable_operand (operands[0], INTVAL (operands[2]) / 8);
+
+  CC_STATUS_INIT;
+  if (INTVAL (operands[1]) == 8)
+    return \"movb %3,%0\";
+  return \"movw %3,%0\";
+}")
+
+;; Zero-extend an aligned byte/word field.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=&g")
+	(zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
+			 (match_operand:QI 2 "const_int_operand" "n")
+			 (match_operand:SI 3 "const_int_operand" "n")))]
+  "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+   && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+   && (GET_CODE (operands[1]) == REG
+       || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+  "*
+{
+  if (REG_P (operands[1]))
+    {
+      if (INTVAL (operands[3]) != 0)
+	return \"extzv %3,%2,%1,%0\";
+    }
+  else
+    operands[1]
+      = adj_offsettable_operand (operands[1], INTVAL (operands[3]) / 8);
+
+  if (INTVAL (operands[2]) == 8)
+    return \"movzbl %1,%0\";
+  return \"movzwl %1,%0\";
+}")
+
+;; Sign-extend an aligned byte/word field.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
+			 (match_operand:QI 2 "const_int_operand" "n")
+			 (match_operand:SI 3 "const_int_operand" "n")))]
+  "(INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
+   && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
+   && (GET_CODE (operands[1]) == REG
+       || ! mode_dependent_address_p (XEXP (operands[1], 0)))"
+  "*
+{
+  if (REG_P (operands[1]))
+    {
+      if (INTVAL (operands[3]) != 0)
+	return \"extv %3,%2,%1,%0\";
+    }
+  else
+    operands[1]
+      = adj_offsettable_operand (operands[1], INTVAL (operands[3]) / 8);
+
+  if (INTVAL (operands[2]) == 8)
+    return \"cvtbl %1,%0\";
+  return \"cvtwl %1,%0\";
+}")
+
+;; Register-only SImode cases of bit-field insns.
+
+(define_insn ""
+  [(set (cc0)
+	(compare
+	 (sign_extract:SI (match_operand:SI 0 "register_operand" "r")
+			  (match_operand:QI 1 "general_operand" "g")
+			  (match_operand:SI 2 "general_operand" "g"))
+	 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "cmpv %2,%1,%0,%3")
+
+(define_insn ""
+  [(set (cc0)
+	(compare
+	 (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+			  (match_operand:QI 1 "general_operand" "g")
+			  (match_operand:SI 2 "general_operand" "g"))
+	 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "cmpzv %2,%1,%0,%3")
+
+;; When the field position and size are constant and the destination
+;; is a register, extv and extzv are much slower than a rotate followed
+;; by a bicl or sign extension.  Because we might end up choosing ext[z]v
+;; anyway, we can't allow immediate values for the primary source operand.
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(sign_extract:SI (match_operand:SI 1 "register_operand" "ro")
+			 (match_operand:QI 2 "general_operand" "g")
+			 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (GET_CODE (operands[3]) != CONST_INT || GET_CODE (operands[2]) != CONST_INT
+      || GET_CODE (operands[0]) != REG
+      || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16))
+    return \"extv %3,%2,%1,%0\";
+  /* Rotate the field to bit 0 (%R prints 32 minus the position), then
+     sign-extend it in place.  */
+  if (INTVAL (operands[2]) == 8)
+    return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
+  return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(zero_extract:SI (match_operand:SI 1 "register_operand" "ro")
+			 (match_operand:QI 2 "general_operand" "g")
+			 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (GET_CODE (operands[3]) != CONST_INT || GET_CODE (operands[2]) != CONST_INT
+      || GET_CODE (operands[0]) != REG)
+    return \"extzv %3,%2,%1,%0\";
+  if (INTVAL (operands[2]) == 8)
+    return \"rotl %R3,%1,%0\;movzbl %0,%0\";
+  if (INTVAL (operands[2]) == 16)
+    return \"rotl %R3,%1,%0\;movzwl %0,%0\";
+  /* Other widths: rotate (if the position isn't already 0 mod 32) and
+     clear the bits above the field (%M prints the mask).  */
+  if (INTVAL (operands[3]) & 31)
+    return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
+  if (rtx_equal_p (operands[0], operands[1]))
+    return \"bicl2 %M2,%0\";
+  return \"bicl3 %M2,%1,%0\";
+}")
+
+;; Non-register cases.
+;; nonimmediate_operand is used to make sure that mode-ambiguous cases
+;; don't match these (and therefore match the cases above instead).
+
+(define_insn ""
+  [(set (cc0)
+	(compare
+	 (sign_extract:SI (match_operand:QI 0 "memory_operand" "m")
+			  (match_operand:QI 1 "general_operand" "g")
+			  (match_operand:SI 2 "general_operand" "g"))
+	 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "cmpv %2,%1,%0,%3")
+
+(define_insn ""
+  [(set (cc0)
+	(compare
+	 (zero_extract:SI (match_operand:QI 0 "nonimmediate_operand" "rm")
+			  (match_operand:QI 1 "general_operand" "g")
+			  (match_operand:SI 2 "general_operand" "g"))
+	 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "cmpzv %2,%1,%0,%3")
+
+(define_insn "extv"
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(sign_extract:SI (match_operand:QI 1 "memory_operand" "m")
+			 (match_operand:QI 2 "general_operand" "g")
+			 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "*
+{
+  /* The rotate trick is only safe for a constant byte/word field that
+     lies within one longword, with no side effects or mode-dependent
+     addressing in the memory operand.  */
+  if (GET_CODE (operands[0]) != REG || GET_CODE (operands[2]) != CONST_INT
+      || GET_CODE (operands[3]) != CONST_INT
+      || (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 16)
+      || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
+      || side_effects_p (operands[1])
+      || (GET_CODE (operands[1]) == MEM
+	  && mode_dependent_address_p (XEXP (operands[1], 0))))
+    return \"extv %3,%2,%1,%0\";
+  if (INTVAL (operands[2]) == 8)
+    return \"rotl %R3,%1,%0\;cvtbl %0,%0\";
+  return \"rotl %R3,%1,%0\;cvtwl %0,%0\";
+}")
+
+(define_expand "extzv"
+  [(set (match_operand:SI 0 "general_operand" "")
+	(zero_extract:SI (match_operand:SI 1 "general_operand" "")
+			 (match_operand:QI 2 "general_operand" "")
+			 (match_operand:SI 3 "general_operand" "")))]
+  ""
+  "")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(zero_extract:SI (match_operand:QI 1 "memory_operand" "m")
+			 (match_operand:QI 2 "general_operand" "g")
+			 (match_operand:SI 3 "general_operand" "g")))]
+  ""
+  "*
+{
+  if (GET_CODE (operands[0]) != REG || GET_CODE (operands[2]) != CONST_INT
+      || GET_CODE (operands[3]) != CONST_INT
+      || INTVAL (operands[2]) + INTVAL (operands[3]) > 32
+      || side_effects_p (operands[1])
+      || (GET_CODE (operands[1]) == MEM
+	  && mode_dependent_address_p (XEXP (operands[1], 0))))
+    return \"extzv %3,%2,%1,%0\";
+  if (INTVAL (operands[2]) == 8)
+    return \"rotl %R3,%1,%0\;movzbl %0,%0\";
+  if (INTVAL (operands[2]) == 16)
+    return \"rotl %R3,%1,%0\;movzwl %0,%0\";
+  return \"rotl %R3,%1,%0\;bicl2 %M2,%0\";
+}")
+
+(define_expand "insv"
+  [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "")
+			 (match_operand:QI 1 "general_operand" "")
+			 (match_operand:SI 2 "general_operand" ""))
+	(match_operand:SI 3 "general_operand" ""))]
+  ""
+  "")
+
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+g")
+			 (match_operand:QI 1 "general_operand" "g")
+			 (match_operand:SI 2 "general_operand" "g"))
+	(match_operand:SI 3 "general_operand" "g"))]
+  ""
+  "insv %3,%2,%1,%0")
+
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r")
+			 (match_operand:QI 1 "general_operand" "g")
+			 (match_operand:SI 2 "general_operand" "g"))
+	(match_operand:SI 3 "general_operand" "g"))]
+  ""
+  "insv %3,%2,%1,%0")
+
+;; Unconditional and cc0-conditional branches.  The j-prefixed mnemonics
+;; let the assembler relax to long-displacement forms when needed.
+(define_insn "jump"
+  [(set (pc)
+	(label_ref (match_operand 0 "" "")))]
+  ""
+  "jbr %l0")
+
+(define_insn "beq"
+  [(set (pc)
+	(if_then_else (eq (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jeql %l0")
+
+(define_insn "bne"
+  [(set (pc)
+	(if_then_else (ne (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jneq %l0")
+
+(define_insn "bgt"
+  [(set (pc)
+	(if_then_else (gt (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jgtr %l0")
+
+(define_insn "bgtu"
+  [(set (pc)
+	(if_then_else (gtu (cc0)
+			   (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jgtru %l0")
+
+(define_insn "blt"
+  [(set (pc)
+	(if_then_else (lt (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jlss %l0")
+
+(define_insn "bltu"
+  [(set (pc)
+	(if_then_else (ltu (cc0)
+			   (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jlssu %l0")
+
+(define_insn "bge"
+  [(set (pc)
+	(if_then_else (ge (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jgeq %l0")
+
+(define_insn "bgeu"
+  [(set (pc)
+	(if_then_else (geu (cc0)
+			   (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jgequ %l0")
+
+(define_insn "ble"
+  [(set (pc)
+	(if_then_else (le (cc0)
+			  (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jleq %l0")
+
+(define_insn "bleu"
+  [(set (pc)
+	(if_then_else (leu (cc0)
+			   (const_int 0))
+		      (label_ref (match_operand 0 "" ""))
+		      (pc)))]
+  ""
+  "jlequ %l0")
+
+;; Recognize reversed jumps.
+(define_insn ""
+  [(set (pc)
+	(if_then_else (match_operator 0 "comparison_operator"
+				      [(cc0)
+				       (const_int 0)])
+		      (pc)
+		      (label_ref (match_operand 1 "" ""))))]
+  ""
+  "j%C0 %l1") ; %C0 negates condition
+
+;; Recognize jbs, jlbs, jbc and jlbc instructions.  Note that the operand
+;; of jlbs and jlbc insns are SImode in the hardware.  However, if it is
+;; memory, we use QImode in the insn.  So we can't use those instructions
+;; for mode-dependent addresses.
+;; First alternative ('I' = bit position 0) uses the low-bit forms
+;; jlbs/jlbc; the second uses the general bit forms jbs/jbc.
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (ne (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
+			      (const_int 1)
+			      (match_operand:SI 1 "general_operand" "I,g"))
+	     (const_int 0))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))]
+  ""
+  "@
+   jlbs %0,%l2
+   jbs %1,%0,%l2")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (eq (zero_extract:SI (match_operand:QI 0 "memory_operand" "Q,g")
+			      (const_int 1)
+			      (match_operand:SI 1 "general_operand" "I,g"))
+	     (const_int 0))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))]
+  ""
+  "@
+   jlbc %0,%l2
+   jbc %1,%0,%l2")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+			      (const_int 1)
+			      (match_operand:SI 1 "general_operand" "I,g"))
+	     (const_int 0))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))]
+  ""
+  "@
+   jlbs %0,%l2
+   jbs %1,%0,%l2")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r,r")
+			      (const_int 1)
+			      (match_operand:SI 1 "general_operand" "I,g"))
+	     (const_int 0))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))]
+  ""
+  "@
+   jlbc %0,%l2
+   jbc %1,%0,%l2")
+
+;; Subtract-and-jump and Add-and-jump insns.
+;; These are not used when output is for the Unix assembler
+;; because it does not know how to modify them to reach far.
+
+;; Normal sob insns.
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (gt (plus:SI (match_operand:SI 0 "general_operand" "+g")
+		      (const_int -1))
+	     (const_int 0))
+	 (label_ref (match_operand 1 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))]
+  "!TARGET_UNIX_ASM"
+  "jsobgtr %0,%l1")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (ge (plus:SI (match_operand:SI 0 "general_operand" "+g")
+		      (const_int -1))
+	     (const_int 0))
+	 (label_ref (match_operand 1 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))]
+  "!TARGET_UNIX_ASM"
+  "jsobgeq %0,%l1")
+
+;; Normal aob insns.  Define a version for when operands[1] is a constant.
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (lt (plus:SI (match_operand:SI 0 "general_operand" "+g")
+		      (const_int 1))
+	     (match_operand:SI 1 "general_operand" "g"))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int 1)))]
+  "!TARGET_UNIX_ASM"
+  "jaoblss %1,%0,%l2")
+
+;; Constant-limit variant: the comparison was folded, so the limit printed
+;; via %P is adjusted to compensate.
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (lt (match_operand:SI 0 "general_operand" "+g")
+	     (match_operand:SI 1 "general_operand" "g"))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int 1)))]
+  "!TARGET_UNIX_ASM && GET_CODE (operands[1]) == CONST_INT"
+  "jaoblss %P1,%0,%l2")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (le (plus:SI (match_operand:SI 0 "general_operand" "+g")
+		      (const_int 1))
+	     (match_operand:SI 1 "general_operand" "g"))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int 1)))]
+  "!TARGET_UNIX_ASM"
+  "jaobleq %1,%0,%l2")
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (le (match_operand:SI 0 "general_operand" "+g")
+	     (match_operand:SI 1 "general_operand" "g"))
+	 (label_ref (match_operand 2 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int 1)))]
+  "!TARGET_UNIX_ASM && GET_CODE (operands[1]) == CONST_INT"
+  "jaobleq %P1,%0,%l2")
+
+;; Something like a sob insn, but compares against -1.
+;; This finds `while (foo--)' which was changed to `while (--foo != -1)'.
+
+(define_insn ""
+  [(set (pc)
+	(if_then_else
+	 (ne (match_operand:SI 0 "general_operand" "g")
+	     (const_int 0))
+	 (label_ref (match_operand 1 "" ""))
+	 (pc)))
+   (set (match_dup 0)
+	(plus:SI (match_dup 0)
+		 (const_int -1)))]
+  ""
+  "decl %0\;jgequ %l1")
+
+;; Note that operand 1 is total size of args, in bytes,
+;; and what the call insn wants is the number of words.
+;; It is used in the call instruction as a byte, but in the addl2 as
+;; a word.  Since the only time we actually use it in the call instruction
+;; is when it is a constant, SImode (for addl2) is the proper mode.
+(define_insn "call_pop"
+  [(call (match_operand:QI 0 "memory_operand" "m")
+	 (match_operand:SI 1 "const_int_operand" "n"))
+   (set (reg:SI 14) (plus:SI (reg:SI 14)
+			     (match_operand:SI 3 "immediate_operand" "i")))]
+  ""
+  "*
+  if (INTVAL (operands[1]) > 255 * 4)
+    /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+    return \"calls $0,%0\;addl2 %1,sp\";
+  operands[1] = GEN_INT ((INTVAL (operands[1]) + 3)/ 4);
+  return \"calls %1,%0\";
+")
+
+(define_insn "call_value_pop"
+  [(set (match_operand 0 "" "=g")
+	(call (match_operand:QI 1 "memory_operand" "m")
+	      (match_operand:SI 2 "const_int_operand" "n")))
+   (set (reg:SI 14) (plus:SI (reg:SI 14)
+			     (match_operand:SI 4 "immediate_operand" "i")))]
+  ""
+  "*
+  if (INTVAL (operands[2]) > 255 * 4)
+    /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+    return \"calls $0,%1\;addl2 %2,sp\";
+  operands[2] = GEN_INT ((INTVAL (operands[2]) + 3)/ 4);
+  return \"calls %2,%1\";
+")
+
+;; Define another set of these for the case of functions with no
+;; operands.  In that case, combine may simplify the adjustment of sp.
+(define_insn ""
+  [(call (match_operand:QI 0 "memory_operand" "m")
+	 (match_operand:SI 1 "const_int_operand" "n"))
+   (set (reg:SI 14) (reg:SI 14))]
+  ""
+  "*
+  if (INTVAL (operands[1]) > 255 * 4)
+    /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+    return \"calls $0,%0\;addl2 %1,sp\";
+  operands[1] = GEN_INT ((INTVAL (operands[1]) + 3)/ 4);
+  return \"calls %1,%0\";
+")
+
+(define_insn ""
+  [(set (match_operand 0 "" "=g")
+	(call (match_operand:QI 1 "memory_operand" "m")
+	      (match_operand:SI 2 "const_int_operand" "n")))
+   (set (reg:SI 14) (reg:SI 14))]
+  ""
+  "*
+  if (INTVAL (operands[2]) > 255 * 4)
+    /* Vax `calls' really uses only one byte of #args, so pop explicitly. */
+    return \"calls $0,%1\;addl2 %2,sp\";
+  operands[2] = GEN_INT ((INTVAL (operands[2]) + 3)/ 4);
+  return \"calls %2,%1\";
+")
+
+;; Call subroutine returning any type.
+
+(define_expand "untyped_call"
+  [(parallel [(call (match_operand 0 "" "")
+		    (const_int 0))
+	      (match_operand 1 "" "")
+	      (match_operand 2 "" "")])]
+  ""
+  "
+{
+  int i;
+
+  emit_call_insn (gen_call_pop (operands[0], const0_rtx, NULL, const0_rtx));
+
+  /* Copy each value-register save described in the result block.  */
+  for (i = 0; i < XVECLEN (operands[2], 0); i++)
+    {
+      rtx set = XVECEXP (operands[2], 0, i);
+      emit_move_insn (SET_DEST (set), SET_SRC (set));
+    }
+
+  /* The optimizer does not know that the call sets the function value
+     registers we stored in the result block.  We avoid problems by
+     claiming that all hard registers are used and clobbered at this
+     point.  */
+  emit_insn (gen_blockage ());
+
+  DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory.  This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+  [(unspec_volatile [(const_int 0)] 0)]
+  ""
+  "")
+
+(define_insn "return"
+  [(return)]
+  ""
+  "ret")
+
+(define_insn "nop"
+  [(const_int 0)]
+  ""
+  "nop")
+
+;; This had a wider constraint once, and it had trouble.
+;; If you are tempted to try `g', please don't--it's not worth
+;; the risk we will reopen the same bug.
+(define_insn "indirect_jump"
+  [(set (pc) (match_operand:SI 0 "general_operand" "r"))]
+  ""
+  "jmp (%0)")
+
+;; This is here to accept 5 arguments (as passed by expand_end_case)
+;; and pass the first 4 along to the casesi1 pattern that really does the work.
+(define_expand "casesi"
+  [(set (pc)
+	(if_then_else (leu (minus:SI (match_operand:SI 0 "general_operand" "g")
+				     (match_operand:SI 1 "general_operand" "g"))
+			   (match_operand:SI 2 "general_operand" "g"))
+		      (plus:SI (sign_extend:SI
+				(mem:HI
+				 (plus:SI (pc)
+					  (mult:SI (minus:SI (match_dup 0)
+							     (match_dup 1))
+						   (const_int 2)))))
+			       (label_ref:SI (match_operand 3 "" "")))
+		      (pc)))
+   (match_operand 4 "" "")]
+  ""
+  "
+  emit_insn (gen_casesi1 (operands[0], operands[1], operands[2], operands[3]));
+  DONE;
+")
+
+;; casel dispatches through a table of 16-bit PC-relative offsets that
+;; the assembler emits after the instruction; falls through when the
+;; index is out of range.
+(define_insn "casesi1"
+  [(set (pc)
+	(if_then_else (leu (minus:SI (match_operand:SI 0 "general_operand" "g")
+				     (match_operand:SI 1 "general_operand" "g"))
+			   (match_operand:SI 2 "general_operand" "g"))
+		      (plus:SI (sign_extend:SI
+				(mem:HI
+				 (plus:SI (pc)
+					  (mult:SI (minus:SI (match_dup 0)
+							     (match_dup 1))
+						   (const_int 2)))))
+			       (label_ref:SI (match_operand 3 "" "")))
+		      (pc)))]
+  ""
+  "casel %0,%1,%2")
+
+;; This used to arise from the preceding by simplification
+;; if operand 1 is zero.  Perhaps it is no longer necessary.
+(define_insn ""
+  [(set (pc)
+	(if_then_else (leu (match_operand:SI 0 "general_operand" "g")
+			   (match_operand:SI 1 "general_operand" "g"))
+		      (plus:SI (sign_extend:SI
+				(mem:HI
+				 (plus:SI (pc)
+					  (mult:SI (minus:SI (match_dup 0)
+							     (const_int 0))
+						   (const_int 2)))))
+			       (label_ref:SI (match_operand 3 "" "")))
+		      (pc)))]
+  ""
+  "casel %0,$0,%1")
+
+;;- load or push effective address
+;; These come after the move and add/sub patterns
+;; because we don't want pushl $1 turned into pushad 1.
+;; or addl3 r1,r2,r3 turned into movab 0(r1)[r2],r3.
+
+;; It does not work to use constraints to distinguish pushes from moves,
+;; because < matches any autodecrement, not just a push.
+
+;; One pattern per mode of the addressed object; the mode picks the
+;; size suffix (b/w/l/q/f/d) of the pusha/mova mnemonic.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:QI 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushab %a1\";
+  else
+    return \"movab %a1,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:HI 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushaw %a1\";
+  else
+    return \"movaw %a1,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:SI 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushal %a1\";
+  else
+    return \"moval %a1,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:DI 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushaq %a1\";
+  else
+    return \"movaq %a1,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:SF 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushaf %a1\";
+  else
+    return \"movaf %a1,%0\";
+}")
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=g")
+	(match_operand:DF 1 "address_operand" "p"))]
+  ""
+  "*
+{
+  if (push_operand (operands[0], SImode))
+    return \"pushad %a1\";
+  else
+    return \"movad %a1,%0\";
+}")
+
+;; These used to be peepholes, but it is more straightforward to do them
+;; as single insns.  However, we must force the output to be a register
+;; if it is not an offsettable address so that we know that we can assign
+;; to it twice.
+
+;; If we had a good way of evaluating the relative costs, these could be
+;; machine-independent.
+
+;; Optimize   extzv ...,z;  andl2 ...,z
+;; or	      ashl ...,z;  andl2 ...,z
+;; with other operands constant.  This is what the combiner converts the
+;; above sequences to before attempting to recognize the new insn.
+
+;; NOTE(review): the condition and body compute 1 << (32 - count); a count
+;; of 0 would shift by 32, which is undefined in C -- presumably combine
+;; never produces a zero-count ashiftrt here, but verify.
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=ro")
+	(and:SI (ashiftrt:SI (match_operand:SI 1 "general_operand" "g")
+			     (match_operand:QI 2 "const_int_operand" "n"))
+		(match_operand:SI 3 "const_int_operand" "n")))]
+  "(INTVAL (operands[3]) & ~((1 << (32 - INTVAL (operands[2]))) - 1)) == 0"
+  "*
+{
+  unsigned long mask1 = INTVAL (operands[3]);
+  unsigned long mask2 = (1 << (32 - INTVAL (operands[2]))) - 1;
+
+  /* Trim the mask to the bits the shift can leave set.  */
+  if ((mask1 & mask2) != mask1)
+    operands[3] = GEN_INT (mask1 & mask2);
+
+  return \"rotl %R2,%1,%0\;bicl2 %N3,%0\";
+}")
+
+;; left-shift and mask
+;; The only case where `ashl' is better is if the mask only turns off
+;; bits that the ashl would anyways, in which case it should have been
+;; optimized away.
+
+(define_insn ""
+  [(set (match_operand:SI 0 "general_operand" "=ro")
+	(and:SI (ashift:SI (match_operand:SI 1 "general_operand" "g")
+			   (match_operand:QI 2 "const_int_operand" "n"))
+		(match_operand:SI 3 "const_int_operand" "n")))]
+  ""
+  "*
+{
+  /* Clear the low bits the shift would have zeroed, since rotl keeps them.  */
+  operands[3] = GEN_INT (INTVAL (operands[3]) & ~((1 << INTVAL (operands[2])) - 1));
+  return \"rotl %2,%1,%0\;bicl2 %N3,%0\";
+}")
diff --git a/gcc/config/vax/vaxv.h b/gcc/config/vax/vaxv.h
new file mode 100755
index 0000000..57dff40
--- /dev/null
+++ b/gcc/config/vax/vaxv.h
@@ -0,0 +1,70 @@
+/* Definitions of target machine for GNU compiler. Vax sysV version.
+ Copyright (C) 1988, 1993, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Cope with these under SysV */
+
+#define SCCS_DIRECTIVE
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dvax -Dunix -Asystem(unix) -Asystem(svr3) -Acpu(vax) -Amachine(vax)"
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) fprintf (FILE, "\t.ident \"%s\"\n", NAME);
+
+/* SysV tools use COFF/SDB debugging info rather than BSD stabs.  */
+#undef DBX_DEBUGGING_INFO
+#define SDB_DEBUGGING_INFO
+
+#undef LIB_SPEC
+
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+output_file_directive ((FILE), main_input_filename)
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf(FILE, "\t.align %d\n", 1 << (LOG))
+
+/* Emit a local common as "name:\n\t.space ROUNDED" in the data
+   section instead of using .lcomm.  */
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_LOCAL(FILE,NAME,SIZE,ROUNDED) \
+( data_section (), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ":\n\t.space %u\n", (ROUNDED)))
+
+/* Emit a string as .byte directives, 8 bytes per line; the SysV
+   assembler has no .ascii escape handling we can rely on.  */
+#define ASM_OUTPUT_ASCII(FILE,PTR,LEN) \
+do { \
+ unsigned char *s; \
+ int i; \
+ for (i = 0, s = (PTR); i < (LEN); s++, i++) \
+ { \
+ if ((i % 8) == 0) \
+ fputs ("\n\t.byte\t", (FILE)); \
+ fprintf ((FILE), "%s0x%x", (i%8?",":""), (unsigned)*s); \
+ } \
+ fputs ("\n", (FILE)); \
+} while (0)
+
+/* Emit a double constant with the 0d prefix expected by the SysV
+   assembler, via a 20-digit decimal round trip.  */
+#undef ASM_OUTPUT_DOUBLE
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+do { char dstr[30]; \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20e", dstr); \
+ fprintf (FILE, "\t.double 0d%s\n", dstr); \
+ } while (0)
diff --git a/gcc/config/vax/vms.h b/gcc/config/vax/vms.h
new file mode 100755
index 0000000..35c1665
--- /dev/null
+++ b/gcc/config/vax/vms.h
@@ -0,0 +1,369 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 1988, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define VMS_TARGET
+
+/* This enables certain macros in vax.h, which will make an indirect
+ reference to an external symbol an invalid address. This needs to be
+ defined before we include vax.h, since it determines which macros
+ are used for GO_IF_*. */
+
+#define NO_EXTERNAL_INDIRECT_ADDRESS
+
+#include "vax/vax.h"
+
+#undef LIB_SPEC
+#undef CPP_PREDEFINES
+#undef TARGET_NAME
+#undef TARGET_DEFAULT
+#undef CALL_USED_REGISTERS
+#undef MAYBE_VMS_FUNCTION_PROLOGUE
+#undef STARTING_FRAME_OFFSET
+
+/* Predefine this in CPP because VMS limits the size of command options
+ and GNU CPP is not used on VMS except with GNU C. */
+#define CPP_PREDEFINES \
+"-Dvax -Dvms -DVMS -D__vax__ -D__vms__ -D__VMS__\
+ -D__GNUC__=2 -D__GNUC_MINOR__=7 -Asystem(vms) -Acpu(vax) -Amachine(vax)"
+
+/* These match the definitions used in VAXCRTL, the VMS C run-time library */
+
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32 /* in bits */
+
+/* Use memcpy for structure copying, and so forth. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Strictly speaking, VMS does not use DBX at all, but the interpreter built
+ into gas only speaks straight DBX. */
+
+#define DEFAULT_GDB_EXTENSIONS 0
+
+#define TARGET_DEFAULT 1
+#define TARGET_NAME "vax/vms"
+
+/* The structure return address arrives as an "argument" on VMS. */
+#undef STRUCT_VALUE_REGNUM
+#define STRUCT_VALUE 0
+#undef PCC_STATIC_STRUCT_RETURN
+
+#define CALL_USED_REGISTERS {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* The run-time library routine VAXC$ESTABLISH (necessary when mixing
+ VMS exception handling and setjmp/longjmp in the same program) requires
+ that a hidden automatic variable at the top of the stack be reserved
+ for its use. We accomplish this by simply adding 4 bytes to the local
+ stack for all functions, and making sure that normal local variables
+ are 4 bytes lower on the stack than they would otherwise have been. */
+
+#define STARTING_FRAME_OFFSET -4
+
+#define __MAIN_NAME " main("
+/*
+ * The MAYBE_VMS_FUNCTION_PROLOGUE macro works for both gcc and g++. It
+ * first checks to see if the current routine is "main", which will only
+ * happen for GCC, and add the jsb if it is. If that is not the case then try and
+ * see if __MAIN_NAME is part of current_function_name, which will only happen
+ * if we are running g++, and add the jsb if it is. In gcc there should never
+ * be a paren in the function name, and in g++ there is always a "(" in the
+ * function name, thus there should never be any confusion.
+ *
+ * Adjusting the stack pointer by 4 before calling C$MAIN_ARGS is required
+ * when linking with the VMS POSIX version of the C run-time library; using
+ * `subl2 $4,r0' is adequate but we use `clrl -(sp)' instead. The extra 4
+ * bytes could be removed after the call because STARTING_FRAME_OFFSET's
+ * setting of -4 will end up adding them right back again, but don't bother.
+ */
+/* Emit VMS-specific prologue code when the current function is `main':
+   push a longword (for the VMS POSIX C run-time, see comment above) and
+   jsb to C$MAIN_ARGS.  Detects `main' either by exact name match (gcc)
+   or by finding the " main(" substring inside the decorated g++
+   function name; `sizeof __MAIN_NAME - sizeof ""' is a compile-time
+   strlen of the literal.  */
+#define MAYBE_VMS_FUNCTION_PROLOGUE(FILE) \
+{ extern char *current_function_name; \
+ char *p = current_function_name; \
+ int is_main = strcmp ("main", p) == 0; \
+ while (!is_main && *p != '\0') \
+ { \
+ if (*p == *__MAIN_NAME \
+ && strncmp (p, __MAIN_NAME, sizeof __MAIN_NAME - sizeof "") == 0) \
+ is_main = 1; \
+ else \
+ p++; \
+ } \
+ if (is_main) \
+ fprintf (FILE, "\t%s\n\t%s\n", "clrl -(sp)", "jsb _C$MAIN_ARGS"); \
+}
+
+/* This macro definition sets up a default value for `main' to return. */
+#define DEFAULT_MAIN_RETURN c_expand_return (integer_one_node)
+
+/* This makes use of a hook in varasm.c to mark all external variables
+ for us. We use this to make sure that external variables are correctly
+ addressed. Under VMS there is some brain damage in the linker that requires
+ us to do this. */
+
+#define ENCODE_SECTION_INFO(decl) \
+ if (DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#undef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs (".globl ", FILE); \
+ assemble_name (FILE, NAME); \
+ fputs ("\n", FILE); \
+ vms_check_external (NULL_TREE, NAME, 0); \
+ } while (0)
+
+/* Under VMS we write the actual size of the storage to be allocated even
+ though the symbol is external. Although it is possible to give external
+ symbols a size of 0 (as unix does), the VMS linker does not make the
+ distinction between a variable definition and an external reference of a
+ variable, and thus the linker will not complain about a missing definition.
+ If we followed the unix example of giving external symbols a size of
+ zero, and you tried to link a program where a given variable was externally
+ defined but none of the object modules contained a non-extern definition,
+ the linker would allocate 0 bytes for the variable, and any attempt to
+ use that variable would use the storage allocated to some other variable.
+
+ We must also select either const_section or data_section: this will indicate
+ whether or not the variable will get the readonly bit set. Since the
+ VMS linker does not distinguish between a variable's definition and an
+ external reference, all usages of a given variable must have the readonly
+ bit set the same way, or the linker will get confused and give warning
+ messages. */
+
+/* We used to round the size up to a multiple of 4,
+ but that causes linker errors sometimes when the variable was initialized
+ since the size of its definition was not likewise rounded up. */
+
+/* Note: the original ASM_OUTPUT_EXTERNAL code has been moved into
+ vms_check_external and vms_flush_pending_externals. */
+
+/* Queue uninitialized non-function externals for deferred output;
+   vms_check_external (in vax.c) records them so the declaration can be
+   emitted after any pending initializer completes.  */
+#define ASM_OUTPUT_EXTERNAL(FILE,DECL,NAME) \
+{ if (DECL_INITIAL (DECL) == 0 && TREE_CODE (DECL) != FUNCTION_DECL) \
+ vms_check_external ((DECL), (NAME), 1); \
+}
+
+/* ASM_OUTPUT_EXTERNAL will have to wait until after an initializer is
+ completed in order to switch sections for an external object, so
+ use the DECLARE_OBJECT hooks to manage deferred declarations. */
+
+/* This is the default action for ASM_DECLARE_OBJECT_NAME, but if it
+ is explicitly defined, then ASM_FINISH_DECLARE_OBJECT will be used. */
+
+#define ASM_DECLARE_OBJECT_NAME(ASM_OUT_FILE,NAME,DECL) \
+ ASM_OUTPUT_LABEL ((ASM_OUT_FILE), (NAME))
+
+/* We don't need to do anything special to finish the current object, but it
+ should now be safe to output any deferred external global declarations. */
+
+#define ASM_FINISH_DECLARE_OBJECT(FILE,DECL,TOPLVL,ATEND) \
+ vms_flush_pending_externals(FILE)
+
+/* Anything still pending must be flushed at the very end. */
+
+#define ASM_FILE_END(STREAM) \
+ vms_flush_pending_externals(STREAM)
+
+/* Here we redefine ASM_OUTPUT_COMMON to select the data_section or the
+ const_section before writing the ".const" assembler directive.
+ If we were specifying a size of zero for external variables, we would
+ not have to select a section, since the assembler can assume that
+ when the size > 0, the storage is for a non-external, uninitialized
+ variable (for which a "const" declaration would be senseless),
+ and the assembler can make the storage read/write.
+
+ Since the ".const" directive specifies the actual size of the storage used
+ for both external and non-external variables, the assembler cannot
+ make this assumption, and thus it has no way of deciding if storage should
+ be read/write or read-only. To resolve this, we give the assembler some
+ assistance, in the form of a ".const" or a ".data" directive.
+
+ Under GCC 1.40, external variables were declared with a size of zero.
+ The GNU assembler, GAS, will recognize the "-2" switch when built for VMS;
+ when compiling programs with GCC 2.n this switch should be used or the
+ assembler will not give the read-only attribute to external constants.
+ Failure to use this switch will result in linker warning messages about
+ mismatched psect attributes. */
+
+#undef ASM_OUTPUT_COMMON
+
+/* Select const_section or data_section before emitting .comm, so the
+   assembler gives the psect the right read-only attribute (see the
+   block comment above).  NOTE(review): the readonly test expands the
+   name `decl', not a macro parameter -- it relies on a variable named
+   `decl' being in scope at the expansion site in varasm.c; confirm
+   before reusing this macro elsewhere.  */
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( ((TREE_READONLY (decl) && ! TREE_THIS_VOLATILE (decl)) \
+ ? (const_section (), 0) : (data_section (), 0)), \
+ fputs (".comm ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u\n", (SIZE)))
+
+/* We define this to prevent the name mangler from putting dollar signs into
+ function names. This isn't really needed, but it has been here for
+ some time and removing it would cause the object files generated by the
+ compiler to be incompatible with the object files from a compiler that
+ had this defined. Since it does no harm, we leave it in. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* Add a "const" section. This is viewed by the assembler as being nearly
+ the same as the "data" section, with the only difference being that a
+ flag is set for variables declared while in the const section. This
+ flag is used to determine whether or not the read/write bit should be
+ set in the Psect definition. */
+
+#define EXTRA_SECTIONS in_const
+
+/* const_section (): switch assembler output to ".const".  The current
+   section is tracked in `in_section' so the directive is emitted only
+   on an actual transition.  */
+#define EXTRA_SECTION_FUNCTIONS \
+void \
+const_section () \
+{ \
+ if (in_section != in_const) { \
+ fprintf(asm_out_file,".const\n"); \
+ in_section = in_const; \
+ } \
+}
+
+/* This macro contains the logic to decide which section a variable
+ should be stored in. Static constant variables go in the text_section,
+ non-const variables go in the data_section, and non-static const
+ variables go in the const_section.
+
+ Since this macro is used in a number of places, we must also be able
+ to decide where to place string constants. */
+
+/* Choose the output section for T: writable variables go to
+   data_section; readonly, constant-initialized variables go to
+   const_section when public and text_section when private; string
+   constants follow flag_writable_strings, other constants go to
+   text_section.  (RELOC is accepted but unused here.)  */
+#define SELECT_SECTION(T,RELOC) \
+{ \
+ if (TREE_CODE (T) == VAR_DECL) \
+ { \
+ if (TREE_READONLY (T) && ! TREE_THIS_VOLATILE (T) \
+ && DECL_INITIAL (T) \
+ && (DECL_INITIAL (T) == error_mark_node \
+ || TREE_CONSTANT (DECL_INITIAL (T)))) \
+ { \
+ if (TREE_PUBLIC (T)) \
+ const_section (); \
+ else \
+ text_section (); \
+ } \
+ else \
+ data_section (); \
+ } \
+ if (TREE_CODE_CLASS (TREE_CODE (T)) == 'c') \
+ { \
+ if ((TREE_CODE (T) == STRING_CST && flag_writable_strings)) \
+ data_section (); \
+ else \
+ text_section (); \
+ } \
+}
+
+/* This is used by a hook in varasm.c to write the assembler directives
+ that are needed to tell the startup code which constructors need to
+ be run. */
+
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+{ \
+ fprintf ((FILE),".globl $$PsectAttributes_NOOVR$$__gxx_init_1\n"); \
+ data_section(); \
+ fprintf ((FILE),"$$PsectAttributes_NOOVR$$__gxx_init_1:\n\t.long\t"); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+}
+
+/* This is used by a hook in varasm.c to write the assembler directives
+ that are needed to tell the startup code which destructors need to
+ be run. */
+
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+{ \
+ fprintf ((FILE),".globl $$PsectAttributes_NOOVR$$__gxx_clean_1\n"); \
+ data_section(); \
+ fprintf ((FILE),"$$PsectAttributes_NOOVR$$__gxx_clean_1:\n\t.long\t");\
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+}
+
+/* True for VMS V4.6 and later. */
+#define HAVE_ATEXIT
+
+/* The following definitions are used in libgcc2.c with the __main
+ function. The _SHR symbol is used when the sharable image library
+ for the C++ library is used - this is picked up automatically by the linker
+ and this symbol points to the start of __CTOR_LIST__ from the C++ library.
+ If the C++ library is not used, then __CTOR_LIST_SHR__ occurs just after
+ __CTOR_LIST__, and essentially points to the same list as __CTOR_LIST. */
+
+#ifdef L__main
+
+#define __CTOR_LIST__ __gxx_init_0
+#define __CTOR_LIST_END__ __gxx_init_2
+
+#define __CTOR_LIST_SHR__ $$PsectAttributes_NOSHR$$__gxx_init_0_shr
+#define __CTOR_LIST_SHR_END__ $$PsectAttributes_NOSHR$$__gxx_init_2_shr
+
+/* Run constructors from the sharable-image list (when distinct from
+   the local list) and then the local list; afterwards patch the saved
+   return PC in the frame of main's caller's caller so that `return'
+   from main() lands on the exit(retval) label.  NOTE(review): the
+   frame depth 3 and saved-PC slot offset [4] are specific to the
+   VAX/VMS calling standard frame layout -- confirm before changing.  */
+#define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *p; \
+ extern func_ptr __CTOR_LIST__[1], __CTOR_LIST_END__[1]; \
+ extern func_ptr __CTOR_LIST_SHR__[1], __CTOR_LIST_SHR_END__[1]; \
+ if (&__CTOR_LIST_SHR__[0] != &__CTOR_LIST__[1]) \
+ for (p = __CTOR_LIST_SHR__ + 1; p < __CTOR_LIST_SHR_END__ ; p++ ) \
+ if (*p) (*p) (); \
+ for (p = __CTOR_LIST__ + 1; p < __CTOR_LIST_END__ ; p++ ) \
+ if (*p) (*p) (); \
+ do { /* arrange for `return' from main() to pass through exit() */ \
+ __label__ foo; \
+ int *callers_caller_fp = (int *) __builtin_frame_address (3); \
+ register int retval asm ("r0"); \
+ callers_caller_fp[4] = (int) && foo; \
+ break; /* out of do-while block */ \
+ foo: \
+ exit (retval); \
+ } while (0); \
+} while (0)
+
+#define __DTOR_LIST__ __gxx_clean_0
+#define __DTOR_LIST_END__ __gxx_clean_2
+
+#define __DTOR_LIST_SHR__ $$PsectAttributes_NOSHR$$__gxx_clean_0_shr
+#define __DTOR_LIST_SHR_END__ $$PsectAttributes_NOSHR$$__gxx_clean_2_shr
+
+/* Run destructors: the local list first, then the sharable-image list
+   when it is distinct from the local one -- the mirror image of the
+   constructor ordering above.  */
+#define DO_GLOBAL_DTORS_BODY \
+do { \
+ func_ptr *p; \
+ extern func_ptr __DTOR_LIST__[1], __DTOR_LIST_END__[1]; \
+ extern func_ptr __DTOR_LIST_SHR__[1], __DTOR_LIST_SHR_END__[1]; \
+ for (p = __DTOR_LIST__ + 1; p < __DTOR_LIST_END__ ; p++ ) \
+ if (*p) (*p) (); \
+ if (&__DTOR_LIST_SHR__[0] != &__DTOR_LIST__[1]) \
+ for (p = __DTOR_LIST_SHR__ + 1; p < __DTOR_LIST_SHR_END__ ; p++ ) \
+ if (*p) (*p) (); \
+} while (0)
+
+#endif /* L__main */
+
+/* Specify the list of include file directories. */
+#define INCLUDE_DEFAULTS \
+{ \
+ { "GNU_GXX_INCLUDE:", "G++", 1, 1 }, \
+ { "GNU_CC_INCLUDE:", "GCC", 0, 0 }, /* GNU includes */ \
+ { "SYS$SYSROOT:[SYSLIB.]", 0, 0, 0 }, /* VAX-11 "C" includes */ \
+ { ".", 0, 0, 1 }, /* Make normal VMS filespecs work. */ \
+ { 0, 0, 0, 0 } \
+}
diff --git a/gcc/config/vax/x-vax b/gcc/config/vax/x-vax
new file mode 100755
index 0000000..bb58a6a
--- /dev/null
+++ b/gcc/config/vax/x-vax
@@ -0,0 +1,3 @@
+# If compiling GCC with the Unix assembler, -J will handle a large function.
+# With GAS, it should have no effect.
+X_CPPFLAGS = -J
diff --git a/gcc/config/vax/xm-vax.h b/gcc/config/vax/xm-vax.h
new file mode 100755
index 0000000..d7ef168
--- /dev/null
+++ b/gcc/config/vax/xm-vax.h
@@ -0,0 +1,45 @@
+/* Configuration for GNU C-compiler for Vax.
+ Copyright (C) 1987, 1993 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* This machine doesn't use IEEE floats. */
+#define HOST_FLOAT_FORMAT VAX_FLOAT_FORMAT
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* isinf isn't there, but finite is. */
+#define isinf(x) (!finite(x))
+
diff --git a/gcc/config/vax/xm-vaxv.h b/gcc/config/vax/xm-vaxv.h
new file mode 100755
index 0000000..aef16f0
--- /dev/null
+++ b/gcc/config/vax/xm-vaxv.h
@@ -0,0 +1,3 @@
+/* Config file for Vax running system V. */
+
+#define USG
diff --git a/gcc/config/vax/xm-vms.h b/gcc/config/vax/xm-vms.h
new file mode 100755
index 0000000..5d01aeb
--- /dev/null
+++ b/gcc/config/vax/xm-vms.h
@@ -0,0 +1,206 @@
+/* Configuration for GNU C-compiler for Vax.
+ Copyright (C) 1987, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* Other configurations get these via autoconfig. */
+#define STDC_HEADERS 1
+#define HAVE_STDLIB_H 1
+#define HAVE_STRING_H 1
+#ifdef __DECC
+#define HAVE_UNISTD_H 1
+#endif
+
+#if defined(VAXC) || defined(__DECC)
+/* if compiling with VAXC, need to fix problem with <stdio.h>
+ which defines a macro called FILE_TYPE that breaks "tree.h".
+ Fortunately it uses #ifndef to suppress multiple inclusions.
+ Three possible cases:
+ 1) <stdio.h> has already been included -- ours will be no-op;
+ 2) <stdio.h> will be included after us -- "theirs" will be no-op;
+ 3) <stdio.h> isn't needed -- including it here shouldn't hurt.
+ In all three cases, the problem macro will be removed here. */
+#include <stdio.h>
+#undef FILE_TYPE
+#endif
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+#define HOST_FLOAT_FORMAT VAX_FLOAT_FORMAT
+
+#define SUCCESS_EXIT_CODE 1
+#define FATAL_EXIT_CODE (44 | 0x10000000) /* Abort, and no DCL message. */
+
+/* A couple of conditionals for execution machine are controlled here. */
+#ifndef VMS
+#define VMS
+#endif
+
+#ifndef __GNUC__
+/* not present, at least in VAX-11 C (VMS) v2.2 */
+#define R_OK 4
+#define W_OK 2
+#define X_OK 1
+#define F_OK 0
+#endif
+
+#define GCC_INCLUDE_DIR "///not used with VMS///" /* nonsense string for now */
+
+/* and define a local equivalent (sort of) for unlink */
+#define unlink remove
+
+/* Used by the preprocessor to limit size of disk I/O chunks.
+ 64K - 1 is the maximum supported by VAXCRTL. Amounts in excess
+ of 35 blocks will bypass the VMS V6.x VIOC [Virtual I/O Cache],
+ so we'll pick a limit of 16K (32 blocks). */
+#define MAX_READ_LEN (32 * 512)
+#define MAX_WRITE_LEN (32 * 512)
+
+/* Under VMS a directory specification can be enclosed either in square
+ brackets or in angle brackets. Thus we need to check both. This
+ macro is used to help compare filenames in cp-lex.c.
+
+ We also need to make sure that the names are all lower case, because
+ we must be able to compare filenames to determine if a file implements
+ a class. */
+
+/* Return a pointer to the filename part of C, skipping any VMS
+   directory ([...] or <...>) or device (:) prefix.  Implemented as a
+   GNU C statement expression.  NOTE(review): this also lowercases the
+   whole argument string IN PLACE as a side effect (needed for the
+   filename comparisons described above), so callers must pass
+   writable, NUL-terminated storage.  */
+#define FILE_NAME_NONDIRECTORY(C) \
+({ \
+ char * pnt_ = (C), * pnt1_; \
+ pnt1_ = pnt_ - 1; \
+ while (*++pnt1_) \
+ if ((*pnt1_ >= 'A' && *pnt1_ <= 'Z')) *pnt1_ |= 0x20; \
+ pnt1_ = rindex (pnt_, ']'); \
+ pnt1_ = (pnt1_ == 0 ? rindex (pnt_, '>') : pnt1_); \
+ pnt1_ = (pnt1_ == 0 ? rindex (pnt_, ':') : pnt1_); \
+ (pnt1_ == 0 ? pnt_ : pnt1_ + 1); \
+ })
+
+/* Macro to generate the name of the cross reference file. The standard
+ one does not work, since it was written assuming that the conventions
+ of a unix style filesystem will work on the host system. */
+
+/* Build the cross-reference file name in BUFF by appending "_gxref"
+   to NAME (or to its non-directory part when NAME has a directory
+   prefix).  NOTE(review): this macro is not wrapped in
+   do { } while (0) and assigns to a variable `s' that must already be
+   declared at the expansion site -- safe only for the existing use in
+   cp-xref.c; confirm before expanding it anywhere else.  */
+#define XREF_FILE_NAME(BUFF, NAME) \
+ s = FILE_NAME_NONDIRECTORY (NAME); \
+ if (s == NAME) sprintf(BUFF, "%s_gxref", NAME); \
+ else { \
+ strcpy(BUFF, NAME); \
+ strcat(BUFF, "_gxref"); \
+ }
+
+/* Macro that is used in cp-xref.c to determine whether a file name is
+ absolute or not. */
+
+#define FILE_NAME_ABSOLUTE_P(NAME) \
+ (FILE_NAME_NONDIRECTORY (NAME) != (&NAME[1]))
+
+/* FILE_NAME_JOINER is defined to be the characters that are inserted between
+ a directory name and a filename in order to make an absolute file
+ specification. Under VMS the directory specification contains all of the
+ required characters, so we define this to be a null string. */
+
+#define FILE_NAME_JOINER ""
+
+/* vprintf() has been available since VMS V4.6. */
+
+#define HAVE_VPRINTF
+
+#if defined(VAXC) || defined(__DECC)
+
+/* Customizations/kludges for building with DEC's VAX C compiler
+ rather than GCC. */
+
+#define NO_SYS_PARAMS_H /* don't have <sys/params.h> */
+#define USE_C_ALLOCA /* using alloca.c */
+#define QSORT_WORKAROUND /* do not use VAXCRTL's qsort */
+
+/* use ANSI/SYSV style byte manipulation routines instead of BSD ones */
+/* rename all too-long external symbol names to avoid warnings */
+#define check_for_full_enumeration_handling check_for_full_enum_handling
+#define current_function_contains_functions curfunc_contains_functions
+#define current_function_epilogue_delay_list curfunc_epilogue_delay_list
+#define current_function_has_nonlocal_goto curfunc_has_nonlocal_goto
+#define current_function_has_nonlocal_label curfunc_has_nonlocal_label
+#define current_function_internal_arg_pointer curfunc_internal_arg_pointer
+#define current_function_outgoing_args_size curfunc_outgoing_args_size
+#define current_function_pretend_args_size curfunc_pretend_args_size
+#define current_function_returns_pcc_struct curfunc_returns_pcc_struct
+#define current_function_returns_pointer curfunc_returns_pointer
+#define current_function_uses_const_pool curfunc_uses_const_pool
+#define current_function_uses_pic_offset_table curfunc_uses_pic_offset_table
+#define dbxout_resume_previous_source_file dbxout_resume_previous_src_file
+#define expand_builtin_extract_return_addr expand_builtin_extract_ret_addr
+#define expand_builtin_set_return_addr_reg expand_builtin_set_ret_addr_reg
+#define expand_start_loop_continue_elsewhere expnd_start_loop_cont_elsewhere
+#define flag_schedule_insns_after_reload flag_sched_insns_after_reload
+#define get_dynamic_handler_chain_libfunc get_dynamic_hndlr_chain_libfunc
+#define lookup_name_current_level_global lookup_name_current_level_gbl
+#define maybe_building_objc_message_expr maybe_building_objc_msg_expr
+#define mesg_implicit_function_declaration mesg_implicit_func_declaration
+#define output_deferred_addressed_constants output_deferred_addr_constants
+#define protect_cleanup_actions_with_terminate protect_cleanup_act_w_terminate
+#define reg_overlap_mentioned_for_reload_p reg_overlap_mtnd_for_reload_p
+#define reposition_prologue_and_epilogue_notes repos_prolog_and_epilog_notes
+#define rtx_equal_function_value_matters rtx_equal_func_value_matters
+#define set_new_first_and_last_label_num set_new_first_and_last_lbl_num
+#define thread_prologue_and_epilogue_insns thread_prolog_and_epilog_insns
+#endif
+
+/* We need to avoid the library qsort routine, due to a serious bug
+ in VAXCRTL. (Sorting anything with size that's not a multiple of 4
+ after having previously sorted something that was a multiple of 4
+ can produce wrong results and result in data corruption.) We'll
+ use our own substitute (in vax.c) instead. */
+/* #define QSORT_WORKAROUND */
+#ifdef QSORT_WORKAROUND
+#define qsort not_qsort
+#endif
+
+#ifdef __DECC
+/* DECC$SHR doesn't have VAXCRTL's bugs. */
+#undef QSORT_WORKAROUND
+#undef qsort
+/* Avoid a lot of informational level diagnostics about implicitly
+ declared functions. */
+#include <stdlib.h>
+#include <string.h>
+/* this is for genopinit.c */
+ #pragma message disable (undefescap)
+#endif
+
+#if defined(USE_C_ALLOCA) && !defined(alloca)
+/* Declare alloca() using similar logic to that in alloca.c. */
+#ifdef __STDC__
+extern void *alloca(unsigned);
+#else
+extern char *alloca();
+#endif
+#endif