author     YamaArashi <shadow962@live.com>  2016-02-14 18:43:19 -0800
committer  YamaArashi <shadow962@live.com>  2016-02-14 18:43:19 -0800
commit     7aeb8da2138f7521a78d377343b3a266e1e6f9d6 (patch)
tree       8d1f73d3b850609a2152ce86719c68177558aef9 /gcc
parent     9e217b935f6df9afa8e8fbad9c5c6401b879e54a (diff)
remove delay slot and stack reg code
Diffstat (limited to 'gcc')
-rwxr-xr-x  gcc/Makefile.in  |   10
-rwxr-xr-x  gcc/dwarf2out.c  |   18
-rwxr-xr-x  gcc/expr.c       |    6
-rwxr-xr-x  gcc/final.c      |  467
-rwxr-xr-x  gcc/function.c   |   10
-rwxr-xr-x  gcc/gcse.c       |    9
-rwxr-xr-x  gcc/global.c     |    8
-rwxr-xr-x  gcc/jump.c       |   35
-rwxr-xr-x  gcc/reg-stack.c  | 2931
-rwxr-xr-x  gcc/reload.c     |    5
-rwxr-xr-x  gcc/reload1.c    |  112
-rwxr-xr-x  gcc/reorg.c      | 3525
-rwxr-xr-x  gcc/rtl.h        |   11
-rwxr-xr-x  gcc/stupid.c     |    5
-rwxr-xr-x  gcc/toplev.c     |   96
-rwxr-xr-x  gcc/varasm.c     |    5
16 files changed, 3 insertions, 7250 deletions
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index be919fc..2bdeaa1 100755
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -578,7 +578,6 @@ C_AND_OBJC_OBJS = c-lex.o c-pragma.o c-decl.o c-typeck.o c-convert.o \
# Language-specific object files for C.
C_OBJS = c-parse.o $(C_AND_OBJC_OBJS)
-SCHED_PREFIX = @sched_prefix@
SCHED_CFLAGS = @sched_cflags@
# Language-independent object files.
@@ -588,7 +587,7 @@ OBJS = toplev.o version.o tree.o print-tree.o stor-layout.o fold-const.o \
dwarf2out.o bitmap.o alias.o \
integrate.o jump.o cse.o loop.o unroll.o flow.o stupid.o combine.o varray.o \
regclass.o local-alloc.o global.o reload.o reload1.o caller-save.o gcse.o \
- insn-peep.o reorg.o $(SCHED_PREFIX)sched.o final.o recog.o reg-stack.o \
+ insn-peep.o sched.o final.o recog.o \
insn-opinit.o insn-recog.o insn-extract.o insn-output.o insn-emit.o \
$(CYGNUS-LOCAL-lcm) lcm.o \
insn-attrtab.o $(out_object_file) getpwd.o $(EXTRA_OBJS) convert.o \
@@ -1400,15 +1399,12 @@ reload1.o : reload1.c $(CONFIG_H) system.h $(RTL_H) real.h flags.h $(EXPR_H) \
caller-save.o : caller-save.c $(CONFIG_H) system.h $(RTL_H) flags.h \
$(REGS_H) hard-reg-set.h insn-config.h $(BASIC_BLOCK_H) \
$(RECOG_H) reload.h $(EXPR_H) toplev.h
-reorg.o : reorg.c $(CONFIG_H) system.h $(RTL_H) conditions.h hard-reg-set.h \
- $(BASIC_BLOCK_H) $(REGS_H) insn-config.h insn-attr.h \
- insn-flags.h $(RECOG_H) flags.h output.h $(EXPR_H)
alias.o : alias.c $(CONFIG_H) system.h $(RTL_H) flags.h hard-reg-set.h \
$(REGS_H) toplev.h output.h $(EXPR_H)
regmove.o : regmove.c $(CONFIG_H) system.h $(RTL_H) insn-config.h \
$(RECOG_H) output.h reload.h $(REGS_H) hard-reg-set.h flags.h \
$(EXPR_H) insn-flags.h $(BASIC_BLOCK_H) toplev.h
-$(SCHED_PREFIX)sched.o : $(SCHED_PREFIX)sched.c $(CONFIG_H) system.h $(RTL_H) \
+sched.o : sched.c $(CONFIG_H) system.h $(RTL_H) \
$(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h flags.h insn-config.h \
insn-attr.h toplev.h recog.h
final.o : final.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h $(REGS_H) \
@@ -1418,8 +1414,6 @@ final.o : final.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h $(REGS_H) \
recog.o : recog.c $(CONFIG_H) system.h $(RTL_H) \
$(REGS_H) $(RECOG_H) hard-reg-set.h flags.h insn-config.h insn-attr.h \
insn-flags.h insn-codes.h real.h toplev.h
-reg-stack.o : reg-stack.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) recog.h \
- $(REGS_H) hard-reg-set.h flags.h insn-config.h insn-flags.h toplev.h
dyn-string.o: dyn-string.c dyn-string.h $(CONFIG_H) system.h
$(out_object_file): $(out_file) $(CONFIG_H) $(TREE_H) \
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index b51159b..6c3a49a 100755
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -2398,9 +2398,6 @@ static unsigned pending_types;
/* Record whether the function being analyzed contains inlined functions. */
static int current_function_has_inlines;
-#if 0 && defined (MIPS_DEBUGGING_INFO)
-static int comp_unit_has_inlines;
-#endif
/* A pointer to the ..._DECL node which we have most recently been working
on. We keep this around just in case something about it looks screwy and
@@ -7095,10 +7092,6 @@ add_location_or_const_value_attribute (die, decl)
return;
rtl = eliminate_regs (rtl, 0, NULL_RTX);
-#ifdef LEAF_REG_REMAP
- if (leaf_function)
- leaf_renumber_regs_insn (rtl);
-#endif
switch (GET_CODE (rtl))
{
@@ -8422,17 +8415,6 @@ gen_subprogram_die (decl, context_die)
current_function_has_inlines = 0;
decls_for_scope (outer_scope, subr_die, 0);
-#if 0 && defined (MIPS_DEBUGGING_INFO)
- if (current_function_has_inlines)
- {
- add_AT_flag (subr_die, DW_AT_MIPS_has_inlines, 1);
- if (! comp_unit_has_inlines)
- {
- add_AT_flag (comp_unit_die, DW_AT_MIPS_has_inlines, 1);
- comp_unit_has_inlines = 1;
- }
- }
-#endif
}
pop_decl_scope ();
diff --git a/gcc/expr.c b/gcc/expr.c
index 5d9ff6a..8d1178e 100755
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -9865,12 +9865,6 @@ expand_builtin_apply_args ()
tem = gen_rtx_REG (mode, INCOMING_REGNO (regno));
-#ifdef STACK_REGS
- /* For reg-stack.c's stack register household.
- Compare with a similar piece of code in function.c. */
-
- emit_insn (gen_rtx_USE (mode, tem));
-#endif
emit_move_insn (change_address (registers, mode,
plus_constant (XEXP (registers, 0),
diff --git a/gcc/final.c b/gcc/final.c
index 8e1cc7e..624a231 100755
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -104,11 +104,6 @@ extern struct obstack *rtl_obstack;
#define JUMP_TABLES_IN_TEXT_SECTION 0
#endif
-/* Nonzero means this function is a leaf function, with no function calls.
- This variable exists to be examined in FUNCTION_PROLOGUE
- and FUNCTION_EPILOGUE. Always zero, unless set by some action. */
-int leaf_function;
-
/* Last insn processed by final_scan_insn. */
static rtx debug_insn = 0;
@@ -225,11 +220,6 @@ static int app_on;
rtx final_sequence;
-#ifdef ASSEMBLER_DIALECT
-
-/* Number of the assembler dialect to use, starting at 0. */
-static int dialect_number;
-#endif
/* Indexed by line number, nonzero if there is a note for that line. */
@@ -273,9 +263,6 @@ init_final (filename)
/* END CYGNUS LOCAL */
final_sequence = 0;
-#ifdef ASSEMBLER_DIALECT
- dialect_number = ASSEMBLER_DIALECT;
-#endif
}
/* Called at end of source file,
@@ -312,22 +299,7 @@ app_disable ()
app_on = 0;
}
}
-
-/* Return the number of slots filled in the current
- delayed branch sequence (we don't count the insn needing the
- delay slot). Zero if not in a delayed branch sequence. */
-#ifdef DELAY_SLOTS
-int
-dbr_sequence_length ()
-{
- if (final_sequence != 0)
- return XVECLEN (final_sequence, 0) - 1;
- else
- return 0;
-}
-#endif
-
/* The next two pages contain routines used to compute the length of an insn
and to shorten branches. */
@@ -455,9 +427,6 @@ get_attr_length (insn)
break;
}
-#ifdef ADJUST_INSN_LENGTH
- ADJUST_INSN_LENGTH (insn, length);
-#endif
return length;
#else /* not HAVE_ATTR_length */
return 0;
@@ -891,62 +860,6 @@ shorten_branches (first)
align_tab[i] = seq;
}
}
-#ifdef CASE_VECTOR_SHORTEN_MODE
- if (optimize)
- {
- /* Look for ADDR_DIFF_VECs, and initialize their minimum and maximum
- label fields. */
-
- int min_shuid = INSN_SHUID (get_insns ()) - 1;
- int max_shuid = INSN_SHUID (get_last_insn ()) + 1;
- int rel;
-
- for (insn = first; insn != 0; insn = NEXT_INSN (insn))
- {
- rtx min_lab = NULL_RTX, max_lab = NULL_RTX, pat;
- int len, i, min, max, insn_shuid;
- int min_align;
- addr_diff_vec_flags flags;
-
- if (GET_CODE (insn) != JUMP_INSN
- || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
- continue;
- pat = PATTERN (insn);
- len = XVECLEN (pat, 1);
- if (len <= 0)
- abort ();
- min_align = MAX_CODE_ALIGN;
- for (min = max_shuid, max = min_shuid, i = len - 1; i >= 0; i--)
- {
- rtx lab = XEXP (XVECEXP (pat, 1, i), 0);
- int shuid = INSN_SHUID (lab);
- if (shuid < min)
- {
- min = shuid;
- min_lab = lab;
- }
- if (shuid > max)
- {
- max = shuid;
- max_lab = lab;
- }
- if (min_align > LABEL_TO_ALIGNMENT (lab))
- min_align = LABEL_TO_ALIGNMENT (lab);
- }
- XEXP (pat, 2) = gen_rtx_LABEL_REF (VOIDmode, min_lab);
- XEXP (pat, 3) = gen_rtx_LABEL_REF (VOIDmode, max_lab);
- insn_shuid = INSN_SHUID (insn);
- rel = INSN_SHUID (XEXP (XEXP (pat, 0), 0));
- flags.min_align = min_align;
- flags.base_after_vec = rel > insn_shuid;
- flags.min_after_vec = min > insn_shuid;
- flags.max_after_vec = max > insn_shuid;
- flags.min_after_base = min > rel;
- flags.max_after_base = max > rel;
- ADDR_DIFF_VEC_FLAGS (pat) = flags;
- }
- }
-#endif /* CASE_VECTOR_SHORTEN_MODE */
/* Compute initial lengths, addresses, and varying flags for each insn. */
@@ -998,12 +911,6 @@ shorten_branches (first)
else if (GET_CODE (body) == SEQUENCE)
{
int i;
- int const_delay_slots;
-#ifdef DELAY_SLOTS
- const_delay_slots = const_num_delay_slots (XVECEXP (body, 0, 0));
-#else
- const_delay_slots = 0;
-#endif
/* Inside a delay slot sequence, we do not do any branch shortening
if the shortening could change the number of delay slots
of the branch. */
@@ -1020,16 +927,7 @@ shorten_branches (first)
inner_length = insn_default_length (inner_insn);
insn_lengths[inner_uid] = inner_length;
- if (const_delay_slots)
- {
- if ((varying_length[inner_uid]
- = insn_variable_length_p (inner_insn)) != 0)
- varying_length[uid] = 1;
- insn_addresses[inner_uid] = (insn_current_address +
- insn_lengths[uid]);
- }
- else
- varying_length[inner_uid] = 0;
+ varying_length[inner_uid] = 0;
insn_lengths[uid] += inner_length;
}
}
@@ -1040,9 +938,6 @@ shorten_branches (first)
}
/* If needed, do any adjustment. */
-#ifdef ADJUST_INSN_LENGTH
- ADJUST_INSN_LENGTH (insn, insn_lengths[uid]);
-#endif
}
/* Now loop over all the insns finding varying length insns. For each,
@@ -1058,9 +953,6 @@ shorten_branches (first)
insn = NEXT_INSN (insn))
{
int new_length;
-#ifdef ADJUST_INSN_LENGTH
- int tmp_length;
-#endif
int length_align;
uid = INSN_UID (insn);
@@ -1089,115 +981,6 @@ shorten_branches (first)
insn_last_address = insn_addresses[uid];
insn_addresses[uid] = insn_current_address;
-#ifdef CASE_VECTOR_SHORTEN_MODE
- if (optimize && GET_CODE (insn) == JUMP_INSN
- && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
- {
- rtx body = PATTERN (insn);
- int old_length = insn_lengths[uid];
- rtx rel_lab = XEXP (XEXP (body, 0), 0);
- rtx min_lab = XEXP (XEXP (body, 2), 0);
- rtx max_lab = XEXP (XEXP (body, 3), 0);
- addr_diff_vec_flags flags = ADDR_DIFF_VEC_FLAGS (body);
- int rel_addr = insn_addresses[INSN_UID (rel_lab)];
- int min_addr = insn_addresses[INSN_UID (min_lab)];
- int max_addr = insn_addresses[INSN_UID (max_lab)];
- rtx prev;
- int rel_align = 0;
-
- /* Try to find a known alignment for rel_lab. */
- for (prev = rel_lab;
- prev
- && ! insn_lengths[INSN_UID (prev)]
- && ! (varying_length[INSN_UID (prev)] & 1);
- prev = PREV_INSN (prev))
- if (varying_length[INSN_UID (prev)] & 2)
- {
- rel_align = LABEL_TO_ALIGNMENT (prev);
- break;
- }
-
- /* See the comment on addr_diff_vec_flags in rtl.h for the
- meaning of the flags values. base: REL_LAB vec: INSN */
- /* Anything after INSN has still addresses from the last
- pass; adjust these so that they reflect our current
- estimate for this pass. */
- if (flags.base_after_vec)
- rel_addr += insn_current_address - insn_last_address;
- if (flags.min_after_vec)
- min_addr += insn_current_address - insn_last_address;
- if (flags.max_after_vec)
- max_addr += insn_current_address - insn_last_address;
- /* We want to know the worst case, i.e. lowest possible value
- for the offset of MIN_LAB. If MIN_LAB is after REL_LAB,
- its offset is positive, and we have to be wary of code shrink;
- otherwise, it is negative, and we have to be vary of code
- size increase. */
- if (flags.min_after_base)
- {
- /* If INSN is between REL_LAB and MIN_LAB, the size
- changes we are about to make can change the alignment
- within the observed offset, therefore we have to break
- it up into two parts that are independent. */
- if (! flags.base_after_vec && flags.min_after_vec)
- {
- min_addr -= align_fuzz (rel_lab, insn, rel_align, 0);
- min_addr -= align_fuzz (insn, min_lab, 0, 0);
- }
- else
- min_addr -= align_fuzz (rel_lab, min_lab, rel_align, 0);
- }
- else
- {
- if (flags.base_after_vec && ! flags.min_after_vec)
- {
- min_addr -= align_fuzz (min_lab, insn, 0, ~0);
- min_addr -= align_fuzz (insn, rel_lab, 0, ~0);
- }
- else
- min_addr -= align_fuzz (min_lab, rel_lab, 0, ~0);
- }
- /* Likewise, determine the highest lowest possible value
- for the offset of MAX_LAB. */
- if (flags.max_after_base)
- {
- if (! flags.base_after_vec && flags.max_after_vec)
- {
- max_addr += align_fuzz (rel_lab, insn, rel_align, ~0);
- max_addr += align_fuzz (insn, max_lab, 0, ~0);
- }
- else
- max_addr += align_fuzz (rel_lab, max_lab, rel_align, ~0);
- }
- else
- {
- if (flags.base_after_vec && ! flags.max_after_vec)
- {
- max_addr += align_fuzz (max_lab, insn, 0, 0);
- max_addr += align_fuzz (insn, rel_lab, 0, 0);
- }
- else
- max_addr += align_fuzz (max_lab, rel_lab, 0, 0);
- }
- PUT_MODE (body, CASE_VECTOR_SHORTEN_MODE (min_addr - rel_addr,
- max_addr - rel_addr,
- body));
- if (JUMP_TABLES_IN_TEXT_SECTION
-#if !defined(READONLY_DATA_SECTION)
- || 1
-#endif
- )
- {
- insn_lengths[uid]
- = (XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body)));
- insn_current_address += insn_lengths[uid];
- if (insn_lengths[uid] != old_length)
- something_changed = 1;
- }
-
- continue;
- }
-#endif /* CASE_VECTOR_SHORTEN_MODE */
if (! (varying_length[uid]))
{
@@ -1240,12 +1023,6 @@ shorten_branches (first)
insn_current_address += new_length;
}
-#ifdef ADJUST_INSN_LENGTH
- /* If needed, do any adjustment. */
- tmp_length = new_length;
- ADJUST_INSN_LENGTH (insn, new_length);
- insn_current_address += (new_length - tmp_length);
-#endif
if (new_length != insn_lengths[uid])
{
@@ -1309,18 +1086,6 @@ final_start_function (first, file, optimize)
this_is_asm_operands = 0;
-#ifdef NON_SAVING_SETJMP
- /* A function that calls setjmp should save and restore all the
- call-saved registers on a system where longjmp clobbers them. */
- if (NON_SAVING_SETJMP && current_function_calls_setjmp)
- {
- int i;
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (!call_used_regs[i])
- regs_ever_live[i] = 1;
- }
-#endif
/* Initial line number is supposed to be output
before the function's prologue and label
@@ -1341,10 +1106,6 @@ final_start_function (first, file, optimize)
if (NOTE_LINE_NUMBER (first) != NOTE_INSN_DELETED)
output_source_line (file, first);
-#ifdef LEAF_REG_REMAP
- if (leaf_function)
- leaf_renumber_regs (first);
-#endif
#if defined (DWARF2_UNWIND_INFO) && defined (HAVE_prologue)
if (dwarf2out_do_frame ())
@@ -1696,16 +1457,9 @@ final_scan_insn (insn, file, optimize, prescan, nopeepholes)
if (CODE_LABEL_NUMBER (insn) <= max_labelno)
{
int align = LABEL_TO_ALIGNMENT (insn);
-#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
- int max_skip = LABEL_TO_MAX_SKIP (insn);
-#endif
if (align && NEXT_INSN (insn))
-#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
- ASM_OUTPUT_MAX_SKIP_ALIGN (file, align, max_skip);
-#else
ASM_OUTPUT_ALIGN (file, align);
-#endif
}
#ifdef HAVE_cc0
CC_STATUS_INIT;
@@ -2052,7 +1806,6 @@ final_scan_insn (insn, file, optimize, prescan, nopeepholes)
}
#endif
-#ifndef STACK_REGS
/* Don't bother outputting obvious no-ops, even without -O.
This optimization is fast and doesn't interfere with debugging.
Don't do this if the insn is in a delay slot, since this
@@ -2064,7 +1817,6 @@ final_scan_insn (insn, file, optimize, prescan, nopeepholes)
&& GET_CODE (SET_DEST (body)) == REG
&& REGNO (SET_SRC (body)) == REGNO (SET_DEST (body)))
break;
-#endif
#ifdef HAVE_cc0
/* If this is a conditional branch, maybe modify it
@@ -2715,33 +2467,6 @@ output_asm_insn (template, operands)
#endif
break;
-#ifdef ASSEMBLER_DIALECT
- case '{':
- {
- register int i;
-
- /* If we want the first dialect, do nothing. Otherwise, skip
- DIALECT_NUMBER of strings ending with '|'. */
- for (i = 0; i < dialect_number; i++)
- {
- while (*p && *p++ != '|')
- ;
-
- if (*p == '|')
- p++;
- }
- }
- break;
-
- case '|':
- /* Skip to close brace. */
- while (*p && *p++ != '}')
- ;
- break;
-
- case '}':
- break;
-#endif
case '%':
/* %% outputs a single %. */
@@ -3031,33 +2756,6 @@ asm_fprintf VPROTO((FILE *file, char *p, ...))
while ((c = *p++))
switch (c)
{
-#ifdef ASSEMBLER_DIALECT
- case '{':
- {
- int i;
-
- /* If we want the first dialect, do nothing. Otherwise, skip
- DIALECT_NUMBER of strings ending with '|'. */
- for (i = 0; i < dialect_number; i++)
- {
- while (*p && *p++ != '|')
- ;
-
- if (*p == '|')
- p++;
- }
- }
- break;
-
- case '|':
- /* Skip to close brace. */
- while (*p && *p++ != '}')
- ;
- break;
-
- case '}':
- break;
-#endif
case '%':
c = *p++;
@@ -3248,7 +2946,6 @@ split_double (value, first, second)
}
else
{
-#ifdef REAL_ARITHMETIC
REAL_VALUE_TYPE r; long l[2];
REAL_VALUE_FROM_CONST_DOUBLE (r, value);
@@ -3260,30 +2957,6 @@ split_double (value, first, second)
*first = GEN_INT ((HOST_WIDE_INT) l[0]);
*second = GEN_INT ((HOST_WIDE_INT) l[1]);
-#else
- if ((HOST_FLOAT_FORMAT != TARGET_FLOAT_FORMAT
- || HOST_BITS_PER_WIDE_INT != BITS_PER_WORD)
- && ! flag_pretend_float)
- abort ();
-
- if (
-#ifdef HOST_WORDS_BIG_ENDIAN
- WORDS_BIG_ENDIAN
-#else
- ! WORDS_BIG_ENDIAN
-#endif
- )
- {
- /* Host and target agree => no need to swap. */
- *first = GEN_INT (CONST_DOUBLE_LOW (value));
- *second = GEN_INT (CONST_DOUBLE_HIGH (value));
- }
- else
- {
- *second = GEN_INT (CONST_DOUBLE_LOW (value));
- *first = GEN_INT (CONST_DOUBLE_HIGH (value));
- }
-#endif /* no REAL_ARITHMETIC */
}
}
@@ -3315,141 +2988,3 @@ leaf_function_p ()
return 1;
}
-
-/* On some machines, a function with no call insns
- can run faster if it doesn't create its own register window.
- When output, the leaf function should use only the "output"
- registers. Ordinarily, the function would be compiled to use
- the "input" registers to find its arguments; it is a candidate
- for leaf treatment if it uses only the "input" registers.
- Leaf function treatment means renumbering so the function
- uses the "output" registers instead. */
-
-#ifdef LEAF_REGISTERS
-
-static char permitted_reg_in_leaf_functions[] = LEAF_REGISTERS;
-
-/* Return 1 if this function uses only the registers that can be
- safely renumbered. */
-
-int
-only_leaf_regs_used ()
-{
- int i;
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if ((regs_ever_live[i] || global_regs[i])
- && ! permitted_reg_in_leaf_functions[i])
- return 0;
-
- if (current_function_uses_pic_offset_table
- && pic_offset_table_rtx != 0
- && GET_CODE (pic_offset_table_rtx) == REG
- && ! permitted_reg_in_leaf_functions[REGNO (pic_offset_table_rtx)])
- return 0;
-
- return 1;
-}
-
-/* Scan all instructions and renumber all registers into those
- available in leaf functions. */
-
-static void
-leaf_renumber_regs (first)
- rtx first;
-{
- rtx insn;
-
- /* Renumber only the actual patterns.
- The reg-notes can contain frame pointer refs,
- and renumbering them could crash, and should not be needed. */
- for (insn = first; insn; insn = NEXT_INSN (insn))
- if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
- leaf_renumber_regs_insn (PATTERN (insn));
- for (insn = current_function_epilogue_delay_list; insn; insn = XEXP (insn, 1))
- if (GET_RTX_CLASS (GET_CODE (XEXP (insn, 0))) == 'i')
- leaf_renumber_regs_insn (PATTERN (XEXP (insn, 0)));
-}
-
-/* Scan IN_RTX and its subexpressions, and renumber all regs into those
- available in leaf functions. */
-
-void
-leaf_renumber_regs_insn (in_rtx)
- register rtx in_rtx;
-{
- register int i, j;
- register char *format_ptr;
-
- if (in_rtx == 0)
- return;
-
- /* Renumber all input-registers into output-registers.
- renumbered_regs would be 1 for an output-register;
- they */
-
- if (GET_CODE (in_rtx) == REG)
- {
- int newreg;
-
- /* Don't renumber the same reg twice. */
- if (in_rtx->used)
- return;
-
- newreg = REGNO (in_rtx);
- /* Don't try to renumber pseudo regs. It is possible for a pseudo reg
- to reach here as part of a REG_NOTE. */
- if (newreg >= FIRST_PSEUDO_REGISTER)
- {
- in_rtx->used = 1;
- return;
- }
- newreg = LEAF_REG_REMAP (newreg);
- if (newreg < 0)
- abort ();
- regs_ever_live[REGNO (in_rtx)] = 0;
- regs_ever_live[newreg] = 1;
- REGNO (in_rtx) = newreg;
- in_rtx->used = 1;
- }
-
- if (GET_RTX_CLASS (GET_CODE (in_rtx)) == 'i')
- {
- /* Inside a SEQUENCE, we find insns.
- Renumber just the patterns of these insns,
- just as we do for the top-level insns. */
- leaf_renumber_regs_insn (PATTERN (in_rtx));
- return;
- }
-
- format_ptr = GET_RTX_FORMAT (GET_CODE (in_rtx));
-
- for (i = 0; i < GET_RTX_LENGTH (GET_CODE (in_rtx)); i++)
- switch (*format_ptr++)
- {
- case 'e':
- leaf_renumber_regs_insn (XEXP (in_rtx, i));
- break;
-
- case 'E':
- if (NULL != XVEC (in_rtx, i))
- {
- for (j = 0; j < XVECLEN (in_rtx, i); j++)
- leaf_renumber_regs_insn (XVECEXP (in_rtx, i, j));
- }
- break;
-
- case 'S':
- case 's':
- case '0':
- case 'i':
- case 'w':
- case 'n':
- case 'u':
- break;
-
- default:
- abort ();
- }
-}
-#endif
diff --git a/gcc/function.c b/gcc/function.c
index 6e37df4..10f8760 100755
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -4298,16 +4298,6 @@ assign_parms (fndecl, second_time)
}
#endif /* 0 */
-#ifdef STACK_REGS
- /* We need this "use" info, because the gcc-register->stack-register
- converter in reg-stack.c needs to know which registers are active
- at the start of the function call. The actual parameter loading
- instructions are not always available then anymore, since they might
- have been optimised away. */
-
- if (GET_CODE (entry_parm) == REG && !(hide_last_arg && last_named))
- emit_insn (gen_rtx_USE (GET_MODE (entry_parm), entry_parm));
-#endif
/* ENTRY_PARM is an RTX for the parameter as it arrives,
in the mode in which it arrives.
diff --git a/gcc/gcse.c b/gcc/gcse.c
index e8864ef..115a4ba 100755
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -2215,15 +2215,6 @@ compute_hash_table (set_p)
insn && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
- {
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- record_last_reg_set_info (insn, regno);
- continue;
- }
-#endif
if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
continue;
diff --git a/gcc/global.c b/gcc/global.c
index d31f615..715ee2c 100755
--- a/gcc/global.c
+++ b/gcc/global.c
@@ -689,14 +689,6 @@ global_conflicts ()
record_conflicts (block_start_allocnos, ax);
-#ifdef STACK_REGS
- /* Pseudos can't go in stack regs at the start of a basic block
- that can be reached through a computed goto, since reg-stack
- can't handle computed gotos. */
- if (basic_block_computed_jump_target[b])
- for (ax = FIRST_STACK_REG; ax <= LAST_STACK_REG; ax++)
- record_one_conflict (ax);
-#endif
}
insn = BLOCK_HEAD (b);
diff --git a/gcc/jump.c b/gcc/jump.c
index 85452dd..55b6517 100755
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -2851,41 +2851,6 @@ find_cross_jump (e1, e2, minimum, f1, f2)
CALL_INSN_FUNCTION_USAGE (i2)))
lose = 1;
-#ifdef STACK_REGS
- /* If cross_jump_death_matters is not 0, the insn's mode
- indicates whether or not the insn contains any stack-like
- regs. */
-
- if (!lose && cross_jump_death_matters && GET_MODE (i1) == QImode)
- {
- /* If register stack conversion has already been done, then
- death notes must also be compared before it is certain that
- the two instruction streams match. */
-
- rtx note;
- HARD_REG_SET i1_regset, i2_regset;
-
- CLEAR_HARD_REG_SET (i1_regset);
- CLEAR_HARD_REG_SET (i2_regset);
-
- for (note = REG_NOTES (i1); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_DEAD
- && STACK_REG_P (XEXP (note, 0)))
- SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0)));
-
- for (note = REG_NOTES (i2); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_DEAD
- && STACK_REG_P (XEXP (note, 0)))
- SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0)));
-
- GO_IF_HARD_REG_EQUAL (i1_regset, i2_regset, done);
-
- lose = 1;
-
- done:
- ;
- }
-#endif
/* Don't allow old-style asm or volatile extended asms to be accepted
for cross jumping purposes. It is conceptually correct to allow
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
deleted file mode 100755
index b8fb48e..0000000
--- a/gcc/reg-stack.c
+++ /dev/null
@@ -1,2931 +0,0 @@
-/* Register to Stack convert for GNU compiler.
- Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
-/* This pass converts stack-like registers from the "flat register
- file" model that gcc uses, to a stack convention that the 387 uses.
-
- * The form of the input:
-
- On input, the function consists of insn that have had their
- registers fully allocated to a set of "virtual" registers. Note that
- the word "virtual" is used differently here than elsewhere in gcc: for
- each virtual stack reg, there is a hard reg, but the mapping between
- them is not known until this pass is run. On output, hard register
- numbers have been substituted, and various pop and exchange insns have
- been emitted. The hard register numbers and the virtual register
- numbers completely overlap - before this pass, all stack register
- numbers are virtual, and afterward they are all hard.
-
- The virtual registers can be manipulated normally by gcc, and their
- semantics are the same as for normal registers. After the hard
- register numbers are substituted, the semantics of an insn containing
- stack-like regs are not the same as for an insn with normal regs: for
- instance, it is not safe to delete an insn that appears to be a no-op
- move. In general, no insn containing hard regs should be changed
- after this pass is done.
-
- * The form of the output:
-
- After this pass, hard register numbers represent the distance from
- the current top of stack to the desired register. A reference to
- FIRST_STACK_REG references the top of stack, FIRST_STACK_REG + 1,
- represents the register just below that, and so forth. Also, REG_DEAD
- notes indicate whether or not a stack register should be popped.
-
- A "swap" insn looks like a parallel of two patterns, where each
- pattern is a SET: one sets A to B, the other B to A.
-
- A "push" or "load" insn is a SET whose SET_DEST is FIRST_STACK_REG
- and whose SET_DEST is REG or MEM. Any other SET_DEST, such as PLUS,
- will replace the existing stack top, not push a new value.
-
- A store insn is a SET whose SET_DEST is FIRST_STACK_REG, and whose
- SET_SRC is REG or MEM.
-
- The case where the SET_SRC and SET_DEST are both FIRST_STACK_REG
- appears ambiguous. As a special case, the presence of a REG_DEAD note
- for FIRST_STACK_REG differentiates between a load insn and a pop.
-
- If a REG_DEAD is present, the insn represents a "pop" that discards
- the top of the register stack. If there is no REG_DEAD note, then the
- insn represents a "dup" or a push of the current top of stack onto the
- stack.
-
- * Methodology:
-
- Existing REG_DEAD and REG_UNUSED notes for stack registers are
- deleted and recreated from scratch. REG_DEAD is never created for a
- SET_DEST, only REG_UNUSED.
-
- Before life analysis, the mode of each insn is set based on whether
- or not any stack registers are mentioned within that insn. VOIDmode
- means that no regs are mentioned anyway, and QImode means that at
- least one pattern within the insn mentions stack registers. This
- information is valid until after reg_to_stack returns, and is used
- from jump_optimize.
-
- * asm_operands:
-
- There are several rules on the usage of stack-like regs in
- asm_operands insns. These rules apply only to the operands that are
- stack-like regs:
-
- 1. Given a set of input regs that die in an asm_operands, it is
- necessary to know which are implicitly popped by the asm, and
- which must be explicitly popped by gcc.
-
- An input reg that is implicitly popped by the asm must be
- explicitly clobbered, unless it is constrained to match an
- output operand.
-
- 2. For any input reg that is implicitly popped by an asm, it is
- necessary to know how to adjust the stack to compensate for the pop.
- If any non-popped input is closer to the top of the reg-stack than
- the implicitly popped reg, it would not be possible to know what the
- stack looked like - it's not clear how the rest of the stack "slides
- up".
-
- All implicitly popped input regs must be closer to the top of
- the reg-stack than any input that is not implicitly popped.
-
- 3. It is possible that if an input dies in an insn, reload might
- use the input reg for an output reload. Consider this example:
-
- asm ("foo" : "=t" (a) : "f" (b));
-
- This asm says that input B is not popped by the asm, and that
- the asm pushes a result onto the reg-stack, ie, the stack is one
- deeper after the asm than it was before. But, it is possible that
- reload will think that it can use the same reg for both the input and
- the output, if input B dies in this insn.
-
- If any input operand uses the "f" constraint, all output reg
- constraints must use the "&" earlyclobber.
-
- The asm above would be written as
-
- asm ("foo" : "=&t" (a) : "f" (b));
-
- 4. Some operands need to be in particular places on the stack. All
- output operands fall in this category - there is no other way to
- know which regs the outputs appear in unless the user indicates
- this in the constraints.
-
- Output operands must specifically indicate which reg an output
- appears in after an asm. "=f" is not allowed: the operand
- constraints must select a class with a single reg.
-
- 5. Output operands may not be "inserted" between existing stack regs.
- Since no 387 opcode uses a read/write operand, all output operands
- are dead before the asm_operands, and are pushed by the asm_operands.
- It makes no sense to push anywhere but the top of the reg-stack.
-
- Output operands must start at the top of the reg-stack: output
- operands may not "skip" a reg.
-
- 6. Some asm statements may need extra stack space for internal
- calculations. This can be guaranteed by clobbering stack registers
- unrelated to the inputs and outputs.
-
- Here are a couple of reasonable asms to want to write. This asm
- takes one input, which is internally popped, and produces two outputs.
-
- asm ("fsincos" : "=t" (cos), "=u" (sin) : "0" (inp));
-
- This asm takes two inputs, which are popped by the fyl2xp1 opcode,
- and replaces them with one output. The user must code the "st(1)"
- clobber for reg-stack.c to know that fyl2xp1 pops both inputs.
-
- asm ("fyl2xp1" : "=t" (result) : "0" (x), "u" (y) : "st(1)");
-
- */
-
-#include "config.h"
-#include "system.h"
-#include "tree.h"
-#include "rtl.h"
-#include "insn-config.h"
-#include "regs.h"
-#include "hard-reg-set.h"
-#include "flags.h"
-#include "insn-flags.h"
-#include "recog.h"
-#include "toplev.h"
-
-#ifdef STACK_REGS
-
-#define REG_STACK_SIZE (LAST_STACK_REG - FIRST_STACK_REG + 1)
-
-/* This is the basic stack record. TOP is an index into REG[] such
- that REG[TOP] is the top of stack. If TOP is -1 the stack is empty.
-
- If TOP is -2, REG[] is not yet initialized. Stack initialization
- consists of placing each live reg in array `reg' and setting `top'
- appropriately.
-
- REG_SET indicates which registers are live. */
-
-typedef struct stack_def
-{
- int top; /* index to top stack element */
- HARD_REG_SET reg_set; /* set of live registers */
- char reg[REG_STACK_SIZE]; /* register - stack mapping */
-} *stack;
-
-/* highest instruction uid */
-static int max_uid = 0;
-
-/* Number of basic blocks in the current function. */
-static int blocks;
-
-/* Element N is first insn in basic block N.
- This info lasts until we finish compiling the function. */
-static rtx *block_begin;
-
-/* Element N is last insn in basic block N.
- This info lasts until we finish compiling the function. */
-static rtx *block_end;
-
-/* Element N is nonzero if control can drop into basic block N */
-static char *block_drops_in;
-
-/* Element N says all about the stack at entry block N */
-static stack block_stack_in;
-
-/* Element N says all about the stack life at the end of block N */
-static HARD_REG_SET *block_out_reg_set;
-
-/* This is where the BLOCK_NUM values are really stored. This is set
- up by find_blocks and used there and in life_analysis. It can be used
- later, but only to look up an insn that is the head or tail of some
- block. life_analysis and the stack register conversion process can
- add insns within a block. */
-static int *block_number;
-
-/* This is the register file for all register after conversion */
-static rtx
- FP_mode_reg[LAST_STACK_REG+1-FIRST_STACK_REG][(int) MAX_MACHINE_MODE];
-
-#define FP_MODE_REG(regno,mode) \
- (FP_mode_reg[(regno)-FIRST_STACK_REG][(int)(mode)])
-
-/* Get the basic block number of an insn. See note at block_number
- definition are validity of this information. */
-
-#define BLOCK_NUM(INSN) \
- ((INSN_UID (INSN) > max_uid) \
- ? (abort() , -1) : block_number[INSN_UID (INSN)])
-
-extern rtx forced_labels;
-
-/* Forward declarations */
-
-static void mark_regs_pat PROTO((rtx, HARD_REG_SET *));
-static void straighten_stack PROTO((rtx, stack));
-static void pop_stack PROTO((stack, int));
-static void record_label_references PROTO((rtx, rtx));
-static rtx *get_true_reg PROTO((rtx *));
-
-static void record_asm_reg_life PROTO((rtx, stack));
-static void record_reg_life_pat PROTO((rtx, HARD_REG_SET *,
- HARD_REG_SET *, int));
-static int get_asm_operand_n_inputs PROTO((rtx));
-static void record_reg_life PROTO((rtx, int, stack));
-static void find_blocks PROTO((rtx));
-static rtx stack_result PROTO((tree));
-static void stack_reg_life_analysis PROTO((rtx, HARD_REG_SET *));
-static void replace_reg PROTO((rtx *, int));
-static void remove_regno_note PROTO((rtx, enum reg_note, int));
-static int get_hard_regnum PROTO((stack, rtx));
-static void delete_insn_for_stacker PROTO((rtx));
-static rtx emit_pop_insn PROTO((rtx, stack, rtx, rtx (*) ()));
-static void emit_swap_insn PROTO((rtx, stack, rtx));
-static void move_for_stack_reg PROTO((rtx, stack, rtx));
-static void swap_rtx_condition PROTO((rtx));
-static void compare_for_stack_reg PROTO((rtx, stack, rtx));
-static void subst_stack_regs_pat PROTO((rtx, stack, rtx));
-static void subst_asm_stack_regs PROTO((rtx, stack));
-static void subst_stack_regs PROTO((rtx, stack));
-static void change_stack PROTO((rtx, stack, stack, rtx (*) ()));
-
-static void goto_block_pat PROTO((rtx, stack, rtx));
-static void convert_regs PROTO((void));
-static void print_blocks PROTO((FILE *, rtx, rtx));
-static void dump_stack_info PROTO((FILE *));
-
-/* Mark all registers needed for this pattern. */
-
-static void
-mark_regs_pat (pat, set)
- rtx pat;
- HARD_REG_SET *set;
-{
- enum machine_mode mode;
- register int regno;
- register int count;
-
- if (GET_CODE (pat) == SUBREG)
- {
- mode = GET_MODE (pat);
- regno = SUBREG_WORD (pat);
- regno += REGNO (SUBREG_REG (pat));
- }
- else
- regno = REGNO (pat), mode = GET_MODE (pat);
-
- for (count = HARD_REGNO_NREGS (regno, mode);
- count; count--, regno++)
- SET_HARD_REG_BIT (*set, regno);
-}
-
-/* Reorganise the stack into ascending numbers,
- after this insn. */
-
-static void
-straighten_stack (insn, regstack)
- rtx insn;
- stack regstack;
-{
- struct stack_def temp_stack;
- int top;
-
- /* If there is only a single register on the stack, then the stack is
- already in increasing order and no reorganization is needed.
-
- Similarly if the stack is empty. */
- if (regstack->top <= 0)
- return;
-
- temp_stack.reg_set = regstack->reg_set;
-
- for (top = temp_stack.top = regstack->top; top >= 0; top--)
- temp_stack.reg[top] = FIRST_STACK_REG + temp_stack.top - top;
-
- change_stack (insn, regstack, &temp_stack, emit_insn_after);
-}
-
-/* Pop a register from the stack */
-
-static void
-pop_stack (regstack, regno)
- stack regstack;
- int regno;
-{
- int top = regstack->top;
-
- CLEAR_HARD_REG_BIT (regstack->reg_set, regno);
- regstack->top--;
- /* If regno was not at the top of stack then adjust stack */
- if (regstack->reg [top] != regno)
- {
- int i;
- for (i = regstack->top; i >= 0; i--)
- if (regstack->reg [i] == regno)
- {
- int j;
- for (j = i; j < top; j++)
- regstack->reg [j] = regstack->reg [j + 1];
- break;
- }
- }
-}
-
-/* Return non-zero if any stack register is mentioned somewhere within PAT. */
-
-int
-stack_regs_mentioned_p (pat)
- rtx pat;
-{
- register char *fmt;
- register int i;
-
- if (STACK_REG_P (pat))
- return 1;
-
- fmt = GET_RTX_FORMAT (GET_CODE (pat));
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'E')
- {
- register int j;
-
- for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
- if (stack_regs_mentioned_p (XVECEXP (pat, i, j)))
- return 1;
- }
- else if (fmt[i] == 'e' && stack_regs_mentioned_p (XEXP (pat, i)))
- return 1;
- }
-
- return 0;
-}
-
-/* Convert register usage from "flat" register file usage to a "stack
- register file. FIRST is the first insn in the function, FILE is the
- dump file, if used.
-
- First compute the beginning and end of each basic block. Do a
- register life analysis on the stack registers, recording the result
- for the head and tail of each basic block. The convert each insn one
- by one. Run a last jump_optimize() pass, if optimizing, to eliminate
- any cross-jumping created when the converter inserts pop insns.*/
-
-void
-reg_to_stack (first, file)
- rtx first;
- FILE *file;
-{
- register rtx insn;
- register int i;
- int stack_reg_seen = 0;
- enum machine_mode mode;
- HARD_REG_SET stackentry;
-
- CLEAR_HARD_REG_SET (stackentry);
-
- {
- static int initialised;
- if (!initialised)
- {
-#if 0
- initialised = 1; /* This array can not have been previously
- initialised, because the rtx's are
- thrown away between compilations of
- functions. */
-#endif
- for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
- {
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_FLOAT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
- FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
- for (mode = GET_CLASS_NARROWEST_MODE (MODE_COMPLEX_FLOAT); mode != VOIDmode;
- mode = GET_MODE_WIDER_MODE (mode))
- FP_MODE_REG (i, mode) = gen_rtx_REG (mode, i);
- }
- }
- }
-
- /* Count the basic blocks. Also find maximum insn uid. */
- {
- register RTX_CODE prev_code = BARRIER;
- register RTX_CODE code;
- register int before_function_beg = 1;
-
- max_uid = 0;
- blocks = 0;
- for (insn = first; insn; insn = NEXT_INSN (insn))
- {
- /* Note that this loop must select the same block boundaries
- as code in find_blocks. Also note that this code is not the
- same as that used in flow.c. */
-
- if (INSN_UID (insn) > max_uid)
- max_uid = INSN_UID (insn);
-
- code = GET_CODE (insn);
-
- if (code == CODE_LABEL
- || (prev_code != INSN
- && prev_code != CALL_INSN
- && prev_code != CODE_LABEL
- && GET_RTX_CLASS (code) == 'i'))
- blocks++;
-
- if (code == NOTE && NOTE_LINE_NUMBER (insn) == NOTE_INSN_FUNCTION_BEG)
- before_function_beg = 0;
-
- /* Remember whether or not this insn mentions an FP regs.
- Check JUMP_INSNs too, in case someone creates a funny PARALLEL. */
-
- if (GET_RTX_CLASS (code) == 'i'
- && stack_regs_mentioned_p (PATTERN (insn)))
- {
- stack_reg_seen = 1;
- PUT_MODE (insn, QImode);
-
- /* Note any register passing parameters. */
-
- if (before_function_beg && code == INSN
- && GET_CODE (PATTERN (insn)) == USE)
- record_reg_life_pat (PATTERN (insn), (HARD_REG_SET *) 0,
- &stackentry, 1);
- }
- else
- PUT_MODE (insn, VOIDmode);
-
- if (code == CODE_LABEL)
- LABEL_REFS (insn) = insn; /* delete old chain */
-
- if (code != NOTE)
- prev_code = code;
- }
- }
-
- /* If no stack register reference exists in this insn, there isn't
- anything to convert. */
-
- if (! stack_reg_seen)
- return;
-
- /* If there are stack registers, there must be at least one block. */
-
- if (! blocks)
- abort ();
-
- /* Allocate some tables that last till end of compiling this function
- and some needed only in find_blocks and life_analysis. */
-
- block_begin = (rtx *) alloca (blocks * sizeof (rtx));
- block_end = (rtx *) alloca (blocks * sizeof (rtx));
- block_drops_in = (char *) alloca (blocks);
-
- block_stack_in = (stack) alloca (blocks * sizeof (struct stack_def));
- block_out_reg_set = (HARD_REG_SET *) alloca (blocks * sizeof (HARD_REG_SET));
- zero_memory ((char *) block_stack_in, blocks * sizeof (struct stack_def));
- zero_memory ((char *) block_out_reg_set, blocks * sizeof (HARD_REG_SET));
-
- block_number = (int *) alloca ((max_uid + 1) * sizeof (int));
-
- find_blocks (first);
- stack_reg_life_analysis (first, &stackentry);
-
- /* Dump the life analysis debug information before jump
- optimization, as that will destroy the LABEL_REFS we keep the
- information in. */
-
- if (file)
- dump_stack_info (file);
-
- convert_regs ();
-
- if (optimize)
- jump_optimize (first, 2, 0, 0);
-}
-
-/* Check PAT, which is in INSN, for LABEL_REFs. Add INSN to the
- label's chain of references, and note which insn contains each
- reference. */
-
-static void
-record_label_references (insn, pat)
- rtx insn, pat;
-{
- register enum rtx_code code = GET_CODE (pat);
- register int i;
- register char *fmt;
-
- if (code == LABEL_REF)
- {
- register rtx label = XEXP (pat, 0);
- register rtx ref;
-
- if (GET_CODE (label) != CODE_LABEL)
- abort ();
-
- /* If this is an undefined label, LABEL_REFS (label) contains
- garbage. */
- if (INSN_UID (label) == 0)
- return;
-
- /* Don't make a duplicate in the code_label's chain. */
-
- for (ref = LABEL_REFS (label);
- ref && ref != label;
- ref = LABEL_NEXTREF (ref))
- if (CONTAINING_INSN (ref) == insn)
- return;
-
- CONTAINING_INSN (pat) = insn;
- LABEL_NEXTREF (pat) = LABEL_REFS (label);
- LABEL_REFS (label) = pat;
-
- return;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- record_label_references (insn, XEXP (pat, i));
- if (fmt[i] == 'E')
- {
- register int j;
- for (j = 0; j < XVECLEN (pat, i); j++)
- record_label_references (insn, XVECEXP (pat, i, j));
- }
- }
-}
-
-/* Return a pointer to the REG expression within PAT. If PAT is not a
- REG, possible enclosed by a conversion rtx, return the inner part of
- PAT that stopped the search. */
-
-static rtx *
-get_true_reg (pat)
- rtx *pat;
-{
- for (;;)
- switch (GET_CODE (*pat))
- {
- case SUBREG:
- /* eliminate FP subregister accesses in favour of the
- actual FP register in use. */
- {
- rtx subreg;
- if (FP_REG_P (subreg = SUBREG_REG (*pat)))
- {
- *pat = FP_MODE_REG (REGNO (subreg) + SUBREG_WORD (*pat),
- GET_MODE (subreg));
- default:
- return pat;
- }
- }
- case FLOAT:
- case FIX:
- case FLOAT_EXTEND:
- pat = & XEXP (*pat, 0);
- }
-}
-
-/* Record the life info of each stack reg in INSN, updating REGSTACK.
- N_INPUTS is the number of inputs; N_OUTPUTS the outputs.
- OPERANDS is an array of all operands for the insn, and is assumed to
- contain all output operands, then all inputs operands.
-
- There are many rules that an asm statement for stack-like regs must
- follow. Those rules are explained at the top of this file: the rule
- numbers below refer to that explanation. */
-
-static void
-record_asm_reg_life (insn, regstack)
- rtx insn;
- stack regstack;
-{
- int i;
- int n_clobbers;
- int malformed_asm = 0;
- rtx body = PATTERN (insn);
-
- int reg_used_as_output[FIRST_PSEUDO_REGISTER];
- int implicitly_dies[FIRST_PSEUDO_REGISTER];
- int alt;
-
- rtx *clobber_reg;
- int n_inputs, n_outputs;
-
- /* Find out what the constraints require. If no constraint
- alternative matches, this asm is malformed. */
- extract_insn (insn);
- constrain_operands (1);
- alt = which_alternative;
-
- preprocess_constraints ();
-
- n_inputs = get_asm_operand_n_inputs (body);
- n_outputs = recog_n_operands - n_inputs;
-
- if (alt < 0)
- {
- malformed_asm = 1;
- /* Avoid further trouble with this insn. */
- PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
- PUT_MODE (insn, VOIDmode);
- return;
- }
-
- /* Strip SUBREGs here to make the following code simpler. */
- for (i = 0; i < recog_n_operands; i++)
- if (GET_CODE (recog_operand[i]) == SUBREG
- && GET_CODE (SUBREG_REG (recog_operand[i])) == REG)
- recog_operand[i] = SUBREG_REG (recog_operand[i]);
-
- /* Set up CLOBBER_REG. */
-
- n_clobbers = 0;
-
- if (GET_CODE (body) == PARALLEL)
- {
- clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx));
-
- for (i = 0; i < XVECLEN (body, 0); i++)
- if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
- {
- rtx clobber = XVECEXP (body, 0, i);
- rtx reg = XEXP (clobber, 0);
-
- if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
- reg = SUBREG_REG (reg);
-
- if (STACK_REG_P (reg))
- {
- clobber_reg[n_clobbers] = reg;
- n_clobbers++;
- }
- }
- }
-
- /* Enforce rule #4: Output operands must specifically indicate which
- reg an output appears in after an asm. "=f" is not allowed: the
- operand constraints must select a class with a single reg.
-
- Also enforce rule #5: Output operands must start at the top of
- the reg-stack: output operands may not "skip" a reg. */
-
- zero_memory ((char *) reg_used_as_output, sizeof (reg_used_as_output));
- for (i = 0; i < n_outputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- if (reg_class_size[(int) recog_op_alt[i][alt].class] != 1)
- {
- error_for_asm (insn, "Output constraint %d must specify a single register", i);
- malformed_asm = 1;
- }
- else
- reg_used_as_output[REGNO (recog_operand[i])] = 1;
- }
-
-
- /* Search for first non-popped reg. */
- for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
- if (! reg_used_as_output[i])
- break;
-
- /* If there are any other popped regs, that's an error. */
- for (; i < LAST_STACK_REG + 1; i++)
- if (reg_used_as_output[i])
- break;
-
- if (i != LAST_STACK_REG + 1)
- {
- error_for_asm (insn, "Output regs must be grouped at top of stack");
- malformed_asm = 1;
- }
-
- /* Enforce rule #2: All implicitly popped input regs must be closer
- to the top of the reg-stack than any input that is not implicitly
- popped. */
-
- zero_memory ((char *) implicitly_dies, sizeof (implicitly_dies));
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- /* An input reg is implicitly popped if it is tied to an
- output, or if there is a CLOBBER for it. */
- int j;
-
- for (j = 0; j < n_clobbers; j++)
- if (operands_match_p (clobber_reg[j], recog_operand[i]))
- break;
-
- if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0)
- implicitly_dies[REGNO (recog_operand[i])] = 1;
- }
-
- /* Search for first non-popped reg. */
- for (i = FIRST_STACK_REG; i < LAST_STACK_REG + 1; i++)
- if (! implicitly_dies[i])
- break;
-
- /* If there are any other popped regs, that's an error. */
- for (; i < LAST_STACK_REG + 1; i++)
- if (implicitly_dies[i])
- break;
-
- if (i != LAST_STACK_REG + 1)
- {
- error_for_asm (insn,
- "Implicitly popped regs must be grouped at top of stack");
- malformed_asm = 1;
- }
-
- /* Enfore rule #3: If any input operand uses the "f" constraint, all
- output constraints must use the "&" earlyclobber.
-
- ??? Detect this more deterministically by having constraint_asm_operands
- record any earlyclobber. */
-
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (recog_op_alt[i][alt].matches == -1)
- {
- int j;
-
- for (j = 0; j < n_outputs; j++)
- if (operands_match_p (recog_operand[j], recog_operand[i]))
- {
- error_for_asm (insn,
- "Output operand %d must use `&' constraint", j);
- malformed_asm = 1;
- }
- }
-
- if (malformed_asm)
- {
- /* Avoid further trouble with this insn. */
- PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
- PUT_MODE (insn, VOIDmode);
- return;
- }
-
- /* Process all outputs */
- for (i = 0; i < n_outputs; i++)
- {
- rtx op = recog_operand[i];
-
- if (! STACK_REG_P (op))
- {
- if (stack_regs_mentioned_p (op))
- abort ();
- else
- continue;
- }
-
- /* Each destination is dead before this insn. If the
- destination is not used after this insn, record this with
- REG_UNUSED. */
-
- if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (op)))
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_UNUSED, op,
- REG_NOTES (insn));
-
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (op));
- }
-
- /* Process all inputs */
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- {
- rtx op = recog_operand[i];
- if (! STACK_REG_P (op))
- {
- if (stack_regs_mentioned_p (op))
- abort ();
- else
- continue;
- }
-
- /* If an input is dead after the insn, record a death note.
- But don't record a death note if there is already a death note,
- or if the input is also an output. */
-
- if (! TEST_HARD_REG_BIT (regstack->reg_set, REGNO (op))
- && recog_op_alt[i][alt].matches == -1
- && find_regno_note (insn, REG_DEAD, REGNO (op)) == NULL_RTX)
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, op, REG_NOTES (insn));
-
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (op));
- }
-}
-
-/* Scan PAT, which is part of INSN, and record registers appearing in
- a SET_DEST in DEST, and other registers in SRC.
-
- This function does not know about SET_DESTs that are both input and
- output (such as ZERO_EXTRACT) - this cannot happen on a 387. */
-
-static void
-record_reg_life_pat (pat, src, dest, douse)
- rtx pat;
- HARD_REG_SET *src, *dest;
- int douse;
-{
- register char *fmt;
- register int i;
-
- if (STACK_REG_P (pat)
- || (GET_CODE (pat) == SUBREG && STACK_REG_P (SUBREG_REG (pat))))
- {
- if (src)
- mark_regs_pat (pat, src);
-
- if (dest)
- mark_regs_pat (pat, dest);
-
- return;
- }
-
- if (GET_CODE (pat) == SET)
- {
- record_reg_life_pat (XEXP (pat, 0), NULL_PTR, dest, 0);
- record_reg_life_pat (XEXP (pat, 1), src, NULL_PTR, 0);
- return;
- }
-
- /* We don't need to consider either of these cases. */
- if ((GET_CODE (pat) == USE && !douse) || GET_CODE (pat) == CLOBBER)
- return;
-
- fmt = GET_RTX_FORMAT (GET_CODE (pat));
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'E')
- {
- register int j;
-
- for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
- record_reg_life_pat (XVECEXP (pat, i, j), src, dest, 0);
- }
- else if (fmt[i] == 'e')
- record_reg_life_pat (XEXP (pat, i), src, dest, 0);
- }
-}
-
-/* Calculate the number of inputs and outputs in BODY, an
- asm_operands. N_OPERANDS is the total number of operands, and
- N_INPUTS and N_OUTPUTS are pointers to ints into which the results are
- placed. */
-
-static int
-get_asm_operand_n_inputs (body)
- rtx body;
-{
- if (GET_CODE (body) == SET && GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (body));
-
- else if (GET_CODE (body) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (body);
-
- else if (GET_CODE (body) == PARALLEL
- && GET_CODE (XVECEXP (body, 0, 0)) == SET)
- return ASM_OPERANDS_INPUT_LENGTH (SET_SRC (XVECEXP (body, 0, 0)));
-
- else if (GET_CODE (body) == PARALLEL
- && GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS)
- return ASM_OPERANDS_INPUT_LENGTH (XVECEXP (body, 0, 0));
-
- abort ();
-}
-
-/* Scan INSN, which is in BLOCK, and record the life & death of stack
- registers in REGSTACK. This function is called to process insns from
- the last insn in a block to the first. The actual scanning is done in
- record_reg_life_pat.
-
- If a register is live after a CALL_INSN, but is not a value return
- register for that CALL_INSN, then code is emitted to initialize that
- register. The block_end[] data is kept accurate.
-
- Existing death and unset notes for stack registers are deleted
- before processing the insn. */
-
-static void
-record_reg_life (insn, block, regstack)
- rtx insn;
- int block;
- stack regstack;
-{
- rtx note, *note_link;
- int n_operands;
-
- if ((GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
- || INSN_DELETED_P (insn))
- return;
-
- /* Strip death notes for stack regs from this insn */
-
- note_link = &REG_NOTES(insn);
- for (note = *note_link; note; note = XEXP (note, 1))
- if (STACK_REG_P (XEXP (note, 0))
- && (REG_NOTE_KIND (note) == REG_DEAD
- || REG_NOTE_KIND (note) == REG_UNUSED))
- *note_link = XEXP (note, 1);
- else
- note_link = &XEXP (note, 1);
-
- /* Process all patterns in the insn. */
-
- n_operands = asm_noperands (PATTERN (insn));
- if (n_operands >= 0)
- {
- record_asm_reg_life (insn, regstack);
- return;
- }
-
- {
- HARD_REG_SET src, dest;
- int regno;
-
- CLEAR_HARD_REG_SET (src);
- CLEAR_HARD_REG_SET (dest);
-
- if (GET_CODE (insn) == CALL_INSN)
- for (note = CALL_INSN_FUNCTION_USAGE (insn);
- note;
- note = XEXP (note, 1))
- if (GET_CODE (XEXP (note, 0)) == USE)
- record_reg_life_pat (SET_DEST (XEXP (note, 0)), &src, NULL_PTR, 0);
-
- record_reg_life_pat (PATTERN (insn), &src, &dest, 0);
- for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
- if (! TEST_HARD_REG_BIT (regstack->reg_set, regno))
- {
- if (TEST_HARD_REG_BIT (src, regno)
- && ! TEST_HARD_REG_BIT (dest, regno))
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD,
- FP_MODE_REG (regno, DFmode),
- REG_NOTES (insn));
- else if (TEST_HARD_REG_BIT (dest, regno))
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_UNUSED,
- FP_MODE_REG (regno, DFmode),
- REG_NOTES (insn));
- }
-
- if (GET_CODE (insn) == CALL_INSN)
- {
- int reg;
-
- /* There might be a reg that is live after a function call.
- Initialize it to zero so that the program does not crash. See
- comment towards the end of stack_reg_life_analysis(). */
-
- for (reg = FIRST_STACK_REG; reg <= LAST_STACK_REG; reg++)
- if (! TEST_HARD_REG_BIT (dest, reg)
- && TEST_HARD_REG_BIT (regstack->reg_set, reg))
- {
- rtx init, pat;
-
- /* The insn will use virtual register numbers, and so
- convert_regs is expected to process these. But BLOCK_NUM
- cannot be used on these insns, because they do not appear in
- block_number[]. */
-
- pat = gen_rtx_SET (VOIDmode, FP_MODE_REG (reg, DFmode),
- CONST0_RTX (DFmode));
- init = emit_insn_after (pat, insn);
- PUT_MODE (init, QImode);
-
- CLEAR_HARD_REG_BIT (regstack->reg_set, reg);
-
- /* If the CALL_INSN was the end of a block, move the
- block_end to point to the new insn. */
-
- if (block_end[block] == insn)
- block_end[block] = init;
- }
-
- /* Some regs do not survive a CALL */
- AND_COMPL_HARD_REG_SET (regstack->reg_set, call_used_reg_set);
- }
-
- AND_COMPL_HARD_REG_SET (regstack->reg_set, dest);
- IOR_HARD_REG_SET (regstack->reg_set, src);
- }
-}
-
-/* Find all basic blocks of the function, which starts with FIRST.
- For each JUMP_INSN, build the chain of LABEL_REFS on each CODE_LABEL. */
-
-static void
-find_blocks (first)
- rtx first;
-{
- register rtx insn;
- register int block;
- register RTX_CODE prev_code = BARRIER;
- register RTX_CODE code;
- rtx label_value_list = 0;
-
- /* Record where all the blocks start and end.
- Record which basic blocks control can drop in to. */
-
- block = -1;
- for (insn = first; insn; insn = NEXT_INSN (insn))
- {
- /* Note that this loop must select the same block boundaries
- as code in reg_to_stack, but that these are not the same
- as those selected in flow.c. */
-
- code = GET_CODE (insn);
-
- if (code == CODE_LABEL
- || (prev_code != INSN
- && prev_code != CALL_INSN
- && prev_code != CODE_LABEL
- && GET_RTX_CLASS (code) == 'i'))
- {
- block_begin[++block] = insn;
- block_end[block] = insn;
- block_drops_in[block] = prev_code != BARRIER;
- }
- else if (GET_RTX_CLASS (code) == 'i')
- block_end[block] = insn;
-
- if (GET_RTX_CLASS (code) == 'i')
- {
- rtx note;
-
- /* Make a list of all labels referred to other than by jumps. */
- for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_LABEL)
- label_value_list = gen_rtx_EXPR_LIST (VOIDmode, XEXP (note, 0),
- label_value_list);
- }
-
- block_number[INSN_UID (insn)] = block;
-
- if (code != NOTE)
- prev_code = code;
- }
-
- if (block + 1 != blocks)
- abort ();
-
- /* generate all label references to the corresponding jump insn */
- for (block = 0; block < blocks; block++)
- {
- insn = block_end[block];
-
- if (GET_CODE (insn) == JUMP_INSN)
- {
- rtx pat = PATTERN (insn);
- rtx x;
-
- if (computed_jump_p (insn))
- {
- for (x = label_value_list; x; x = XEXP (x, 1))
- record_label_references (insn,
- gen_rtx_LABEL_REF (VOIDmode,
- XEXP (x, 0)));
-
- for (x = forced_labels; x; x = XEXP (x, 1))
- record_label_references (insn,
- gen_rtx_LABEL_REF (VOIDmode,
- XEXP (x, 0)));
- }
-
- record_label_references (insn, pat);
- }
- }
-}
-
-/* If current function returns its result in an fp stack register,
- return the REG. Otherwise, return 0. */
-
-static rtx
-stack_result (decl)
- tree decl;
-{
- rtx result = DECL_RTL (DECL_RESULT (decl));
-
- if (result != 0
- && ! (GET_CODE (result) == REG
- && REGNO (result) < FIRST_PSEUDO_REGISTER))
- {
-#ifdef FUNCTION_OUTGOING_VALUE
- result
- = FUNCTION_OUTGOING_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
-#else
- result = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (decl)), decl);
-#endif
- }
-
- return result != 0 && STACK_REG_P (result) ? result : 0;
-}
-
-/* Determine which registers are live at the start of each basic
- block of the function whose first insn is FIRST.
-
-   First, if the function returns its value in a stack register, mark
-   that register as live at each return point, as the RTL may give no
-   hint that the register is live.
-
- Then, start with the last block and work back to the first block.
- Similarly, work backwards within each block, insn by insn, recording
- which regs are dead and which are used (and therefore live) in the
- hard reg set of block_stack_in[].
-
- After processing each basic block, if there is a label at the start
- of the block, propagate the live registers to all jumps to this block.
-
- As a special case, if there are regs live in this block, that are
- not live in a block containing a jump to this label, and the block
- containing the jump has already been processed, we must propagate this
- block's entry register life back to the block containing the jump, and
- restart life analysis from there.
-
- In the worst case, this function may traverse the insns
- REG_STACK_SIZE times. This is necessary, since a jump towards the end
- of the insns may not know that a reg is live at a target that is early
- in the insns. So we back up and start over with the new reg live.
-
- If there are registers that are live at the start of the function,
- insns are emitted to initialize these registers. Something similar is
- done after CALL_INSNs in record_reg_life. */
-
-static void
-stack_reg_life_analysis (first, stackentry)
- rtx first;
- HARD_REG_SET *stackentry;
-{
- int reg, block;
- struct stack_def regstack;
-
- {
- rtx retvalue;
-
- if ((retvalue = stack_result (current_function_decl)))
- {
- /* Find all RETURN insns and mark them. */
-
- for (block = blocks - 1; --block >= 0;)
- if (GET_CODE (block_end[block]) == JUMP_INSN
- && GET_CODE (PATTERN (block_end[block])) == RETURN)
- mark_regs_pat (retvalue, block_out_reg_set+block);
-
- /* Mark off the end of last block if we "fall off" the end of the
- function into the epilogue. */
-
- if (GET_CODE (block_end[blocks-1]) != JUMP_INSN
- || GET_CODE (PATTERN (block_end[blocks-1])) == RETURN)
- mark_regs_pat (retvalue, block_out_reg_set+blocks-1);
- }
- }
-
- /* now scan all blocks backward for stack register use */
-
- block = blocks - 1;
- while (block >= 0)
- {
- register rtx insn, prev;
-
- /* current register status at last instruction */
-
- COPY_HARD_REG_SET (regstack.reg_set, block_out_reg_set[block]);
-
- prev = block_end[block];
- do
- {
- insn = prev;
- prev = PREV_INSN (insn);
-
- /* If the insn is a CALL_INSN, we need to ensure that
- everything dies. But otherwise don't process unless there
- are some stack regs present. */
-
- if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
- record_reg_life (insn, block, &regstack);
-
- } while (insn != block_begin[block]);
-
- /* Set the state at the start of the block. Mark that no
- register mapping information known yet. */
-
- COPY_HARD_REG_SET (block_stack_in[block].reg_set, regstack.reg_set);
- block_stack_in[block].top = -2;
-
- /* If there is a label, propagate our register life to all jumps
- to this label. */
-
- if (GET_CODE (insn) == CODE_LABEL)
- {
- register rtx label;
- int must_restart = 0;
-
- for (label = LABEL_REFS (insn); label != insn;
- label = LABEL_NEXTREF (label))
- {
- int jump_block = BLOCK_NUM (CONTAINING_INSN (label));
-
- if (jump_block < block)
- IOR_HARD_REG_SET (block_out_reg_set[jump_block],
- block_stack_in[block].reg_set);
- else
- {
- /* The block containing the jump has already been
- processed. If there are registers that were not known
- to be live then, but are live now, we must back up
- and restart life analysis from that point with the new
- life information. */
-
- GO_IF_HARD_REG_SUBSET (block_stack_in[block].reg_set,
- block_out_reg_set[jump_block],
- win);
-
- IOR_HARD_REG_SET (block_out_reg_set[jump_block],
- block_stack_in[block].reg_set);
-
- block = jump_block;
- must_restart = 1;
- break;
-
- win:
- ;
- }
- }
- if (must_restart)
- continue;
- }
-
- if (block_drops_in[block])
- IOR_HARD_REG_SET (block_out_reg_set[block-1],
- block_stack_in[block].reg_set);
-
- block -= 1;
- }
-
- /* If any reg is live at the start of the first block of a
- function, then we must guarantee that the reg holds some value by
- generating our own "load" of that register. Otherwise a 387 would
- fault trying to access an empty register. */
-
- /* Load zero into each live register. The fact that a register
- appears live at the function start necessarily implies an error
- in the user program: it means that (unless the offending code is *never*
- executed) this program is using uninitialised floating point
- variables. In order to keep broken code like this happy, we initialise
- those variables with zero.
-
- Note that we are inserting virtual register references here:
- these insns must be processed by convert_regs later. Also, these
- insns will not be in block_number, so BLOCK_NUM() will fail for them. */
-
- for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
- if (TEST_HARD_REG_BIT (block_stack_in[0].reg_set, reg)
- && ! TEST_HARD_REG_BIT (*stackentry, reg))
- {
- rtx init_rtx;
-
- init_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG(reg, DFmode),
- CONST0_RTX (DFmode));
- block_begin[0] = emit_insn_after (init_rtx, first);
- PUT_MODE (block_begin[0], QImode);
-
- CLEAR_HARD_REG_BIT (block_stack_in[0].reg_set, reg);
- }
-}
-
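Editorial note, not part of the patch: each initialization emitted above is just a zero store into the still-virtual stack register, roughly

    (set (reg:DF R) (const_double:DF 0.0))

with the containing insn given QImode so that later passes recognize it as one that mentions stack regs.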
-/*****************************************************************************
- This section deals with stack register substitution, and forms the second
- pass over the RTL.
- *****************************************************************************/
-
-/* Replace REG, which is a pointer to a stack reg RTX, with an RTX for
- the desired hard REGNO. */
-
-static void
-replace_reg (reg, regno)
- rtx *reg;
- int regno;
-{
- if (regno < FIRST_STACK_REG || regno > LAST_STACK_REG
- || ! STACK_REG_P (*reg))
- abort ();
-
- switch (GET_MODE_CLASS (GET_MODE (*reg)))
- {
- default: abort ();
- case MODE_FLOAT:
- case MODE_COMPLEX_FLOAT:;
- }
-
- *reg = FP_MODE_REG (regno, GET_MODE (*reg));
-}
-
-/* Remove a note of type NOTE, which must be found, for register
- number REGNO from INSN. Remove only one such note. */
-
-static void
-remove_regno_note (insn, note, regno)
- rtx insn;
- enum reg_note note;
- int regno;
-{
- register rtx *note_link, this;
-
- note_link = &REG_NOTES(insn);
- for (this = *note_link; this; this = XEXP (this, 1))
- if (REG_NOTE_KIND (this) == note
- && REG_P (XEXP (this, 0)) && REGNO (XEXP (this, 0)) == regno)
- {
- *note_link = XEXP (this, 1);
- return;
- }
- else
- note_link = &XEXP (this, 1);
-
- abort ();
-}
-
-/* Find the hard register number of virtual register REG in REGSTACK.
- The hard register number is relative to the top of the stack. -1 is
- returned if the register is not found. */
-
-static int
-get_hard_regnum (regstack, reg)
- stack regstack;
- rtx reg;
-{
- int i;
-
- if (! STACK_REG_P (reg))
- abort ();
-
- for (i = regstack->top; i >= 0; i--)
- if (regstack->reg[i] == REGNO (reg))
- break;
-
- return i >= 0 ? (FIRST_STACK_REG + regstack->top - i) : -1;
-}
-
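Editorial aside, not part of the patch: the mapping computed here is easiest to see on plain integers. A minimal sketch with a hypothetical helper name, same arithmetic as above:

    /* Sketch only (not from reg-stack.c): REG holds virtual stack-reg
       numbers for slots 0..TOP, bottom first.  The result counts from the
       top of stack, matching how %st(N) is named.  */
    static int
    hard_regnum_sketch (const int *reg, int top, int target, int first_stack_reg)
    {
      int i;

      for (i = top; i >= 0; i--)
        if (reg[i] == target)
          break;

      return i >= 0 ? first_stack_reg + top - i : -1;
    }

For example, with top == 2 and reg holding {11, 9, 13}, looking up 9 finds i == 1, so the result is first_stack_reg + 1, i.e. one slot below the top.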
-/* Delete INSN from the RTL. Mark the insn, but don't remove it from
- the chain of insns. Doing so could confuse block_begin and block_end
- if this were the only insn in the block. */
-
-static void
-delete_insn_for_stacker (insn)
- rtx insn;
-{
- PUT_CODE (insn, NOTE);
- NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
- NOTE_SOURCE_FILE (insn) = 0;
-}
-
-/* Emit an insn to pop virtual register REG before or after INSN.
- REGSTACK is the stack state after INSN and is updated to reflect this
- pop. WHEN is either emit_insn_before or emit_insn_after. A pop insn
- is represented as a SET whose destination is the register to be popped
- and source is the top of stack. A death note for the top of stack
-   causes the movdf pattern to pop. */
-
-static rtx
-emit_pop_insn (insn, regstack, reg, when)
- rtx insn;
- stack regstack;
- rtx reg;
- rtx (*when)();
-{
- rtx pop_insn, pop_rtx;
- int hard_regno;
-
- hard_regno = get_hard_regnum (regstack, reg);
-
- if (hard_regno < FIRST_STACK_REG)
- abort ();
-
- pop_rtx = gen_rtx_SET (VOIDmode, FP_MODE_REG (hard_regno, DFmode),
- FP_MODE_REG (FIRST_STACK_REG, DFmode));
-
- pop_insn = (*when) (pop_rtx, insn);
- /* ??? This used to be VOIDmode, but that seems wrong. */
- PUT_MODE (pop_insn, QImode);
-
- REG_NOTES (pop_insn) = gen_rtx_EXPR_LIST (REG_DEAD,
- FP_MODE_REG (FIRST_STACK_REG, DFmode),
- REG_NOTES (pop_insn));
-
- regstack->reg[regstack->top - (hard_regno - FIRST_STACK_REG)]
- = regstack->reg[regstack->top];
- regstack->top -= 1;
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (reg));
-
- return pop_insn;
-}
-
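Editorial note, not part of the patch: per the code above, popping the value that currently sits at hard register N is emitted roughly as

    (set (reg:DF N) (reg:DF FIRST_STACK_REG))

with a REG_DEAD note on FIRST_STACK_REG; that death note is what makes the movdf pattern emit a popping store instead of a plain one.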
-/* Emit an insn before or after INSN to swap virtual register REG with the
-   top of stack.  WHEN should be `emit_insn_before' or `emit_insn_after'.
- REGSTACK is the stack state before the swap, and is updated to reflect
- the swap. A swap insn is represented as a PARALLEL of two patterns:
- each pattern moves one reg to the other.
-
- If REG is already at the top of the stack, no insn is emitted. */
-
-static void
-emit_swap_insn (insn, regstack, reg)
- rtx insn;
- stack regstack;
- rtx reg;
-{
- int hard_regno;
- rtx gen_swapdf();
- rtx swap_rtx, swap_insn;
- int tmp, other_reg; /* swap regno temps */
- rtx i1; /* the stack-reg insn prior to INSN */
- rtx i1set = NULL_RTX; /* the SET rtx within I1 */
-
- hard_regno = get_hard_regnum (regstack, reg);
-
- if (hard_regno < FIRST_STACK_REG)
- abort ();
- if (hard_regno == FIRST_STACK_REG)
- return;
-
- other_reg = regstack->top - (hard_regno - FIRST_STACK_REG);
-
- tmp = regstack->reg[other_reg];
- regstack->reg[other_reg] = regstack->reg[regstack->top];
- regstack->reg[regstack->top] = tmp;
-
- /* Find the previous insn involving stack regs, but don't go past
- any labels, calls or jumps. */
- i1 = prev_nonnote_insn (insn);
- while (i1 && GET_CODE (i1) == INSN && GET_MODE (i1) != QImode)
- i1 = prev_nonnote_insn (i1);
-
- if (i1)
- i1set = single_set (i1);
-
- if (i1set)
- {
- rtx i1src = *get_true_reg (&SET_SRC (i1set));
- rtx i1dest = *get_true_reg (&SET_DEST (i1set));
-
- /* If the previous register stack push was from the reg we are to
- swap with, omit the swap. */
-
- if (GET_CODE (i1dest) == REG && REGNO (i1dest) == FIRST_STACK_REG
- && GET_CODE (i1src) == REG && REGNO (i1src) == hard_regno - 1
- && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
- return;
-
- /* If the previous insn wrote to the reg we are to swap with,
- omit the swap. */
-
- if (GET_CODE (i1dest) == REG && REGNO (i1dest) == hard_regno
- && GET_CODE (i1src) == REG && REGNO (i1src) == FIRST_STACK_REG
- && find_regno_note (i1, REG_DEAD, FIRST_STACK_REG) == NULL_RTX)
- return;
- }
-
- if (GET_RTX_CLASS (GET_CODE (i1)) == 'i' && sets_cc0_p (PATTERN (i1)))
- {
- i1 = next_nonnote_insn (i1);
- if (i1 == insn)
- abort ();
- }
-
- swap_rtx = gen_swapdf (FP_MODE_REG (hard_regno, DFmode),
- FP_MODE_REG (FIRST_STACK_REG, DFmode));
- swap_insn = emit_insn_after (swap_rtx, i1);
- /* ??? This used to be VOIDmode, but that seems wrong. */
- PUT_MODE (swap_insn, QImode);
-}
-
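Editorial note, not part of the patch: as the comment above says, the swap built by gen_swapdf is a PARALLEL of two SETs, one in each direction, roughly

    (parallel [(set (reg:DF N) (reg:DF FIRST_STACK_REG))
               (set (reg:DF FIRST_STACK_REG) (reg:DF N))])

so a single exchange instruction implements both moves at once.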
-/* Handle a move to or from a stack register in PAT, which is in INSN.
- REGSTACK is the current stack. */
-
-static void
-move_for_stack_reg (insn, regstack, pat)
- rtx insn;
- stack regstack;
- rtx pat;
-{
- rtx *psrc = get_true_reg (&SET_SRC (pat));
- rtx *pdest = get_true_reg (&SET_DEST (pat));
- rtx src, dest;
- rtx note;
-
- src = *psrc; dest = *pdest;
-
- if (STACK_REG_P (src) && STACK_REG_P (dest))
- {
- /* Write from one stack reg to another. If SRC dies here, then
- just change the register mapping and delete the insn. */
-
- note = find_regno_note (insn, REG_DEAD, REGNO (src));
- if (note)
- {
- int i;
-
- /* If this is a no-op move, there must not be a REG_DEAD note. */
- if (REGNO (src) == REGNO (dest))
- abort ();
-
- for (i = regstack->top; i >= 0; i--)
- if (regstack->reg[i] == REGNO (src))
- break;
-
- /* The source must be live, and the dest must be dead. */
- if (i < 0 || get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
-
- /* It is possible that the dest is unused after this insn.
- If so, just pop the src. */
-
- if (find_regno_note (insn, REG_UNUSED, REGNO (dest)))
- {
- emit_pop_insn (insn, regstack, src, emit_insn_after);
-
- delete_insn_for_stacker (insn);
- return;
- }
-
- regstack->reg[i] = REGNO (dest);
-
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src));
-
- delete_insn_for_stacker (insn);
-
- return;
- }
-
- /* The source reg does not die. */
-
- /* If this appears to be a no-op move, delete it, or else it
- will confuse the machine description output patterns. But if
- it is REG_UNUSED, we must pop the reg now, as per-insn processing
- for REG_UNUSED will not work for deleted insns. */
-
- if (REGNO (src) == REGNO (dest))
- {
- if (find_regno_note (insn, REG_UNUSED, REGNO (dest)))
- emit_pop_insn (insn, regstack, dest, emit_insn_after);
-
- delete_insn_for_stacker (insn);
- return;
- }
-
- /* The destination ought to be dead */
- if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
-
- replace_reg (psrc, get_hard_regnum (regstack, src));
-
- regstack->reg[++regstack->top] = REGNO (dest);
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
- replace_reg (pdest, FIRST_STACK_REG);
- }
- else if (STACK_REG_P (src))
- {
- /* Save from a stack reg to MEM, or possibly integer reg. Since
- only top of stack may be saved, emit an exchange first if
- needs be. */
-
- emit_swap_insn (insn, regstack, src);
-
- note = find_regno_note (insn, REG_DEAD, REGNO (src));
- if (note)
- {
- replace_reg (&XEXP (note, 0), FIRST_STACK_REG);
- regstack->top--;
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (src));
- }
- else if (GET_MODE (src) == XFmode && regstack->top < REG_STACK_SIZE - 1)
- {
- /* A 387 cannot write an XFmode value to a MEM without
- clobbering the source reg. The output code can handle
- this by reading back the value from the MEM.
- But it is more efficient to use a temp register if one is
- available. Push the source value here if the register
- stack is not full, and then write the value to memory via
- a pop. */
- rtx push_rtx, push_insn;
- rtx top_stack_reg = FP_MODE_REG (FIRST_STACK_REG, XFmode);
-
- push_rtx = gen_movxf (top_stack_reg, top_stack_reg);
- push_insn = emit_insn_before (push_rtx, insn);
- PUT_MODE (push_insn, QImode);
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_DEAD, top_stack_reg,
- REG_NOTES (insn));
- }
-
- replace_reg (psrc, FIRST_STACK_REG);
- }
- else if (STACK_REG_P (dest))
- {
- /* Load from MEM, or possibly integer REG or constant, into the
- stack regs. The actual target is always the top of the
- stack. The stack mapping is changed to reflect that DEST is
- now at top of stack. */
-
- /* The destination ought to be dead */
- if (get_hard_regnum (regstack, dest) >= FIRST_STACK_REG)
- abort ();
-
- if (regstack->top >= REG_STACK_SIZE)
- abort ();
-
- regstack->reg[++regstack->top] = REGNO (dest);
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (dest));
- replace_reg (pdest, FIRST_STACK_REG);
- }
- else
- abort ();
-}
-
-static void
-swap_rtx_condition (pat)
- rtx pat;
-{
- register char *fmt;
- register int i;
-
- if (GET_RTX_CLASS (GET_CODE (pat)) == '<')
- {
- PUT_CODE (pat, swap_condition (GET_CODE (pat)));
- return;
- }
-
- fmt = GET_RTX_FORMAT (GET_CODE (pat));
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'E')
- {
- register int j;
-
- for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
- swap_rtx_condition (XVECEXP (pat, i, j));
- }
- else if (fmt[i] == 'e')
- swap_rtx_condition (XEXP (pat, i));
- }
-}
-
-/* Handle a comparison. Special care needs to be taken to avoid
- causing comparisons that a 387 cannot do correctly, such as EQ.
-
- Also, a pop insn may need to be emitted. The 387 does have an
- `fcompp' insn that can pop two regs, but it is sometimes too expensive
- to do this - a `fcomp' followed by a `fstpl %st(0)' may be easier to
- set up. */
-
-static void
-compare_for_stack_reg (insn, regstack, pat)
- rtx insn;
- stack regstack;
- rtx pat;
-{
- rtx *src1, *src2;
- rtx src1_note, src2_note;
- rtx cc0_user;
- int have_cmove;
-
- src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
- src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
- cc0_user = next_cc0_user (insn);
-
- /* If the insn that uses cc0 is an FP-conditional move, then the destination
- must be the top of stack */
- if (GET_CODE (PATTERN (cc0_user)) == SET
- && SET_DEST (PATTERN (cc0_user)) != pc_rtx
- && GET_CODE (SET_SRC (PATTERN (cc0_user))) == IF_THEN_ELSE
- && (GET_MODE_CLASS (GET_MODE (SET_DEST (PATTERN (cc0_user))))
- == MODE_FLOAT))
- {
- rtx *dest;
-
- dest = get_true_reg (&SET_DEST (PATTERN (cc0_user)));
-
- have_cmove = 1;
- if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG
- && REGNO (*dest) != regstack->reg[regstack->top])
- {
- emit_swap_insn (insn, regstack, *dest);
- }
- }
- else
- have_cmove = 0;
-
- /* ??? If fxch turns out to be cheaper than fstp, give priority to
- registers that die in this insn - move those to stack top first. */
- if (! STACK_REG_P (*src1)
- || (STACK_REG_P (*src2)
- && get_hard_regnum (regstack, *src2) == FIRST_STACK_REG))
- {
- rtx temp, next;
-
- temp = XEXP (SET_SRC (pat), 0);
- XEXP (SET_SRC (pat), 0) = XEXP (SET_SRC (pat), 1);
- XEXP (SET_SRC (pat), 1) = temp;
-
- src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
- src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
-
- next = next_cc0_user (insn);
- if (next == NULL_RTX)
- abort ();
-
- swap_rtx_condition (PATTERN (next));
- INSN_CODE (next) = -1;
- INSN_CODE (insn) = -1;
- }
-
- /* We will fix any death note later. */
-
- src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
-
- if (STACK_REG_P (*src2))
- src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
- else
- src2_note = NULL_RTX;
-
- if (! have_cmove)
- emit_swap_insn (insn, regstack, *src1);
-
- replace_reg (src1, FIRST_STACK_REG);
-
- if (STACK_REG_P (*src2))
- replace_reg (src2, get_hard_regnum (regstack, *src2));
-
- if (src1_note)
- {
- pop_stack (regstack, REGNO (XEXP (src1_note, 0)));
- replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
- }
-
- /* If the second operand dies, handle that. But if the operands are
- the same stack register, don't bother, because only one death is
- needed, and it was just handled. */
-
- if (src2_note
- && ! (STACK_REG_P (*src1) && STACK_REG_P (*src2)
- && REGNO (*src1) == REGNO (*src2)))
- {
- /* As a special case, two regs may die in this insn if src2 is
- next to top of stack and the top of stack also dies. Since
- we have already popped src1, "next to top of stack" is really
- at top (FIRST_STACK_REG) now. */
-
- if (get_hard_regnum (regstack, XEXP (src2_note, 0)) == FIRST_STACK_REG
- && src1_note)
- {
- pop_stack (regstack, REGNO (XEXP (src2_note, 0)));
- replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG + 1);
- }
- else
- {
- /* The 386 can only represent death of the first operand in
- the case handled above. In all other cases, emit a separate
- pop and remove the death note from here. */
-
- link_cc0_insns (insn);
-
- remove_regno_note (insn, REG_DEAD, REGNO (XEXP (src2_note, 0)));
-
- emit_pop_insn (insn, regstack, XEXP (src2_note, 0),
- emit_insn_after);
- }
- }
-}
-
-/* Substitute new registers in PAT, which is part of INSN. REGSTACK
- is the current register layout. */
-
-static void
-subst_stack_regs_pat (insn, regstack, pat)
- rtx insn;
- stack regstack;
- rtx pat;
-{
- rtx *dest, *src;
- rtx *src1 = (rtx *) NULL_PTR, *src2;
- rtx src1_note, src2_note;
-
- if (GET_CODE (pat) != SET)
- return;
-
- dest = get_true_reg (&SET_DEST (pat));
- src = get_true_reg (&SET_SRC (pat));
-
- /* See if this is a `movM' pattern, and handle elsewhere if so. */
-
- if (*dest != cc0_rtx
- && (STACK_REG_P (*src)
- || (STACK_REG_P (*dest)
- && (GET_CODE (*src) == REG || GET_CODE (*src) == MEM
- || GET_CODE (*src) == CONST_DOUBLE))))
- move_for_stack_reg (insn, regstack, pat);
- else
- switch (GET_CODE (SET_SRC (pat)))
- {
- case COMPARE:
- compare_for_stack_reg (insn, regstack, pat);
- break;
-
- case CALL:
- {
- int count;
- for (count = HARD_REGNO_NREGS (REGNO (*dest), GET_MODE (*dest));
- --count >= 0;)
- {
- regstack->reg[++regstack->top] = REGNO (*dest) + count;
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest) + count);
- }
- }
- replace_reg (dest, FIRST_STACK_REG);
- break;
-
- case REG:
- /* This is a `tstM2' case. */
- if (*dest != cc0_rtx)
- abort ();
-
- src1 = src;
-
- /* Fall through. */
-
- case FLOAT_TRUNCATE:
- case SQRT:
- case ABS:
- case NEG:
- /* These insns only operate on the top of the stack. DEST might
- be cc0_rtx if we're processing a tstM pattern. Also, it's
- possible that the tstM case results in a REG_DEAD note on the
- source. */
-
- if (src1 == 0)
- src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
-
- emit_swap_insn (insn, regstack, *src1);
-
- src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
-
- if (STACK_REG_P (*dest))
- replace_reg (dest, FIRST_STACK_REG);
-
- if (src1_note)
- {
- replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
- regstack->top--;
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
- }
-
- replace_reg (src1, FIRST_STACK_REG);
-
- break;
-
- case MINUS:
- case DIV:
- /* On i386, reversed forms of subM3 and divM3 exist for
- MODE_FLOAT, so the same code that works for addM3 and mulM3
- can be used. */
- case MULT:
- case PLUS:
- /* These insns can accept the top of stack as a destination
- from a stack reg or mem, or can use the top of stack as a
- source and some other stack register (possibly top of stack)
- as a destination. */
-
- src1 = get_true_reg (&XEXP (SET_SRC (pat), 0));
- src2 = get_true_reg (&XEXP (SET_SRC (pat), 1));
-
- /* We will fix any death note later. */
-
- if (STACK_REG_P (*src1))
- src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
- else
- src1_note = NULL_RTX;
- if (STACK_REG_P (*src2))
- src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
- else
- src2_note = NULL_RTX;
-
- /* If either operand is not a stack register, then the dest
- must be top of stack. */
-
- if (! STACK_REG_P (*src1) || ! STACK_REG_P (*src2))
- emit_swap_insn (insn, regstack, *dest);
- else
- {
- /* Both operands are REG. If neither operand is already
- at the top of stack, choose to make the one that is the dest
- the new top of stack. */
-
- int src1_hard_regnum, src2_hard_regnum;
-
- src1_hard_regnum = get_hard_regnum (regstack, *src1);
- src2_hard_regnum = get_hard_regnum (regstack, *src2);
- if (src1_hard_regnum == -1 || src2_hard_regnum == -1)
- abort ();
-
- if (src1_hard_regnum != FIRST_STACK_REG
- && src2_hard_regnum != FIRST_STACK_REG)
- emit_swap_insn (insn, regstack, *dest);
- }
-
- if (STACK_REG_P (*src1))
- replace_reg (src1, get_hard_regnum (regstack, *src1));
- if (STACK_REG_P (*src2))
- replace_reg (src2, get_hard_regnum (regstack, *src2));
-
- if (src1_note)
- {
- /* If the register that dies is at the top of stack, then
- the destination is somewhere else - merely substitute it.
- But if the reg that dies is not at top of stack, then
- move the top of stack to the dead reg, as though we had
- done the insn and then a store-with-pop. */
-
- if (REGNO (XEXP (src1_note, 0)) == regstack->reg[regstack->top])
- {
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, get_hard_regnum (regstack, *dest));
- }
- else
- {
- int regno = get_hard_regnum (regstack, XEXP (src1_note, 0));
-
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, regno);
-
- regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
- = regstack->reg[regstack->top];
- }
-
- CLEAR_HARD_REG_BIT (regstack->reg_set,
- REGNO (XEXP (src1_note, 0)));
- replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
- regstack->top--;
- }
- else if (src2_note)
- {
- if (REGNO (XEXP (src2_note, 0)) == regstack->reg[regstack->top])
- {
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, get_hard_regnum (regstack, *dest));
- }
- else
- {
- int regno = get_hard_regnum (regstack, XEXP (src2_note, 0));
-
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, regno);
-
- regstack->reg[regstack->top - (regno - FIRST_STACK_REG)]
- = regstack->reg[regstack->top];
- }
-
- CLEAR_HARD_REG_BIT (regstack->reg_set,
- REGNO (XEXP (src2_note, 0)));
- replace_reg (&XEXP (src2_note, 0), FIRST_STACK_REG);
- regstack->top--;
- }
- else
- {
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, get_hard_regnum (regstack, *dest));
- }
-
- break;
-
- case UNSPEC:
- switch (XINT (SET_SRC (pat), 1))
- {
- case 1: /* sin */
- case 2: /* cos */
- /* These insns only operate on the top of the stack. */
-
- src1 = get_true_reg (&XVECEXP (SET_SRC (pat), 0, 0));
-
- emit_swap_insn (insn, regstack, *src1);
-
- src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
-
- if (STACK_REG_P (*dest))
- replace_reg (dest, FIRST_STACK_REG);
-
- if (src1_note)
- {
- replace_reg (&XEXP (src1_note, 0), FIRST_STACK_REG);
- regstack->top--;
- CLEAR_HARD_REG_BIT (regstack->reg_set, REGNO (*src1));
- }
-
- replace_reg (src1, FIRST_STACK_REG);
-
- break;
-
- default:
- abort ();
- }
- break;
-
- case IF_THEN_ELSE:
- /* This insn requires the top of stack to be the destination. */
-
- /* If the comparison operator is an FP comparison operator,
-	 it is handled correctly by compare_for_stack_reg (), which
- will move the destination to the top of stack. But if the
- comparison operator is not an FP comparison operator, we
- have to handle it here. */
- if (get_hard_regnum (regstack, *dest) >= FIRST_STACK_REG
- && REGNO (*dest) != regstack->reg[regstack->top])
- emit_swap_insn (insn, regstack, *dest);
-
- src1 = get_true_reg (&XEXP (SET_SRC (pat), 1));
- src2 = get_true_reg (&XEXP (SET_SRC (pat), 2));
-
- src1_note = find_regno_note (insn, REG_DEAD, REGNO (*src1));
- src2_note = find_regno_note (insn, REG_DEAD, REGNO (*src2));
-
- {
- rtx src_note [3];
- int i;
-
- src_note[0] = 0;
- src_note[1] = src1_note;
- src_note[2] = src2_note;
-
- if (STACK_REG_P (*src1))
- replace_reg (src1, get_hard_regnum (regstack, *src1));
- if (STACK_REG_P (*src2))
- replace_reg (src2, get_hard_regnum (regstack, *src2));
-
- for (i = 1; i <= 2; i++)
- if (src_note [i])
- {
- /* If the register that dies is not at the top of stack, then
- move the top of stack to the dead reg */
- if (REGNO (XEXP (src_note[i], 0))
- != regstack->reg[regstack->top])
- {
- remove_regno_note (insn, REG_DEAD,
- REGNO (XEXP (src_note [i], 0)));
- emit_pop_insn (insn, regstack, XEXP (src_note[i], 0),
- emit_insn_after);
- }
- else
- {
- CLEAR_HARD_REG_BIT (regstack->reg_set,
- REGNO (XEXP (src_note[i], 0)));
- replace_reg (&XEXP (src_note[i], 0), FIRST_STACK_REG);
- regstack->top--;
- }
- }
- }
-
- /* Make dest the top of stack. Add dest to regstack if not present. */
- if (get_hard_regnum (regstack, *dest) < FIRST_STACK_REG)
- regstack->reg[++regstack->top] = REGNO (*dest);
- SET_HARD_REG_BIT (regstack->reg_set, REGNO (*dest));
- replace_reg (dest, FIRST_STACK_REG);
-
- break;
-
- default:
- abort ();
- }
-}
-
-/* Substitute hard regnums for any stack regs in INSN, which has
- N_INPUTS inputs and N_OUTPUTS outputs. REGSTACK is the stack info
- before the insn, and is updated with changes made here.
-
- There are several requirements and assumptions about the use of
- stack-like regs in asm statements. These rules are enforced by
- record_asm_stack_regs; see comments there for details. Any
-   asm_operands left in the RTL at this point may be assumed to meet the
- requirements, since record_asm_stack_regs removes any problem asm. */
-
-static void
-subst_asm_stack_regs (insn, regstack)
- rtx insn;
- stack regstack;
-{
- rtx body = PATTERN (insn);
- int alt;
-
- rtx *note_reg; /* Array of note contents */
- rtx **note_loc; /* Address of REG field of each note */
- enum reg_note *note_kind; /* The type of each note */
-
- rtx *clobber_reg;
- rtx **clobber_loc;
-
- struct stack_def temp_stack;
- int n_notes;
- int n_clobbers;
- rtx note;
- int i;
- int n_inputs, n_outputs;
-
-  /* Find out what the constraints require.  If no constraint
- alternative matches, that is a compiler bug: we should have caught
- such an insn during the life analysis pass (and reload should have
- caught it regardless). */
- extract_insn (insn);
- constrain_operands (1);
- alt = which_alternative;
-
- preprocess_constraints ();
-
- n_inputs = get_asm_operand_n_inputs (body);
- n_outputs = recog_n_operands - n_inputs;
-
- if (alt < 0)
- abort ();
-
- /* Strip SUBREGs here to make the following code simpler. */
- for (i = 0; i < recog_n_operands; i++)
- if (GET_CODE (recog_operand[i]) == SUBREG
- && GET_CODE (SUBREG_REG (recog_operand[i])) == REG)
- {
- recog_operand_loc[i] = & SUBREG_REG (recog_operand[i]);
- recog_operand[i] = SUBREG_REG (recog_operand[i]);
- }
-
- /* Set up NOTE_REG, NOTE_LOC and NOTE_KIND. */
-
- for (i = 0, note = REG_NOTES (insn); note; note = XEXP (note, 1))
- i++;
-
- note_reg = (rtx *) alloca (i * sizeof (rtx));
- note_loc = (rtx **) alloca (i * sizeof (rtx *));
- note_kind = (enum reg_note *) alloca (i * sizeof (enum reg_note));
-
- n_notes = 0;
- for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
- {
- rtx reg = XEXP (note, 0);
- rtx *loc = & XEXP (note, 0);
-
- if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
- {
- loc = & SUBREG_REG (reg);
- reg = SUBREG_REG (reg);
- }
-
- if (STACK_REG_P (reg)
- && (REG_NOTE_KIND (note) == REG_DEAD
- || REG_NOTE_KIND (note) == REG_UNUSED))
- {
- note_reg[n_notes] = reg;
- note_loc[n_notes] = loc;
- note_kind[n_notes] = REG_NOTE_KIND (note);
- n_notes++;
- }
- }
-
- /* Set up CLOBBER_REG and CLOBBER_LOC. */
-
- n_clobbers = 0;
-
- if (GET_CODE (body) == PARALLEL)
- {
- clobber_reg = (rtx *) alloca (XVECLEN (body, 0) * sizeof (rtx));
- clobber_loc = (rtx **) alloca (XVECLEN (body, 0) * sizeof (rtx *));
-
- for (i = 0; i < XVECLEN (body, 0); i++)
- if (GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
- {
- rtx clobber = XVECEXP (body, 0, i);
- rtx reg = XEXP (clobber, 0);
- rtx *loc = & XEXP (clobber, 0);
-
- if (GET_CODE (reg) == SUBREG && GET_CODE (SUBREG_REG (reg)) == REG)
- {
- loc = & SUBREG_REG (reg);
- reg = SUBREG_REG (reg);
- }
-
- if (STACK_REG_P (reg))
- {
- clobber_reg[n_clobbers] = reg;
- clobber_loc[n_clobbers] = loc;
- n_clobbers++;
- }
- }
- }
-
- copy_memory ((char *) regstack, (char *) &temp_stack, sizeof (temp_stack));
-
- /* Put the input regs into the desired place in TEMP_STACK. */
-
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (STACK_REG_P (recog_operand[i])
- && reg_class_subset_p (recog_op_alt[i][alt].class,
- FLOAT_REGS)
- && recog_op_alt[i][alt].class != FLOAT_REGS)
- {
- /* If an operand needs to be in a particular reg in
- FLOAT_REGS, the constraint was either 't' or 'u'. Since
- these constraints are for single register classes, and reload
- guaranteed that operand[i] is already in that class, we can
- just use REGNO (recog_operand[i]) to know which actual reg this
- operand needs to be in. */
-
- int regno = get_hard_regnum (&temp_stack, recog_operand[i]);
-
- if (regno < 0)
- abort ();
-
- if (regno != REGNO (recog_operand[i]))
- {
- /* recog_operand[i] is not in the right place. Find it
- and swap it with whatever is already in I's place.
- K is where recog_operand[i] is now. J is where it should
- be. */
- int j, k, temp;
-
- k = temp_stack.top - (regno - FIRST_STACK_REG);
- j = (temp_stack.top
- - (REGNO (recog_operand[i]) - FIRST_STACK_REG));
-
- temp = temp_stack.reg[k];
- temp_stack.reg[k] = temp_stack.reg[j];
- temp_stack.reg[j] = temp;
- }
- }
-
- /* emit insns before INSN to make sure the reg-stack is in the right
- order. */
-
- change_stack (insn, regstack, &temp_stack, emit_insn_before);
-
- /* Make the needed input register substitutions. Do death notes and
- clobbers too, because these are for inputs, not outputs. */
-
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- int regnum = get_hard_regnum (regstack, recog_operand[i]);
-
- if (regnum < 0)
- abort ();
-
- replace_reg (recog_operand_loc[i], regnum);
- }
-
- for (i = 0; i < n_notes; i++)
- if (note_kind[i] == REG_DEAD)
- {
- int regnum = get_hard_regnum (regstack, note_reg[i]);
-
- if (regnum < 0)
- abort ();
-
- replace_reg (note_loc[i], regnum);
- }
-
- for (i = 0; i < n_clobbers; i++)
- {
- /* It's OK for a CLOBBER to reference a reg that is not live.
- Don't try to replace it in that case. */
- int regnum = get_hard_regnum (regstack, clobber_reg[i]);
-
- if (regnum >= 0)
- {
- /* Sigh - clobbers always have QImode. But replace_reg knows
- that these regs can't be MODE_INT and will abort. Just put
- the right reg there without calling replace_reg. */
-
- *clobber_loc[i] = FP_MODE_REG (regnum, DFmode);
- }
- }
-
- /* Now remove from REGSTACK any inputs that the asm implicitly popped. */
-
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- /* An input reg is implicitly popped if it is tied to an
- output, or if there is a CLOBBER for it. */
- int j;
-
- for (j = 0; j < n_clobbers; j++)
- if (operands_match_p (clobber_reg[j], recog_operand[i]))
- break;
-
- if (j < n_clobbers || recog_op_alt[i][alt].matches >= 0)
- {
- /* recog_operand[i] might not be at the top of stack. But that's
- OK, because all we need to do is pop the right number of regs
- off of the top of the reg-stack. record_asm_stack_regs
- guaranteed that all implicitly popped regs were grouped
- at the top of the reg-stack. */
-
- CLEAR_HARD_REG_BIT (regstack->reg_set,
- regstack->reg[regstack->top]);
- regstack->top--;
- }
- }
-
- /* Now add to REGSTACK any outputs that the asm implicitly pushed.
- Note that there isn't any need to substitute register numbers.
- ??? Explain why this is true. */
-
- for (i = LAST_STACK_REG; i >= FIRST_STACK_REG; i--)
- {
- /* See if there is an output for this hard reg. */
- int j;
-
- for (j = 0; j < n_outputs; j++)
- if (STACK_REG_P (recog_operand[j]) && REGNO (recog_operand[j]) == i)
- {
- regstack->reg[++regstack->top] = i;
- SET_HARD_REG_BIT (regstack->reg_set, i);
- break;
- }
- }
-
- /* Now emit a pop insn for any REG_UNUSED output, or any REG_DEAD
- input that the asm didn't implicitly pop. If the asm didn't
- implicitly pop an input reg, that reg will still be live.
-
- Note that we can't use find_regno_note here: the register numbers
- in the death notes have already been substituted. */
-
- for (i = 0; i < n_outputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- int j;
-
- for (j = 0; j < n_notes; j++)
- if (REGNO (recog_operand[i]) == REGNO (note_reg[j])
- && note_kind[j] == REG_UNUSED)
- {
- insn = emit_pop_insn (insn, regstack, recog_operand[i],
- emit_insn_after);
- break;
- }
- }
-
- for (i = n_outputs; i < n_outputs + n_inputs; i++)
- if (STACK_REG_P (recog_operand[i]))
- {
- int j;
-
- for (j = 0; j < n_notes; j++)
- if (REGNO (recog_operand[i]) == REGNO (note_reg[j])
- && note_kind[j] == REG_DEAD
- && TEST_HARD_REG_BIT (regstack->reg_set,
- REGNO (recog_operand[i])))
- {
- insn = emit_pop_insn (insn, regstack, recog_operand[i],
- emit_insn_after);
- break;
- }
- }
-}
-
-/* Substitute stack hard reg numbers for stack virtual registers in
- INSN. Non-stack register numbers are not changed. REGSTACK is the
- current stack content. Insns may be emitted as needed to arrange the
- stack for the 387 based on the contents of the insn. */
-
-static void
-subst_stack_regs (insn, regstack)
- rtx insn;
- stack regstack;
-{
- register rtx *note_link, note;
- register int i;
-
- if (GET_CODE (insn) == CALL_INSN)
- {
- int top = regstack->top;
-
- /* If there are any floating point parameters to be passed in
- registers for this call, make sure they are in the right
- order. */
-
- if (top >= 0)
- {
- straighten_stack (PREV_INSN (insn), regstack);
-
- /* Now mark the arguments as dead after the call. */
-
- while (regstack->top >= 0)
- {
- CLEAR_HARD_REG_BIT (regstack->reg_set, FIRST_STACK_REG + regstack->top);
- regstack->top--;
- }
- }
- }
-
- /* Do the actual substitution if any stack regs are mentioned.
-     Since we only record whether the entire insn mentions stack regs, and
- subst_stack_regs_pat only works for patterns that contain stack regs,
- we must check each pattern in a parallel here. A call_value_pop could
- fail otherwise. */
-
- if (GET_MODE (insn) == QImode)
- {
- int n_operands = asm_noperands (PATTERN (insn));
- if (n_operands >= 0)
- {
- /* This insn is an `asm' with operands. Decode the operands,
- decide how many are inputs, and do register substitution.
- Any REG_UNUSED notes will be handled by subst_asm_stack_regs. */
-
- subst_asm_stack_regs (insn, regstack);
- return;
- }
-
- if (GET_CODE (PATTERN (insn)) == PARALLEL)
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- {
- if (stack_regs_mentioned_p (XVECEXP (PATTERN (insn), 0, i)))
- subst_stack_regs_pat (insn, regstack,
- XVECEXP (PATTERN (insn), 0, i));
- }
- else
- subst_stack_regs_pat (insn, regstack, PATTERN (insn));
- }
-
- /* subst_stack_regs_pat may have deleted a no-op insn. If so, any
- REG_UNUSED will already have been dealt with, so just return. */
-
- if (GET_CODE (insn) == NOTE)
- return;
-
- /* If there is a REG_UNUSED note on a stack register on this insn,
- the indicated reg must be popped. The REG_UNUSED note is removed,
- since the form of the newly emitted pop insn references the reg,
-     making it no longer `unused'. */
-
- note_link = &REG_NOTES(insn);
- for (note = *note_link; note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_UNUSED && STACK_REG_P (XEXP (note, 0)))
- {
- *note_link = XEXP (note, 1);
- insn = emit_pop_insn (insn, regstack, XEXP (note, 0), emit_insn_after);
- }
- else
- note_link = &XEXP (note, 1);
-}
-
-/* Change the organization of the stack so that it fits a new basic
- block. Some registers might have to be popped, but there can never be
- a register live in the new block that is not now live.
-
- Insert any needed insns before or after INSN. WHEN is emit_insn_before
- or emit_insn_after. OLD is the original stack layout, and NEW is
- the desired form. OLD is updated to reflect the code emitted, ie, it
- will be the same as NEW upon return.
-
- This function will not preserve block_end[]. But that information
- is no longer needed once this has executed. */
-
-static void
-change_stack (insn, old, new, when)
- rtx insn;
- stack old;
- stack new;
- rtx (*when)();
-{
- int reg;
-
- /* We will be inserting new insns "backwards", by calling emit_insn_before.
- If we are to insert after INSN, find the next insn, and insert before
- it. */
-
- if (when == emit_insn_after)
- insn = NEXT_INSN (insn);
-
- /* Pop any registers that are not needed in the new block. */
-
- for (reg = old->top; reg >= 0; reg--)
- if (! TEST_HARD_REG_BIT (new->reg_set, old->reg[reg]))
- emit_pop_insn (insn, old, FP_MODE_REG (old->reg[reg], DFmode),
- emit_insn_before);
-
- if (new->top == -2)
- {
- /* If the new block has never been processed, then it can inherit
- the old stack order. */
-
- new->top = old->top;
- copy_memory (old->reg, new->reg, sizeof (new->reg));
- }
- else
- {
- /* This block has been entered before, and we must match the
- previously selected stack order. */
-
- /* By now, the only difference should be the order of the stack,
-	 not its depth or liveness. */
-
- GO_IF_HARD_REG_EQUAL (old->reg_set, new->reg_set, win);
-
- abort ();
-
- win:
-
- if (old->top != new->top)
- abort ();
-
- /* Loop here emitting swaps until the stack is correct. The
- worst case number of swaps emitted is N + 2, where N is the
- depth of the stack. In some cases, the reg at the top of
- stack may be correct, but swapped anyway in order to fix
- other regs. But since we never swap any other reg away from
- its correct slot, this algorithm will converge. */
-
- do
- {
- /* Swap the reg at top of stack into the position it is
- supposed to be in, until the correct top of stack appears. */
-
- while (old->reg[old->top] != new->reg[new->top])
- {
- for (reg = new->top; reg >= 0; reg--)
- if (new->reg[reg] == old->reg[old->top])
- break;
-
- if (reg == -1)
- abort ();
-
- emit_swap_insn (insn, old,
- FP_MODE_REG (old->reg[reg], DFmode));
- }
-
- /* See if any regs remain incorrect. If so, bring an
- incorrect reg to the top of stack, and let the while loop
- above fix it. */
-
- for (reg = new->top; reg >= 0; reg--)
- if (new->reg[reg] != old->reg[reg])
- {
- emit_swap_insn (insn, old,
- FP_MODE_REG (old->reg[reg], DFmode));
- break;
- }
- } while (reg >= 0);
-
- /* At this point there must be no differences. */
-
- for (reg = old->top; reg >= 0; reg--)
- if (old->reg[reg] != new->reg[reg])
- abort ();
- }
-}
-
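Editorial aside, not part of the patch: the swap loop above is easier to follow on plain arrays. A minimal sketch with a hypothetical helper, same algorithm: exchange the top slot with the slot its register belongs in until the top is correct, and whenever the top is right but a deeper slot is wrong, bring that slot to the top and repeat.

    /* Sketch only (not from reg-stack.c): CUR and WANT hold register
       numbers for slots 0..TOP, and WANT is assumed to be a permutation
       of CUR.  Each swap below stands for one exchange with the top.  */
    static void
    reorder_stack_sketch (int *cur, const int *want, int top)
    {
      int reg, tmp;

      if (top < 0)
        return;			/* empty stack: nothing to do */

      do
        {
          /* Move the current top into its target slot until the top is right.  */
          while (cur[top] != want[top])
            {
              for (reg = top; reg >= 0; reg--)
                if (want[reg] == cur[top])
                  break;

              tmp = cur[top], cur[top] = cur[reg], cur[reg] = tmp;
            }

          /* If any deeper slot is still wrong, bring it to the top and retry.  */
          for (reg = top; reg >= 0; reg--)
            if (cur[reg] != want[reg])
              {
                tmp = cur[top], cur[top] = cur[reg], cur[reg] = tmp;
                break;
              }
        }
      while (reg >= 0);
    }

Each inner swap places one register into its final slot for good, so the loop converges in at most a handful of exchanges per slot, matching the N + 2 bound cited above.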
-/* Check PAT, which points to RTL in INSN, for a LABEL_REF. If it is
- found, ensure that a jump from INSN to the code_label to which the
- label_ref points ends up with the same stack as that at the
- code_label. Do this by inserting insns just before the code_label to
- pop and rotate the stack until it is in the correct order. REGSTACK
- is the order of the register stack in INSN.
-
- Any code that is emitted here must not be later processed as part
- of any block, as it will already contain hard register numbers. */
-
-static void
-goto_block_pat (insn, regstack, pat)
- rtx insn;
- stack regstack;
- rtx pat;
-{
- rtx label;
- rtx new_jump, new_label, new_barrier;
- rtx *ref;
- stack label_stack;
- struct stack_def temp_stack;
- int reg;
-
- switch (GET_CODE (pat))
- {
- case RETURN:
- straighten_stack (PREV_INSN (insn), regstack);
- return;
- default:
- {
- int i, j;
- char *fmt = GET_RTX_FORMAT (GET_CODE (pat));
-
- for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- goto_block_pat (insn, regstack, XEXP (pat, i));
- if (fmt[i] == 'E')
- for (j = 0; j < XVECLEN (pat, i); j++)
- goto_block_pat (insn, regstack, XVECEXP (pat, i, j));
- }
- return;
- }
- case LABEL_REF:;
- }
-
- label = XEXP (pat, 0);
- if (GET_CODE (label) != CODE_LABEL)
- abort ();
-
- /* First, see if in fact anything needs to be done to the stack at all. */
- if (INSN_UID (label) <= 0)
- return;
-
- label_stack = &block_stack_in[BLOCK_NUM (label)];
-
- if (label_stack->top == -2)
- {
- /* If the target block hasn't had a stack order selected, then
- we need merely ensure that no pops are needed. */
-
- for (reg = regstack->top; reg >= 0; reg--)
- if (! TEST_HARD_REG_BIT (label_stack->reg_set, regstack->reg[reg]))
- break;
-
- if (reg == -1)
- {
- /* change_stack will not emit any code in this case. */
-
- change_stack (label, regstack, label_stack, emit_insn_after);
- return;
- }
- }
- else if (label_stack->top == regstack->top)
- {
- for (reg = label_stack->top; reg >= 0; reg--)
- if (label_stack->reg[reg] != regstack->reg[reg])
- break;
-
- if (reg == -1)
- return;
- }
-
- /* At least one insn will need to be inserted before label. Insert
- a jump around the code we are about to emit. Emit a label for the new
- code, and point the original insn at this new label. We can't use
- redirect_jump here, because we're using fld[4] of the code labels as
-   LABEL_REF chains, not as NUSES counters. */
-
- new_jump = emit_jump_insn_before (gen_jump (label), label);
- record_label_references (new_jump, PATTERN (new_jump));
- JUMP_LABEL (new_jump) = label;
-
- new_barrier = emit_barrier_after (new_jump);
-
- new_label = gen_label_rtx ();
- emit_label_after (new_label, new_barrier);
- LABEL_REFS (new_label) = new_label;
-
-  /* The old label_ref no longer points to the original code_label, so
-     strip it from that code_label's chain of references. */
-
- for (ref = &LABEL_REFS (label); *ref != label; ref = &LABEL_NEXTREF (*ref))
- if (*ref == pat)
- break;
-
- if (*ref == label)
- abort ();
-
- *ref = LABEL_NEXTREF (*ref);
-
- XEXP (pat, 0) = new_label;
- record_label_references (insn, PATTERN (insn));
-
- if (JUMP_LABEL (insn) == label)
- JUMP_LABEL (insn) = new_label;
-
- /* Now emit the needed code. */
-
- temp_stack = *regstack;
-
- change_stack (new_label, &temp_stack, label_stack, emit_insn_after);
-}
-
-/* Traverse all basic blocks in a function, converting the register
- references in each insn from the "flat" register file that gcc uses, to
- the stack-like registers the 387 uses. */
-
-static void
-convert_regs ()
-{
- register int block, reg;
- register rtx insn, next;
- struct stack_def regstack;
-
- for (block = 0; block < blocks; block++)
- {
- if (block_stack_in[block].top == -2)
- {
- /* This block has not been previously encountered. Choose a
- default mapping for any stack regs live on entry */
-
- block_stack_in[block].top = -1;
-
- for (reg = LAST_STACK_REG; reg >= FIRST_STACK_REG; reg--)
- if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, reg))
- block_stack_in[block].reg[++block_stack_in[block].top] = reg;
- }
-
- /* Process all insns in this block. Keep track of `next' here,
- so that we don't process any insns emitted while making
- substitutions in INSN. */
-
- next = block_begin[block];
- regstack = block_stack_in[block];
- do
- {
- insn = next;
- next = NEXT_INSN (insn);
-
- /* Don't bother processing unless there is a stack reg
- mentioned or if it's a CALL_INSN (register passing of
- floating point values). */
-
- if (GET_MODE (insn) == QImode || GET_CODE (insn) == CALL_INSN)
- subst_stack_regs (insn, &regstack);
-
- } while (insn != block_end[block]);
-
- /* For all further actions, INSN needs to be the last insn in
- this basic block. If subst_stack_regs inserted additional
- instructions after INSN, it is no longer the last one at
- this point. */
- next = PREV_INSN (next);
-
- /* If subst_stack_regs inserted something after a JUMP_INSN, that
- is almost certainly a bug. */
- if (GET_CODE (insn) == JUMP_INSN && insn != next)
- abort ();
- insn = next;
-
- /* Something failed if the stack life doesn't match. */
-
- GO_IF_HARD_REG_EQUAL (regstack.reg_set, block_out_reg_set[block], win);
-
- abort ();
-
- win:
-
- /* Adjust the stack of this block on exit to match the stack of
- the target block, or copy stack information into stack of
- jump target if the target block's stack order hasn't been set
- yet. */
-
- if (GET_CODE (insn) == JUMP_INSN)
- goto_block_pat (insn, &regstack, PATTERN (insn));
-
- /* Likewise handle the case where we fall into the next block. */
-
- if ((block < blocks - 1) && block_drops_in[block+1])
- change_stack (insn, &regstack, &block_stack_in[block+1],
- emit_insn_after);
- }
-
- /* If the last basic block is the end of a loop, and that loop has
- regs live at its start, then the last basic block will have regs live
- at its end that need to be popped before the function returns. */
-
- {
- int value_reg_low, value_reg_high;
- value_reg_low = value_reg_high = -1;
- {
- rtx retvalue;
- if ((retvalue = stack_result (current_function_decl)))
- {
- value_reg_low = REGNO (retvalue);
- value_reg_high = value_reg_low +
- HARD_REGNO_NREGS (value_reg_low, GET_MODE (retvalue)) - 1;
- }
-
- }
- for (reg = regstack.top; reg >= 0; reg--)
- if (regstack.reg[reg] < value_reg_low
- || regstack.reg[reg] > value_reg_high)
- insn = emit_pop_insn (insn, &regstack,
- FP_MODE_REG (regstack.reg[reg], DFmode),
- emit_insn_after);
- }
- straighten_stack (insn, &regstack);
-}
-
-/* Check expression PAT, which is in INSN, for label references.  If
-   one is found, print the block number of the destination to FILE. */
-
-static void
-print_blocks (file, insn, pat)
- FILE *file;
- rtx insn, pat;
-{
- register RTX_CODE code = GET_CODE (pat);
- register int i;
- register char *fmt;
-
- if (code == LABEL_REF)
- {
- register rtx label = XEXP (pat, 0);
-
- if (GET_CODE (label) != CODE_LABEL)
- abort ();
-
- fprintf (file, " %d", BLOCK_NUM (label));
-
- return;
- }
-
- fmt = GET_RTX_FORMAT (code);
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- print_blocks (file, insn, XEXP (pat, i));
- if (fmt[i] == 'E')
- {
- register int j;
- for (j = 0; j < XVECLEN (pat, i); j++)
- print_blocks (file, insn, XVECEXP (pat, i, j));
- }
- }
-}
-
-/* Write information about stack registers and stack blocks into FILE.
- This is part of making a debugging dump. */
-
-static void
-dump_stack_info (file)
- FILE *file;
-{
- register int block;
-
- fprintf (file, "\n%d stack blocks.\n", blocks);
- for (block = 0; block < blocks; block++)
- {
- register rtx head, jump, end;
- register int regno;
-
- fprintf (file, "\nStack block %d: first insn %d, last %d.\n",
- block, INSN_UID (block_begin[block]),
- INSN_UID (block_end[block]));
-
- head = block_begin[block];
-
- fprintf (file, "Reached from blocks: ");
- if (GET_CODE (head) == CODE_LABEL)
- for (jump = LABEL_REFS (head);
- jump != head;
- jump = LABEL_NEXTREF (jump))
- {
- register int from_block = BLOCK_NUM (CONTAINING_INSN (jump));
- fprintf (file, " %d", from_block);
- }
- if (block_drops_in[block])
- fprintf (file, " previous");
-
- fprintf (file, "\nlive stack registers on block entry: ");
- for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
- {
- if (TEST_HARD_REG_BIT (block_stack_in[block].reg_set, regno))
- fprintf (file, "%d ", regno);
- }
-
- fprintf (file, "\nlive stack registers on block exit: ");
- for (regno = FIRST_STACK_REG; regno <= LAST_STACK_REG; regno++)
- {
- if (TEST_HARD_REG_BIT (block_out_reg_set[block], regno))
- fprintf (file, "%d ", regno);
- }
-
- end = block_end[block];
-
- fprintf (file, "\nJumps to blocks: ");
- if (GET_CODE (end) == JUMP_INSN)
- print_blocks (file, end, PATTERN (end));
-
- if (block + 1 < blocks && block_drops_in[block+1])
- fprintf (file, " next");
- else if (block + 1 == blocks
- || (GET_CODE (end) == JUMP_INSN
- && GET_CODE (PATTERN (end)) == RETURN))
- fprintf (file, " return");
-
- fprintf (file, "\n");
- }
-}
-#endif /* STACK_REGS */
diff --git a/gcc/reload.c b/gcc/reload.c
index 6bb93df..c6670c8 100755
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -6301,11 +6301,6 @@ find_equiv_reg (goal, insn, class, other, reload_reg_p, goalreg, mode)
|| need_stable_sp))
return 0;
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP && GET_CODE (p) == NOTE
- && NOTE_LINE_NUMBER (p) == NOTE_INSN_SETJMP)
- return 0;
-#endif
#ifdef INSN_CLOBBERS_REGNO_P
if ((valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 8cf24a7..e8af2a2 100755
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -1218,108 +1218,6 @@ reload (first, global, dumpfile)
static void
maybe_fix_stack_asms ()
{
-#ifdef STACK_REGS
- char *constraints[MAX_RECOG_OPERANDS];
- enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
- struct insn_chain *chain;
-
- for (chain = reload_insn_chain; chain != 0; chain = chain->next)
- {
- int i, noperands;
- HARD_REG_SET clobbered, allowed;
- rtx pat;
-
- if (GET_RTX_CLASS (GET_CODE (chain->insn)) != 'i'
- || (noperands = asm_noperands (PATTERN (chain->insn))) < 0)
- continue;
- pat = PATTERN (chain->insn);
- if (GET_CODE (pat) != PARALLEL)
- continue;
-
- CLEAR_HARD_REG_SET (clobbered);
- CLEAR_HARD_REG_SET (allowed);
-
- /* First, make a mask of all stack regs that are clobbered. */
- for (i = 0; i < XVECLEN (pat, 0); i++)
- {
- rtx t = XVECEXP (pat, 0, i);
- if (GET_CODE (t) == CLOBBER && STACK_REG_P (XEXP (t, 0)))
- SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
- }
-
- /* Get the operand values and constraints out of the insn. */
- decode_asm_operands (pat, recog_operand, recog_operand_loc,
- constraints, operand_mode);
-
- /* For every operand, see what registers are allowed. */
- for (i = 0; i < noperands; i++)
- {
- char *p = constraints[i];
- /* For every alternative, we compute the class of registers allowed
- for reloading in CLS, and merge its contents into the reg set
- ALLOWED. */
- int cls = (int) NO_REGS;
-
- for (;;)
- {
- char c = *p++;
-
- if (c == '\0' || c == ',' || c == '#')
- {
- /* End of one alternative - mark the regs in the current
- class, and reset the class. */
- IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
- cls = NO_REGS;
- if (c == '#')
- do {
- c = *p++;
- } while (c != '\0' && c != ',');
- if (c == '\0')
- break;
- continue;
- }
-
- switch (c)
- {
- case '=': case '+': case '*': case '%': case '?': case '!':
- case '0': case '1': case '2': case '3': case '4': case 'm':
- case '<': case '>': case 'V': case 'o': case '&': case 'E':
- case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
- case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
- case 'P':
-#ifdef EXTRA_CONSTRAINT
- case 'Q': case 'R': case 'S': case 'T': case 'U':
-#endif
- break;
-
- case 'p':
- cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
- break;
-
- case 'g':
- case 'r':
- cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
- break;
-
- default:
- cls = (int) reg_class_subunion[cls][(int) REG_CLASS_FROM_LETTER (c)];
-
- }
- }
- }
- /* Those of the registers which are clobbered, but allowed by the
- constraints, must be usable as reload registers. So clear them
- out of the life information. */
- AND_HARD_REG_SET (allowed, clobbered);
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (TEST_HARD_REG_BIT (allowed, i))
- {
- CLEAR_REGNO_REG_SET (chain->live_before, i);
- CLEAR_REGNO_REG_SET (chain->live_after, i);
- }
- }
-
-#endif
}
@@ -8693,16 +8591,6 @@ reload_cse_regs_1 (first)
continue;
}
-#ifdef NON_SAVING_SETJMP
- if (NON_SAVING_SETJMP && GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP)
- {
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- reg_values[i] = 0;
-
- continue;
- }
-#endif
if (GET_RTX_CLASS (GET_CODE (insn)) != 'i')
continue;
diff --git a/gcc/reorg.c b/gcc/reorg.c
deleted file mode 100755
index a636a43..0000000
--- a/gcc/reorg.c
+++ /dev/null
@@ -1,3525 +0,0 @@
-/* Perform instruction reorganizations for delay slot filling.
- Copyright (C) 1992, 93-98, 1999 Free Software Foundation, Inc.
- Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
- Hacked by Michael Tiemann (tiemann@cygnus.com).
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
-/* Instruction reorganization pass.
-
- This pass runs after register allocation and final jump
- optimization. It should be the last pass to run before peephole.
- It serves primarily to fill delay slots of insns, typically branch
- and call insns. Other insns typically involve more complicated
- interactions of data dependencies and resource constraints, and
- are better handled by scheduling before register allocation (by the
- function `schedule_insns').
-
- The Branch Penalty is the number of extra cycles that are needed to
- execute a branch insn. On an ideal machine, branches take a single
- cycle, and the Branch Penalty is 0. Several RISC machines approach
- branch delays differently:
-
- The MIPS and AMD 29000 have a single branch delay slot. Most insns
- (except other branches) can be used to fill this slot. When the
- slot is filled, two insns execute in two cycles, reducing the
- branch penalty to zero.
-
- The Motorola 88000 conditionally exposes its branch delay slot,
- so code is shorter when it is turned off, but will run faster
- when useful insns are scheduled there.
-
- The IBM ROMP has two forms of branch and call insns, both with and
- without a delay slot. Much like the 88k, insns not using the delay
-   slot can be shorter (2 bytes vs. 4 bytes), but will run slower.
-
- The SPARC always has a branch delay slot, but its effects can be
- annulled when the branch is not taken. This means that failing to
- find other sources of insns, we can hoist an insn from the branch
- target that would only be safe to execute knowing that the branch
- is taken.
-
- The HP-PA always has a branch delay slot. For unconditional branches
- its effects can be annulled when the branch is taken. The effects
- of the delay slot in a conditional branch can be nullified for forward
- taken branches, or for untaken backward branches. This means
- we can hoist insns from the fall-through path for forward branches or
- steal insns from the target of backward branches.
-
- The TMS320C3x and C4x have three branch delay slots. When the three
- slots are filled, the branch penalty is zero. Most insns can fill the
- delay slots except jump insns.
-
- Three techniques for filling delay slots have been implemented so far:
-
- (1) `fill_simple_delay_slots' is the simplest, most efficient way
- to fill delay slots. This pass first looks for insns which come
- from before the branch and which are safe to execute after the
- branch. Then it searches after the insn requiring delay slots or,
- in the case of a branch, for insns that are after the point at
- which the branch merges into the fallthrough code, if such a point
- exists. When such insns are found, the branch penalty decreases
- and no code expansion takes place.
-
- (2) `fill_eager_delay_slots' is more complicated: it is used for
- scheduling conditional jumps, or for scheduling jumps which cannot
- be filled using (1). A machine need not have annulled jumps to use
- this strategy, but it helps (by keeping more options open).
- `fill_eager_delay_slots' tries to guess the direction the branch
- will go; if it guesses right 100% of the time, it can reduce the
- branch penalty as much as `fill_simple_delay_slots' does. If it
- guesses wrong 100% of the time, it might as well schedule nops (or
- on the m88k, unexpose the branch slot). When
- `fill_eager_delay_slots' takes insns from the fall-through path of
- the jump, usually there is no code expansion; when it takes insns
- from the branch target, there is code expansion if it is not the
- only way to reach that target.
-
- (3) `relax_delay_slots' uses a set of rules to simplify code that
- has been reorganized by (1) and (2). It finds cases where
- conditional test can be eliminated, jumps can be threaded, extra
- insns can be eliminated, etc. It is the job of (1) and (2) to do a
- good job of scheduling locally; `relax_delay_slots' takes care of
- making the various individual schedules work well together. It is
- especially tuned to handle the control flow interactions of branch
- insns. It does nothing for insns with delay slots that do not
- branch.
-
- On machines that use CC0, we are very conservative. We will not make
- a copy of an insn involving CC0 since we want to maintain a 1-1
- correspondence between the insn that sets and uses CC0. The insns are
- allowed to be separated by placing an insn that sets CC0 (but not an insn
- that uses CC0; we could do this, but it doesn't seem worthwhile) in a
- delay slot. In that case, we point each insn at the other with REG_CC_USER
- and REG_CC_SETTER notes. Note that these restrictions affect very few
- machines because most RISC machines with delay slots will not use CC0
- (the RT is the only known exception at this point).
-
- Not yet implemented:
-
- The Acorn Risc Machine can conditionally execute most insns, so
- it is profitable to move single insns into a position to execute
- based on the condition code of the previous insn.
-
- The HP-PA can conditionally nullify insns, providing a similar
- effect to the ARM, differing mostly in which insn is "in charge". */
-
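-/* As a concrete illustration of technique (1) above, using hypothetical
- mnemonics rather than any particular port's syntax: on a machine with a
- single branch delay slot, a sequence such as
-
- add r3,r1,r2
- beq r4,L1
- nop ; unfilled delay slot
-
- can be rewritten by hoisting the independent ADD into the slot,
-
- beq r4,L1
- add r3,r1,r2 ; executes whether or not the branch is taken
-
- eliminating the branch penalty with no code growth. */
-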
-#include "config.h"
-#include "system.h"
-#include "rtl.h"
-#include "expr.h"
-#include "insn-config.h"
-#include "conditions.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "regs.h"
-#include "insn-flags.h"
-#include "recog.h"
-#include "flags.h"
-#include "output.h"
-#include "obstack.h"
-#include "insn-attr.h"
-#include "resource.h"
-
-
-#ifdef DELAY_SLOTS
-
-#define obstack_chunk_alloc xmalloc
-#define obstack_chunk_free free
-
-#ifndef ANNUL_IFTRUE_SLOTS
-#define eligible_for_annul_true(INSN, SLOTS, TRIAL, FLAGS) 0
-#endif
-#ifndef ANNUL_IFFALSE_SLOTS
-#define eligible_for_annul_false(INSN, SLOTS, TRIAL, FLAGS) 0
-#endif
-
-/* Insns which have delay slots that have not yet been filled. */
-
-static struct obstack unfilled_slots_obstack;
-static rtx *unfilled_firstobj;
-
-/* Define macros to refer to the first and last slot containing unfilled
- insns. These are used because the list may move and its address
- should be recomputed at each use. */
-
-#define unfilled_slots_base \
- ((rtx *) obstack_base (&unfilled_slots_obstack))
-
-#define unfilled_slots_next \
- ((rtx *) obstack_next_free (&unfilled_slots_obstack))
-
-/* The `struct resources' type, used to indicate which hardware resources
- are set or needed by insns, is declared in resource.h. */
-
-/* Points to the label before the end of the function. */
-static rtx end_of_function_label;
-
-/* Mapping between INSN_UID's and position in the code since INSN_UID's do
- not always monotonically increase. */
-static int *uid_to_ruid;
-
-/* Highest valid index in `uid_to_ruid'. */
-static int max_uid;
-
-static int stop_search_p PROTO((rtx, int));
-static int resource_conflicts_p PROTO((struct resources *,
- struct resources *));
-static int insn_references_resource_p PROTO((rtx, struct resources *, int));
-static int insn_sets_resource_p PROTO((rtx, struct resources *, int));
-static rtx find_end_label PROTO((void));
-static rtx emit_delay_sequence PROTO((rtx, rtx, int));
-static rtx add_to_delay_list PROTO((rtx, rtx));
-static rtx delete_from_delay_slot PROTO((rtx));
-static void delete_scheduled_jump PROTO((rtx));
-static void note_delay_statistics PROTO((int, int));
-static rtx optimize_skip PROTO((rtx));
-static int get_jump_flags PROTO((rtx, rtx));
-static int rare_destination PROTO((rtx));
-static int mostly_true_jump PROTO((rtx, rtx));
-static rtx get_branch_condition PROTO((rtx, rtx));
-static int condition_dominates_p PROTO((rtx, rtx));
-static int redirect_with_delay_slots_safe_p PROTO ((rtx, rtx, rtx));
-static int redirect_with_delay_list_safe_p PROTO ((rtx, rtx, rtx));
-static int check_annul_list_true_false PROTO ((int, rtx));
-static rtx steal_delay_list_from_target PROTO((rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *, rtx *));
-static rtx steal_delay_list_from_fallthrough PROTO((rtx, rtx, rtx, rtx,
- struct resources *,
- struct resources *,
- struct resources *,
- int, int *, int *));
-static void try_merge_delay_insns PROTO((rtx, rtx));
-static rtx redundant_insn PROTO((rtx, rtx, rtx));
-static int own_thread_p PROTO((rtx, rtx, int));
-static void update_block PROTO((rtx, rtx));
-static int reorg_redirect_jump PROTO((rtx, rtx));
-static void update_reg_dead_notes PROTO((rtx, rtx));
-static void fix_reg_dead_note PROTO((rtx, rtx));
-static void update_reg_unused_notes PROTO((rtx, rtx));
-static void fill_simple_delay_slots PROTO((int));
-static rtx fill_slots_from_thread PROTO((rtx, rtx, rtx, rtx, int, int,
- int, int, int *, rtx));
-static void fill_eager_delay_slots PROTO((void));
-static void relax_delay_slots PROTO((rtx));
-static void make_return_insns PROTO((rtx));
-
-/* Return TRUE if this insn should stop the search for insn to fill delay
- slots. LABELS_P indicates that labels should terminate the search.
- In all cases, jumps terminate the search. */
-
-static int
-stop_search_p (insn, labels_p)
- rtx insn;
- int labels_p;
-{
- if (insn == 0)
- return 1;
-
- switch (GET_CODE (insn))
- {
- case NOTE:
- case CALL_INSN:
- return 0;
-
- case CODE_LABEL:
- return labels_p;
-
- case JUMP_INSN:
- case BARRIER:
- return 1;
-
- case INSN:
- /* OK unless it contains a delay slot or is an `asm' insn of some type.
- We don't know anything about these. */
- return (GET_CODE (PATTERN (insn)) == SEQUENCE
- || GET_CODE (PATTERN (insn)) == ASM_INPUT
- || asm_noperands (PATTERN (insn)) >= 0);
-
- default:
- abort ();
- }
-}
-
-/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
- resource set contains a volatile memory reference. Otherwise, return FALSE. */
-
-static int
-resource_conflicts_p (res1, res2)
- struct resources *res1, *res2;
-{
- if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
- || (res1->unch_memory && res2->unch_memory)
- || res1->volatil || res2->volatil)
- return 1;
-
-#ifdef HARD_REG_SET
- return (res1->regs & res2->regs) != HARD_CONST (0);
-#else
- {
- int i;
-
- for (i = 0; i < HARD_REG_SET_LONGS; i++)
- if ((res1->regs[i] & res2->regs[i]) != 0)
- return 1;
- return 0;
- }
-#endif
-}
-
-/* Return TRUE if any resource marked in RES, a `struct resources', is
- referenced by INSN. If INCLUDE_DELAYED_EFFECTS is set, return if the called
- routine is using those resources.
-
- We compute this by computing all the resources referenced by INSN and
- seeing if this conflicts with RES. It might be faster to directly check
- ourselves, and this is the way it used to work, but it means duplicating
- a large block of complex code. */
-
-static int
-insn_references_resource_p (insn, res, include_delayed_effects)
- register rtx insn;
- register struct resources *res;
- int include_delayed_effects;
-{
- struct resources insn_res;
-
- CLEAR_RESOURCE (&insn_res);
- mark_referenced_resources (insn, &insn_res, include_delayed_effects);
- return resource_conflicts_p (&insn_res, res);
-}
-
-/* Return TRUE if INSN modifies resources that are marked in RES.
- INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
- included. CC0 is only modified if it is explicitly set; see comments
- in front of mark_set_resources for details. */
-
-static int
-insn_sets_resource_p (insn, res, include_delayed_effects)
- register rtx insn;
- register struct resources *res;
- int include_delayed_effects;
-{
- struct resources insn_sets;
-
- CLEAR_RESOURCE (&insn_sets);
- mark_set_resources (insn, &insn_sets, 0, include_delayed_effects);
- return resource_conflicts_p (&insn_sets, res);
-}
-
-/* Find a label at the end of the function or before a RETURN. If there is
- none, make one. */
-
-static rtx
-find_end_label ()
-{
- rtx insn;
-
- /* If we found one previously, return it. */
- if (end_of_function_label)
- return end_of_function_label;
-
- /* Otherwise, see if there is a label at the end of the function. If there
- is, it must be that RETURN insns aren't needed, so that is our return
- label and we don't have to do anything else. */
-
- insn = get_last_insn ();
- while (GET_CODE (insn) == NOTE
- || (GET_CODE (insn) == INSN
- && (GET_CODE (PATTERN (insn)) == USE
- || GET_CODE (PATTERN (insn)) == CLOBBER)))
- insn = PREV_INSN (insn);
-
- /* When a target threads its epilogue we might already have a
- suitable return insn. If so put a label before it for the
- end_of_function_label. */
- if (GET_CODE (insn) == BARRIER
- && GET_CODE (PREV_INSN (insn)) == JUMP_INSN
- && GET_CODE (PATTERN (PREV_INSN (insn))) == RETURN)
- {
- rtx temp = PREV_INSN (PREV_INSN (insn));
- end_of_function_label = gen_label_rtx ();
- LABEL_NUSES (end_of_function_label) = 0;
-
- /* Put the label before any USE insns that may precede the RETURN insn. */
- while (GET_CODE (temp) == USE)
- temp = PREV_INSN (temp);
-
- emit_label_after (end_of_function_label, temp);
- }
-
- else if (GET_CODE (insn) == CODE_LABEL)
- end_of_function_label = insn;
- else
- {
- /* Otherwise, make a new label and emit a RETURN and BARRIER,
- if needed. */
- end_of_function_label = gen_label_rtx ();
- LABEL_NUSES (end_of_function_label) = 0;
- emit_label (end_of_function_label);
- }
-
- /* Show one additional use for this label so it won't go away until
- we are done. */
- ++LABEL_NUSES (end_of_function_label);
-
- return end_of_function_label;
-}
-
-/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace
- the pattern of INSN with the SEQUENCE.
-
- Chain the insns so that NEXT_INSN of each insn in the sequence points to
- the next and NEXT_INSN of the last insn in the sequence points to
- the first insn after the sequence. Similarly for PREV_INSN. This makes
- it easier to scan all insns.
-
- Returns the SEQUENCE that replaces INSN. */
-
-static rtx
-emit_delay_sequence (insn, list, length)
- rtx insn;
- rtx list;
- int length;
-{
- register int i = 1;
- register rtx li;
- int had_barrier = 0;
-
- /* Allocate the rtvec to hold the insns and the SEQUENCE. */
- rtvec seqv = rtvec_alloc (length + 1);
- rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv);
- rtx seq_insn = make_insn_raw (seq);
- rtx first = get_insns ();
- rtx last = get_last_insn ();
-
- /* Make a copy of the insn having delay slots. */
- rtx delay_insn = copy_rtx (insn);
-
- /* If INSN is followed by a BARRIER, delete the BARRIER since it will only
- confuse further processing. Update LAST in case it was the last insn.
- We will put the BARRIER back in later. */
- if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == BARRIER)
- {
- delete_insn (NEXT_INSN (insn));
- last = get_last_insn ();
- had_barrier = 1;
- }
-
- /* Splice our SEQUENCE into the insn stream where INSN used to be. */
- NEXT_INSN (seq_insn) = NEXT_INSN (insn);
- PREV_INSN (seq_insn) = PREV_INSN (insn);
-
- if (insn != last)
- PREV_INSN (NEXT_INSN (seq_insn)) = seq_insn;
-
- if (insn != first)
- NEXT_INSN (PREV_INSN (seq_insn)) = seq_insn;
-
- /* Note the calls to set_new_first_and_last_insn must occur after
- SEQ_INSN has been completely spliced into the insn stream.
-
- Otherwise CUR_INSN_UID will get set to an incorrect value because
- set_new_first_and_last_insn will not find SEQ_INSN in the chain. */
- if (insn == last)
- set_new_first_and_last_insn (first, seq_insn);
-
- if (insn == first)
- set_new_first_and_last_insn (seq_insn, last);
-
- /* Build our SEQUENCE and rebuild the insn chain. */
- XVECEXP (seq, 0, 0) = delay_insn;
- INSN_DELETED_P (delay_insn) = 0;
- PREV_INSN (delay_insn) = PREV_INSN (seq_insn);
-
- for (li = list; li; li = XEXP (li, 1), i++)
- {
- rtx tem = XEXP (li, 0);
- rtx note;
-
- /* Show that this copy of the insn isn't deleted. */
- INSN_DELETED_P (tem) = 0;
-
- XVECEXP (seq, 0, i) = tem;
- PREV_INSN (tem) = XVECEXP (seq, 0, i - 1);
- NEXT_INSN (XVECEXP (seq, 0, i - 1)) = tem;
-
- /* Remove any REG_DEAD notes because we can't rely on them now
- that the insn has been moved. */
- for (note = REG_NOTES (tem); note; note = XEXP (note, 1))
- if (REG_NOTE_KIND (note) == REG_DEAD)
- XEXP (note, 0) = const0_rtx;
- }
-
- NEXT_INSN (XVECEXP (seq, 0, length)) = NEXT_INSN (seq_insn);
-
- /* If the previous insn is a SEQUENCE, update the NEXT_INSN pointer on the
- last insn in that SEQUENCE to point to us. Similarly for the first
- insn in the following insn if it is a SEQUENCE. */
-
- if (PREV_INSN (seq_insn) && GET_CODE (PREV_INSN (seq_insn)) == INSN
- && GET_CODE (PATTERN (PREV_INSN (seq_insn))) == SEQUENCE)
- NEXT_INSN (XVECEXP (PATTERN (PREV_INSN (seq_insn)), 0,
- XVECLEN (PATTERN (PREV_INSN (seq_insn)), 0) - 1))
- = seq_insn;
-
- if (NEXT_INSN (seq_insn) && GET_CODE (NEXT_INSN (seq_insn)) == INSN
- && GET_CODE (PATTERN (NEXT_INSN (seq_insn))) == SEQUENCE)
- PREV_INSN (XVECEXP (PATTERN (NEXT_INSN (seq_insn)), 0, 0)) = seq_insn;
-
- /* If there used to be a BARRIER, put it back. */
- if (had_barrier)
- emit_barrier_after (seq_insn);
-
- if (i != length + 1)
- abort ();
-
- return seq_insn;
-}
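-
-/* For example (schematically), after emit_delay_sequence a branch with one
- filled delay slot becomes a single insn whose pattern is
-
- (sequence [(jump_insn ...) ; the insn that needed the slot
- (insn ...)]) ; the insn now filling the slot
-
- and the NEXT_INSN/PREV_INSN chain runs through the SEQUENCE elements as
- described above, so later passes can still walk every insn. */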
-
-/* Add INSN to DELAY_LIST and return the head of the new list. The list must
- be in the order in which the insns are to be executed. */
-
-static rtx
-add_to_delay_list (insn, delay_list)
- rtx insn;
- rtx delay_list;
-{
- /* If we have an empty list, just make a new list element. If
- INSN has its block number recorded, clear it since we may
- be moving the insn to a new block. */
-
- if (delay_list == 0)
- {
- clear_hashed_info_for_insn (insn);
- return gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
- }
-
- /* Otherwise this must be an INSN_LIST. Add INSN to the end of the
- list. */
- XEXP (delay_list, 1) = add_to_delay_list (insn, XEXP (delay_list, 1));
-
- return delay_list;
-}
-
-/* Delete INSN from the delay slot of the insn that it is in, which may
- produce an insn with no delay slots. Return the new insn. */
-
-static rtx
-delete_from_delay_slot (insn)
- rtx insn;
-{
- rtx trial, seq_insn, seq, prev;
- rtx delay_list = 0;
- int i;
-
- /* We first must find the insn containing the SEQUENCE with INSN in its
- delay slot. Do this by finding an insn, TRIAL, where
- PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */
-
- for (trial = insn;
- PREV_INSN (NEXT_INSN (trial)) == trial;
- trial = NEXT_INSN (trial))
- ;
-
- seq_insn = PREV_INSN (NEXT_INSN (trial));
- seq = PATTERN (seq_insn);
-
- /* Create a delay list consisting of all the insns other than the one
- we are deleting (unless we were the only one). */
- if (XVECLEN (seq, 0) > 2)
- for (i = 1; i < XVECLEN (seq, 0); i++)
- if (XVECEXP (seq, 0, i) != insn)
- delay_list = add_to_delay_list (XVECEXP (seq, 0, i), delay_list);
-
- /* Delete the old SEQUENCE, re-emit the insn that used to have the delay
- list, and rebuild the delay list if non-empty. */
- prev = PREV_INSN (seq_insn);
- trial = XVECEXP (seq, 0, 0);
- delete_insn (seq_insn);
- add_insn_after (trial, prev);
-
- if (GET_CODE (trial) == JUMP_INSN
- && (simplejump_p (trial) || GET_CODE (PATTERN (trial)) == RETURN))
- emit_barrier_after (trial);
-
- /* If there are any delay insns, re-emit them. Otherwise clear the
- annul flag. */
- if (delay_list)
- trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0) - 2);
- else
- INSN_ANNULLED_BRANCH_P (trial) = 0;
-
- INSN_FROM_TARGET_P (insn) = 0;
-
- /* Show we need to fill this insn again. */
- obstack_ptr_grow (&unfilled_slots_obstack, trial);
-
- return trial;
-}
-
-/* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down
- the insn that sets CC0 for it and delete it too. */
-
-static void
-delete_scheduled_jump (insn)
- rtx insn;
-{
- /* Delete the insn that sets cc0 for us. On machines without cc0, we could
- delete the insn that sets the condition code, but it is hard to find it.
- Since this case is rare anyway, don't bother trying; there would likely
- be other insns that became dead anyway, which we wouldn't know to
- delete. */
-
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, insn))
- {
- rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
-
- /* If a reg-note was found, it points to an insn to set CC0. This
- insn is in the delay list of some other insn. So delete it from
- the delay list it was in. */
- if (note)
- {
- if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)
- && sets_cc0_p (PATTERN (XEXP (note, 0))) == 1)
- delete_from_delay_slot (XEXP (note, 0));
- }
- else
- {
- /* The insn setting CC0 is our previous insn, but it may be in
- a delay slot. It will be the last insn in the delay slot, if
- it is. */
- rtx trial = previous_insn (insn);
- if (GET_CODE (trial) == NOTE)
- trial = prev_nonnote_insn (trial);
- if (sets_cc0_p (PATTERN (trial)) != 1
- || FIND_REG_INC_NOTE (trial, 0))
- return;
- if (PREV_INSN (NEXT_INSN (trial)) == trial)
- delete_insn (trial);
- else
- delete_from_delay_slot (trial);
- }
- }
-#endif
-
- delete_insn (insn);
-}
-
-/* Counters for delay-slot filling. */
-
-#define NUM_REORG_FUNCTIONS 2
-#define MAX_DELAY_HISTOGRAM 3
-#define MAX_REORG_PASSES 2
-
-static int num_insns_needing_delays[NUM_REORG_FUNCTIONS][MAX_REORG_PASSES];
-
-static int num_filled_delays[NUM_REORG_FUNCTIONS][MAX_DELAY_HISTOGRAM+1][MAX_REORG_PASSES];
-
-static int reorg_pass_number;
-
-static void
-note_delay_statistics (slots_filled, index)
- int slots_filled, index;
-{
- num_insns_needing_delays[index][reorg_pass_number]++;
- if (slots_filled > MAX_DELAY_HISTOGRAM)
- slots_filled = MAX_DELAY_HISTOGRAM;
- num_filled_delays[index][slots_filled][reorg_pass_number]++;
-}
-
-#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
-
-/* Optimize the following cases:
-
- 1. When a conditional branch skips over only one instruction,
- use an annulling branch and put that insn in the delay slot.
- Use either a branch that annuls when the condition if true or
- invert the test with a branch that annuls when the condition is
- false. This saves insns, since otherwise we must copy an insn
- from the L1 target.
-
- (orig) (skip) (otherwise)
- Bcc.n L1 Bcc',a L1 Bcc,a L1'
- insn insn insn2
- L1: L1: L1:
- insn2 insn2 insn2
- insn3 insn3 L1':
- insn3
-
- 2. When a conditional branch skips over only one instruction,
- and after that, it unconditionally branches somewhere else,
- perform the similar optimization. This saves executing the
- second branch in the case where the inverted condition is true.
-
- Bcc.n L1 Bcc',a L2
- insn insn
- L1: L1:
- Bra L2 Bra L2
-
- INSN is a JUMP_INSN.
-
- This should be expanded to skip over N insns, where N is the number
- of delay slots required. */
-
-static rtx
-optimize_skip (insn)
- register rtx insn;
-{
- register rtx trial = next_nonnote_insn (insn);
- rtx next_trial = next_active_insn (trial);
- rtx delay_list = 0;
- rtx target_label;
- int flags;
-
- flags = get_jump_flags (insn, JUMP_LABEL (insn));
-
- if (trial == 0
- || GET_CODE (trial) != INSN
- || GET_CODE (PATTERN (trial)) == SEQUENCE
- || recog_memoized (trial) < 0
- || (! eligible_for_annul_false (insn, 0, trial, flags)
- && ! eligible_for_annul_true (insn, 0, trial, flags)))
- return 0;
-
- /* There are two cases where we are just executing one insn (we assume
- here that a branch requires only one insn; this should be generalized
- at some point): Where the branch goes around a single insn or where
- we have one insn followed by a branch to the same label we branch to.
- In both of these cases, inverting the jump and annulling the delay
- slot give the same effect in fewer insns. */
- if ((next_trial == next_active_insn (JUMP_LABEL (insn)))
- || (next_trial != 0
- && GET_CODE (next_trial) == JUMP_INSN
- && JUMP_LABEL (insn) == JUMP_LABEL (next_trial)
- && (simplejump_p (next_trial)
- || GET_CODE (PATTERN (next_trial)) == RETURN)))
- {
- if (eligible_for_annul_false (insn, 0, trial, flags))
- {
- if (invert_jump (insn, JUMP_LABEL (insn)))
- INSN_FROM_TARGET_P (trial) = 1;
- else if (! eligible_for_annul_true (insn, 0, trial, flags))
- return 0;
- }
-
- delay_list = add_to_delay_list (trial, NULL_RTX);
- next_trial = next_active_insn (trial);
- update_block (trial, trial);
- delete_insn (trial);
-
- /* Also, if we are targeting an unconditional
- branch, thread our jump to the target of that branch. Don't
- change this into a RETURN here, because it may not accept what
- we have in the delay slot. We'll fix this up later. */
- if (next_trial && GET_CODE (next_trial) == JUMP_INSN
- && (simplejump_p (next_trial)
- || GET_CODE (PATTERN (next_trial)) == RETURN))
- {
- target_label = JUMP_LABEL (next_trial);
- if (target_label == 0)
- target_label = find_end_label ();
-
- /* Recompute the flags based on TARGET_LABEL since threading
- the jump to TARGET_LABEL may change the direction of the
- jump (which may change the circumstances in which the
- delay slot is nullified). */
- flags = get_jump_flags (insn, target_label);
- if (eligible_for_annul_true (insn, 0, trial, flags))
- reorg_redirect_jump (insn, target_label);
- }
-
- INSN_ANNULLED_BRANCH_P (insn) = 1;
- }
-
- return delay_list;
-}
-#endif
-
-
-/* Encode and return branch direction and prediction information for
- INSN assuming it will jump to LABEL.
-
- Non conditional branches return no direction information and
- are predicted as very likely taken. */
-
-static int
-get_jump_flags (insn, label)
- rtx insn, label;
-{
- int flags;
-
- /* get_jump_flags can be passed any insn with delay slots, these may
- be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch
- direction information, and only if they are conditional jumps.
-
- If LABEL is zero, then there is no way to determine the branch
- direction. */
- if (GET_CODE (insn) == JUMP_INSN
- && (condjump_p (insn) || condjump_in_parallel_p (insn))
- && INSN_UID (insn) <= max_uid
- && label != 0
- && INSN_UID (label) <= max_uid)
- flags
- = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)])
- ? ATTR_FLAG_forward : ATTR_FLAG_backward;
- /* No valid direction information. */
- else
- flags = 0;
-
- /* If INSN is a conditional branch, call mostly_true_jump to determine
- the branch prediction.
-
- Non conditional branches are predicted as very likely taken. */
- if (GET_CODE (insn) == JUMP_INSN
- && (condjump_p (insn) || condjump_in_parallel_p (insn)))
- {
- int prediction;
-
- prediction = mostly_true_jump (insn, get_branch_condition (insn, label));
- switch (prediction)
- {
- case 2:
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
- break;
- case 1:
- flags |= ATTR_FLAG_likely;
- break;
- case 0:
- flags |= ATTR_FLAG_unlikely;
- break;
- case -1:
- flags |= (ATTR_FLAG_very_unlikely | ATTR_FLAG_unlikely);
- break;
-
- default:
- abort();
- }
- }
- else
- flags |= (ATTR_FLAG_very_likely | ATTR_FLAG_likely);
-
- return flags;
-}
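-
-/* So, for instance, a conditional jump to a label earlier in the function
- that mostly_true_jump rates as very likely taken would get roughly
- ATTR_FLAG_backward | ATTR_FLAG_very_likely | ATTR_FLAG_likely, while an
- unconditional jump gets only the likelihood bits. */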
-
-/* Return 1 if INSN is a destination that will be branched to rarely (the
- return point of a function); return 2 if DEST will be branched to very
- rarely (a call to a function that doesn't return). Otherwise,
- return 0. */
-
-static int
-rare_destination (insn)
- rtx insn;
-{
- int jump_count = 0;
- rtx next;
-
- for (; insn; insn = next)
- {
- if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
- insn = XVECEXP (PATTERN (insn), 0, 0);
-
- next = NEXT_INSN (insn);
-
- switch (GET_CODE (insn))
- {
- case CODE_LABEL:
- return 0;
- case BARRIER:
- /* A BARRIER can either be after a JUMP_INSN or a CALL_INSN. We
- don't scan past JUMP_INSNs, so any barrier we find here must
- have been after a CALL_INSN and hence mean the call doesn't
- return. */
- return 2;
- case JUMP_INSN:
- if (GET_CODE (PATTERN (insn)) == RETURN)
- return 1;
- else if (simplejump_p (insn)
- && jump_count++ < 10)
- next = JUMP_LABEL (insn);
- else
- return 0;
-
- default:
- break;
- }
- }
-
- /* If we got here it means we hit the end of the function. So this
- is an unlikely destination. */
-
- return 1;
-}
-
-/* Return truth value of the statement that this branch
- is mostly taken. If we think that the branch is extremely likely
- to be taken, we return 2. If the branch is slightly more likely to be
- taken, return 1. If the branch is slightly less likely to be taken,
- return 0 and if the branch is highly unlikely to be taken, return -1.
-
- CONDITION, if non-zero, is the condition that JUMP_INSN is testing. */
-
-static int
-mostly_true_jump (jump_insn, condition)
- rtx jump_insn, condition;
-{
- rtx target_label = JUMP_LABEL (jump_insn);
- rtx insn;
- int rare_dest = rare_destination (target_label);
- int rare_fallthrough = rare_destination (NEXT_INSN (jump_insn));
-
- /* CYGNUS LOCAL -- branch prediction */
- int expected = condjump_expect_p (jump_insn);
-
- if (expected > 0)
- return 2;
- else if (expected < 0)
- return -1;
- /* END CYGNUS LOCAL -- branch prediction */
-
- /* If this is a branch outside a loop, it is highly unlikely. */
- if (GET_CODE (PATTERN (jump_insn)) == SET
- && GET_CODE (SET_SRC (PATTERN (jump_insn))) == IF_THEN_ELSE
- && ((GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 1)) == LABEL_REF
- && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 1)))
- || (GET_CODE (XEXP (SET_SRC (PATTERN (jump_insn)), 2)) == LABEL_REF
- && LABEL_OUTSIDE_LOOP_P (XEXP (SET_SRC (PATTERN (jump_insn)), 2)))))
- return -1;
-
- if (target_label)
- {
- /* If this is the test of a loop, it is very likely true. We scan
- backwards from the target label. If we find a NOTE_INSN_LOOP_BEG
- before the next real insn, we assume the branch is to the top of
- the loop. */
- for (insn = PREV_INSN (target_label);
- insn && GET_CODE (insn) == NOTE;
- insn = PREV_INSN (insn))
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
- return 2;
-
- /* If this is a jump to the test of a loop, it is likely true. We scan
- forwards from the target label. If we find a NOTE_INSN_LOOP_VTOP
- before the next real insn, we assume the branch is to the loop branch
- test. */
- for (insn = NEXT_INSN (target_label);
- insn && GET_CODE (insn) == NOTE;
- insn = NEXT_INSN (insn))
- if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_VTOP)
- return 1;
- }
-
- /* Look at the relative rarities of the fallthrough and destination. If
- they differ, we can predict the branch that way. */
-
- switch (rare_fallthrough - rare_dest)
- {
- case -2:
- return -1;
- case -1:
- return 0;
- case 0:
- break;
- case 1:
- return 1;
- case 2:
- return 2;
- }
-
- /* If we couldn't figure out what this jump was, assume it won't be
- taken. This should be rare. */
- if (condition == 0)
- return 0;
-
- /* EQ tests are usually false and NE tests are usually true. Also,
- most quantities are positive, so we can make the appropriate guesses
- about signed comparisons against zero. */
- switch (GET_CODE (condition))
- {
- case CONST_INT:
- /* Unconditional branch. */
- return 1;
- case EQ:
- return 0;
- case NE:
- return 1;
- case LE:
- case LT:
- if (XEXP (condition, 1) == const0_rtx)
- return 0;
- break;
- case GE:
- case GT:
- if (XEXP (condition, 1) == const0_rtx)
- return 1;
- break;
-
- default:
- break;
- }
-
- /* Predict backward branches as usually taken, forward branches as not. If
- we don't know whether this is forward or backward, assume the branch
- will be taken, since most are. */
- return (target_label == 0 || INSN_UID (jump_insn) > max_uid
- || INSN_UID (target_label) > max_uid
- || (uid_to_ruid[INSN_UID (jump_insn)]
- > uid_to_ruid[INSN_UID (target_label)]));
-}
-
-/* Return the condition under which INSN will branch to TARGET. If TARGET
- is zero, return the condition under which INSN will return. If INSN is
- an unconditional branch, return const_true_rtx. If INSN isn't a simple
- type of jump, or it doesn't go to TARGET, return 0. */
-
-static rtx
-get_branch_condition (insn, target)
- rtx insn;
- rtx target;
-{
- rtx pat = PATTERN (insn);
- rtx src;
-
- if (condjump_in_parallel_p (insn))
- pat = XVECEXP (pat, 0, 0);
-
- if (GET_CODE (pat) == RETURN)
- return target == 0 ? const_true_rtx : 0;
-
- else if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
- return 0;
-
- src = SET_SRC (pat);
- if (GET_CODE (src) == LABEL_REF && XEXP (src, 0) == target)
- return const_true_rtx;
-
- else if (GET_CODE (src) == IF_THEN_ELSE
- && ((target == 0 && GET_CODE (XEXP (src, 1)) == RETURN)
- || (GET_CODE (XEXP (src, 1)) == LABEL_REF
- && XEXP (XEXP (src, 1), 0) == target))
- && XEXP (src, 2) == pc_rtx)
- return XEXP (src, 0);
-
- else if (GET_CODE (src) == IF_THEN_ELSE
- && ((target == 0 && GET_CODE (XEXP (src, 2)) == RETURN)
- || (GET_CODE (XEXP (src, 2)) == LABEL_REF
- && XEXP (XEXP (src, 2), 0) == target))
- && XEXP (src, 1) == pc_rtx)
- return gen_rtx_fmt_ee (reverse_condition (GET_CODE (XEXP (src, 0))),
- GET_MODE (XEXP (src, 0)),
- XEXP (XEXP (src, 0), 0), XEXP (XEXP (src, 0), 1));
-
- return 0;
-}
-
-/* Return non-zero if CONDITION is more strict than the condition of
- INSN, i.e., if INSN will always branch if CONDITION is true. */
-
-static int
-condition_dominates_p (condition, insn)
- rtx condition;
- rtx insn;
-{
- rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn));
- enum rtx_code code = GET_CODE (condition);
- enum rtx_code other_code;
-
- if (rtx_equal_p (condition, other_condition)
- || other_condition == const_true_rtx)
- return 1;
-
- else if (condition == const_true_rtx || other_condition == 0)
- return 0;
-
- other_code = GET_CODE (other_condition);
- if (GET_RTX_LENGTH (code) != 2 || GET_RTX_LENGTH (other_code) != 2
- || ! rtx_equal_p (XEXP (condition, 0), XEXP (other_condition, 0))
- || ! rtx_equal_p (XEXP (condition, 1), XEXP (other_condition, 1)))
- return 0;
-
- return comparison_dominates_p (code, other_code);
-}
-
-/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
- any insns already in the delay slot of JUMP. */
-
-static int
-redirect_with_delay_slots_safe_p (jump, newlabel, seq)
- rtx jump, newlabel, seq;
-{
- int flags, i;
- rtx pat = PATTERN (seq);
-
- /* Make sure all the delay slots of this jump would still
- be valid after threading the jump. If they are still
- valid, then return non-zero. */
-
- flags = get_jump_flags (jump, newlabel);
- for (i = 1; i < XVECLEN (pat, 0); i++)
- if (! (
-#ifdef ANNUL_IFFALSE_SLOTS
- (INSN_ANNULLED_BRANCH_P (jump)
- && INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
- ? eligible_for_annul_false (jump, i - 1,
- XVECEXP (pat, 0, i), flags) :
-#endif
-#ifdef ANNUL_IFTRUE_SLOTS
- (INSN_ANNULLED_BRANCH_P (jump)
- && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)))
- ? eligible_for_annul_true (jump, i - 1,
- XVECEXP (pat, 0, i), flags) :
-#endif
- eligible_for_delay (jump, i - 1, XVECEXP (pat, 0, i), flags)))
- break;
-
- return (i == XVECLEN (pat, 0));
-}
-
-/* Return non-zero if redirecting JUMP to NEWLABEL does not invalidate
- any insns we wish to place in the delay slot of JUMP. */
-
-static int
-redirect_with_delay_list_safe_p (jump, newlabel, delay_list)
- rtx jump, newlabel, delay_list;
-{
- int flags, i;
- rtx li;
-
- /* Make sure all the insns in DELAY_LIST would still be
- valid after threading the jump. If they are still
- valid, then return non-zero. */
-
- flags = get_jump_flags (jump, newlabel);
- for (li = delay_list, i = 0; li; li = XEXP (li, 1), i++)
- if (! (
-#ifdef ANNUL_IFFALSE_SLOTS
- (INSN_ANNULLED_BRANCH_P (jump)
- && INSN_FROM_TARGET_P (XEXP (li, 0)))
- ? eligible_for_annul_false (jump, i, XEXP (li, 0), flags) :
-#endif
-#ifdef ANNUL_IFTRUE_SLOTS
- (INSN_ANNULLED_BRANCH_P (jump)
- && ! INSN_FROM_TARGET_P (XEXP (li, 0)))
- ? eligible_for_annul_true (jump, i, XEXP (li, 0), flags) :
-#endif
- eligible_for_delay (jump, i, XEXP (li, 0), flags)))
- break;
-
- return (li == NULL);
-}
-
-/* DELAY_LIST is a list of insns that have already been placed into delay
- slots. See if all of them have the same annulling status as ANNUL_TRUE_P.
- If not, return 0; otherwise return 1. */
-
-static int
-check_annul_list_true_false (annul_true_p, delay_list)
- int annul_true_p;
- rtx delay_list;
-{
- rtx temp;
-
- if (delay_list)
- {
- for (temp = delay_list; temp; temp = XEXP (temp, 1))
- {
- rtx trial = XEXP (temp, 0);
-
- if ((annul_true_p && INSN_FROM_TARGET_P (trial))
- || (!annul_true_p && !INSN_FROM_TARGET_P (trial)))
- return 0;
- }
- }
-
- return 1;
-}
-
-
-/* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that
- the condition tested by INSN is CONDITION and the resources shown in
- OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns
- from SEQ's delay list, in addition to whatever insns it may execute
- (in DELAY_LIST). SETS and NEEDED denote resources already set and
- needed while searching for delay slot insns. Return the concatenated
- delay list if possible, otherwise, return 0.
-
- SLOTS_TO_FILL is the total number of slots required by INSN, and
- PSLOTS_FILLED points to the number filled so far (also the number of
- insns in DELAY_LIST). It is updated with the number that have been
- filled from the SEQUENCE, if any.
-
- PANNUL_P points to a non-zero value if we already know that we need
- to annul INSN. If this routine determines that annulling is needed,
- it may set that value non-zero.
-
- PNEW_THREAD points to a location that is to receive the place at which
- execution should continue. */
-
-static rtx
-steal_delay_list_from_target (insn, condition, seq, delay_list,
- sets, needed, other_needed,
- slots_to_fill, pslots_filled, pannul_p,
- pnew_thread)
- rtx insn, condition;
- rtx seq;
- rtx delay_list;
- struct resources *sets, *needed, *other_needed;
- int slots_to_fill;
- int *pslots_filled;
- int *pannul_p;
- rtx *pnew_thread;
-{
- rtx temp;
- int slots_remaining = slots_to_fill - *pslots_filled;
- int total_slots_filled = *pslots_filled;
- rtx new_delay_list = 0;
- int must_annul = *pannul_p;
- int used_annul = 0;
- int i;
- struct resources cc_set;
-
- /* We can't do anything if there are more delay slots in SEQ than we
- can handle, or if we don't know that it will be a taken branch.
- We know that it will be a taken branch if it is either an unconditional
- branch or a conditional branch with a stricter branch condition.
-
- Also, exit if the branch has more than one set, since then it is computing
- other results that can't be ignored, e.g. the HPPA mov&branch instruction.
- ??? It may be possible to move other sets into INSN in addition to
- moving the instructions in the delay slots.
-
- We cannot steal the delay list if one of the instructions in the
- current delay_list modifies the condition codes and the jump in the
- sequence is a conditional jump. In that case we cannot redirect the
- jump, because the condition codes would affect its direction. */
-
- CLEAR_RESOURCE (&cc_set);
- for (temp = delay_list; temp; temp = XEXP (temp, 1))
- {
- rtx trial = XEXP (temp, 0);
-
- mark_set_resources (trial, &cc_set, 0, 1);
- if (insn_references_resource_p (XVECEXP (seq , 0, 0), &cc_set, 0))
- return delay_list;
- }
-
- if (XVECLEN (seq, 0) - 1 > slots_remaining
- || ! condition_dominates_p (condition, XVECEXP (seq, 0, 0))
- || ! single_set (XVECEXP (seq, 0, 0)))
- return delay_list;
-
- for (i = 1; i < XVECLEN (seq, 0); i++)
- {
- rtx trial = XVECEXP (seq, 0, i);
- int flags;
-
- if (insn_references_resource_p (trial, sets, 0)
- || insn_sets_resource_p (trial, needed, 0)
- || insn_sets_resource_p (trial, sets, 0)
-#ifdef HAVE_cc0
- /* If TRIAL sets CC0, we can't copy it, so we can't steal this
- delay list. */
- || find_reg_note (trial, REG_CC_USER, NULL_RTX)
-#endif
- /* If TRIAL is from the fallthrough code of an annulled branch insn
- in SEQ, we cannot use it. */
- || (INSN_ANNULLED_BRANCH_P (XVECEXP (seq, 0, 0))
- && ! INSN_FROM_TARGET_P (trial)))
- return delay_list;
-
- /* If this insn was already done (usually in a previous delay slot),
- pretend we put it in our delay slot. */
- if (redundant_insn (trial, insn, new_delay_list))
- continue;
-
- /* We will end up re-vectoring this branch, so compute flags
- based on jumping to the new label. */
- flags = get_jump_flags (insn, JUMP_LABEL (XVECEXP (seq, 0, 0)));
-
- if (! must_annul
- && ((condition == const_true_rtx
- || (! insn_sets_resource_p (trial, other_needed, 0)
- && ! may_trap_p (PATTERN (trial)))))
- ? eligible_for_delay (insn, total_slots_filled, trial, flags)
- : (must_annul || (delay_list == NULL && new_delay_list == NULL))
- && (must_annul = 1,
- check_annul_list_true_false (0, delay_list)
- && check_annul_list_true_false (0, new_delay_list)
- && eligible_for_annul_false (insn, total_slots_filled,
- trial, flags)))
- {
- if (must_annul)
- used_annul = 1;
- temp = copy_rtx (trial);
- INSN_FROM_TARGET_P (temp) = 1;
- new_delay_list = add_to_delay_list (temp, new_delay_list);
- total_slots_filled++;
-
- if (--slots_remaining == 0)
- break;
- }
- else
- return delay_list;
- }
-
- /* Show the place to which we will be branching. */
- *pnew_thread = next_active_insn (JUMP_LABEL (XVECEXP (seq, 0, 0)));
-
- /* Add any new insns to the delay list and update the count of the
- number of slots filled. */
- *pslots_filled = total_slots_filled;
- if (used_annul)
- *pannul_p = 1;
-
- if (delay_list == 0)
- return new_delay_list;
-
- for (temp = new_delay_list; temp; temp = XEXP (temp, 1))
- delay_list = add_to_delay_list (XEXP (temp, 0), delay_list);
-
- return delay_list;
-}
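-
-/* Schematically, if INSN branches to a label whose first active insn is a
- filled branch B, i.e. (sequence [B s1 s2]), and s1/s2 are safe to execute
- under INSN's condition, copies of s1/s2 become INSN's own delay slots and
- *PNEW_THREAD is set to the insn at B's branch target, so the caller can
- redirect INSN past B entirely. */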
-
-/* Similar to steal_delay_list_from_target except that SEQ is on the
- fallthrough path of INSN. Here we only do something if the delay insn
- of SEQ is an unconditional branch. In that case we steal its delay slot
- for INSN since unconditional branches are much easier to fill. */
-
-static rtx
-steal_delay_list_from_fallthrough (insn, condition, seq,
- delay_list, sets, needed, other_needed,
- slots_to_fill, pslots_filled, pannul_p)
- rtx insn, condition;
- rtx seq;
- rtx delay_list;
- struct resources *sets, *needed, *other_needed;
- int slots_to_fill;
- int *pslots_filled;
- int *pannul_p;
-{
- int i;
- int flags;
- int must_annul = *pannul_p;
- int used_annul = 0;
-
- flags = get_jump_flags (insn, JUMP_LABEL (insn));
-
- /* We can't do anything if SEQ's delay insn isn't an
- unconditional branch. */
-
- if (! simplejump_p (XVECEXP (seq, 0, 0))
- && GET_CODE (PATTERN (XVECEXP (seq, 0, 0))) != RETURN)
- return delay_list;
-
- for (i = 1; i < XVECLEN (seq, 0); i++)
- {
- rtx trial = XVECEXP (seq, 0, i);
-
- /* If TRIAL sets CC0, stealing it will move it too far from the use
- of CC0. */
- if (insn_references_resource_p (trial, sets, 0)
- || insn_sets_resource_p (trial, needed, 0)
- || insn_sets_resource_p (trial, sets, 0)
-#ifdef HAVE_cc0
- || sets_cc0_p (PATTERN (trial))
-#endif
- )
-
- break;
-
- /* If this insn was already done, we don't need it. */
- if (redundant_insn (trial, insn, delay_list))
- {
- delete_from_delay_slot (trial);
- continue;
- }
-
- if (! must_annul
- && ((condition == const_true_rtx
- || (! insn_sets_resource_p (trial, other_needed, 0)
- && ! may_trap_p (PATTERN (trial)))))
- ? eligible_for_delay (insn, *pslots_filled, trial, flags)
- : (must_annul || delay_list == NULL) && (must_annul = 1,
- check_annul_list_true_false (1, delay_list)
- && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
- {
- if (must_annul)
- used_annul = 1;
- delete_from_delay_slot (trial);
- delay_list = add_to_delay_list (trial, delay_list);
-
- if (++(*pslots_filled) == slots_to_fill)
- break;
- }
- else
- break;
- }
-
- if (used_annul)
- *pannul_p = 1;
- return delay_list;
-}
-
-
-/* Try merging insns starting at THREAD which match exactly the insns in
- INSN's delay list.
-
- If all insns were matched and the insn was previously annulling, the
- annul bit will be cleared.
-
- For each insn that is merged, if the branch is or will be non-annulling,
- we delete the merged insn. */
-
-static void
-try_merge_delay_insns (insn, thread)
- rtx insn, thread;
-{
- rtx trial, next_trial;
- rtx delay_insn = XVECEXP (PATTERN (insn), 0, 0);
- int annul_p = INSN_ANNULLED_BRANCH_P (delay_insn);
- int slot_number = 1;
- int num_slots = XVECLEN (PATTERN (insn), 0);
- rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
- struct resources set, needed;
- rtx merged_insns = 0;
- int i;
- int flags;
-
- flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn));
-
- CLEAR_RESOURCE (&needed);
- CLEAR_RESOURCE (&set);
-
- /* If this is not an annulling branch, take into account anything needed in
- INSN's delay slot. This prevents two increments from being incorrectly
- folded into one. If we are annulling, this would be the correct
- thing to do. (The alternative, looking at things set in NEXT_TO_MATCH,
- will essentially disable this optimization. This method is somewhat of
- a kludge, but I don't see a better way.) */
- if (! annul_p)
- for (i = 1 ; i < num_slots ; i++)
- if (XVECEXP (PATTERN (insn), 0, i))
- mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i), &needed, 1);
-
- for (trial = thread; !stop_search_p (trial, 1); trial = next_trial)
- {
- rtx pat = PATTERN (trial);
- rtx oldtrial = trial;
-
- next_trial = next_nonnote_insn (trial);
-
- /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */
- if (GET_CODE (trial) == INSN
- && (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER))
- continue;
-
- if (GET_CODE (next_to_match) == GET_CODE (trial)
-#ifdef HAVE_cc0
- /* We can't share an insn that sets cc0. */
- && ! sets_cc0_p (pat)
-#endif
- && ! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
- && (trial = try_split (pat, trial, 0)) != 0
- /* Update next_trial, in case try_split succeeded. */
- && (next_trial = next_nonnote_insn (trial))
- /* Likewise THREAD. */
- && (thread = oldtrial == thread ? trial : thread)
- && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial))
- /* Have to test this condition if annul condition is different
- from (and less restrictive than) non-annulling one. */
- && eligible_for_delay (delay_insn, slot_number - 1, trial, flags))
- {
-
- if (! annul_p)
- {
- update_block (trial, thread);
- if (trial == thread)
- thread = next_active_insn (thread);
-
- delete_insn (trial);
- INSN_FROM_TARGET_P (next_to_match) = 0;
- }
- else
- merged_insns = gen_rtx_INSN_LIST (VOIDmode, trial, merged_insns);
-
- if (++slot_number == num_slots)
- break;
-
- next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
- }
-
- mark_set_resources (trial, &set, 0, 1);
- mark_referenced_resources (trial, &needed, 1);
- }
-
- /* See if we stopped on a filled insn. If we did, try to see if its
- delay slots match. */
- if (slot_number != num_slots
- && trial && GET_CODE (trial) == INSN
- && GET_CODE (PATTERN (trial)) == SEQUENCE
- && ! INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0)))
- {
- rtx pat = PATTERN (trial);
- rtx filled_insn = XVECEXP (pat, 0, 0);
-
- /* Account for resources set/needed by the filled insn. */
- mark_set_resources (filled_insn, &set, 0, 1);
- mark_referenced_resources (filled_insn, &needed, 1);
-
- for (i = 1; i < XVECLEN (pat, 0); i++)
- {
- rtx dtrial = XVECEXP (pat, 0, i);
-
- if (! insn_references_resource_p (dtrial, &set, 1)
- && ! insn_sets_resource_p (dtrial, &set, 1)
- && ! insn_sets_resource_p (dtrial, &needed, 1)
-#ifdef HAVE_cc0
- && ! sets_cc0_p (PATTERN (dtrial))
-#endif
- && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial))
- && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags))
- {
- if (! annul_p)
- {
- rtx new;
-
- update_block (dtrial, thread);
- new = delete_from_delay_slot (dtrial);
- if (INSN_DELETED_P (thread))
- thread = new;
- INSN_FROM_TARGET_P (next_to_match) = 0;
- }
- else
- merged_insns = gen_rtx_INSN_LIST (SImode, dtrial,
- merged_insns);
-
- if (++slot_number == num_slots)
- break;
-
- next_to_match = XVECEXP (PATTERN (insn), 0, slot_number);
- }
- else
- {
- /* Keep track of the set/referenced resources for the delay
- slots of any trial insns we encounter. */
- mark_set_resources (dtrial, &set, 0, 1);
- mark_referenced_resources (dtrial, &needed, 1);
- }
- }
- }
-
- /* If all insns in the delay slot have been matched and we were previously
- annulling the branch, we need not do so any more. In that case delete all the
- merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in
- the delay list so that we know that it isn't only being used at the
- target. */
- if (slot_number == num_slots && annul_p)
- {
- for (; merged_insns; merged_insns = XEXP (merged_insns, 1))
- {
- if (GET_MODE (merged_insns) == SImode)
- {
- rtx new;
-
- update_block (XEXP (merged_insns, 0), thread);
- new = delete_from_delay_slot (XEXP (merged_insns, 0));
- if (INSN_DELETED_P (thread))
- thread = new;
- }
- else
- {
- update_block (XEXP (merged_insns, 0), thread);
- delete_insn (XEXP (merged_insns, 0));
- }
- }
-
- INSN_ANNULLED_BRANCH_P (delay_insn) = 0;
-
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i)) = 0;
- }
-}
-
-/* See if INSN is redundant with an insn in front of TARGET. Often this
- is called when INSN is a candidate for a delay slot of TARGET.
- DELAY_LIST are insns that will be placed in delay slots of TARGET in front
- of INSN. Often INSN will be redundant with an insn in a delay slot of
- some previous insn. This happens when we have a series of branches to the
- same label; in that case the first insn at the target might want to go
- into each of the delay slots.
-
- If we are not careful, this routine can take up a significant fraction
- of the total compilation time (4%), but only wins rarely. Hence we
- speed this routine up by making two passes. The first pass goes back
- until it hits a label and sees if it can find an insn with an identical
- pattern. Only in this (relatively rare) event does it check for
- data conflicts.
-
- We do not split insns we encounter. This could cause us not to find a
- redundant insn, but the cost of splitting seems greater than the possible
- gain in rare cases. */
-
-static rtx
-redundant_insn (insn, target, delay_list)
- rtx insn;
- rtx target;
- rtx delay_list;
-{
- rtx target_main = target;
- rtx ipat = PATTERN (insn);
- rtx trial, pat;
- struct resources needed, set;
- int i;
-
- /* If INSN has any REG_UNUSED notes, it can't match anything since we
- are allowed to not actually assign to such a register. */
- if (find_reg_note (insn, REG_UNUSED, NULL_RTX) != 0)
- return 0;
-
- /* Scan backwards looking for a match. */
- for (trial = PREV_INSN (target); trial; trial = PREV_INSN (trial))
- {
- if (GET_CODE (trial) == CODE_LABEL)
- return 0;
-
- if (GET_RTX_CLASS (GET_CODE (trial)) != 'i')
- continue;
-
- pat = PATTERN (trial);
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- if (GET_CODE (pat) == SEQUENCE)
- {
- /* Stop for a CALL and its delay slots because it is difficult to
- track its resource needs correctly. */
- if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
- return 0;
-
- /* Stop for an INSN or JUMP_INSN with delayed effects and its delay
- slots because it is difficult to track its resource needs
- correctly. */
-
-#ifdef INSN_SETS_ARE_DELAYED
- if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
- return 0;
-#endif
-
-#ifdef INSN_REFERENCES_ARE_DELAYED
- if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
- return 0;
-#endif
-
- /* See if any of the insns in the delay slot match, updating
- resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
- if (GET_CODE (XVECEXP (pat, 0, i)) == GET_CODE (insn)
- && rtx_equal_p (PATTERN (XVECEXP (pat, 0, i)), ipat)
- && ! find_reg_note (XVECEXP (pat, 0, i), REG_UNUSED, NULL_RTX))
- break;
-
- /* If we found a match, exit this loop early. */
- if (i > 0)
- break;
- }
-
- else if (GET_CODE (trial) == GET_CODE (insn) && rtx_equal_p (pat, ipat)
- && ! find_reg_note (trial, REG_UNUSED, NULL_RTX))
- break;
- }
-
- /* If we didn't find an insn that matches, return 0. */
- if (trial == 0)
- return 0;
-
- /* See what resources this insn sets and needs. If they overlap, or
- if this insn references CC0, it can't be redundant. */
-
- CLEAR_RESOURCE (&needed);
- CLEAR_RESOURCE (&set);
- mark_set_resources (insn, &set, 0, 1);
- mark_referenced_resources (insn, &needed, 1);
-
- /* If TARGET is a SEQUENCE, get the main insn. */
- if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
- target_main = XVECEXP (PATTERN (target), 0, 0);
-
- if (resource_conflicts_p (&needed, &set)
-#ifdef HAVE_cc0
- || reg_mentioned_p (cc0_rtx, ipat)
-#endif
- /* The insn requiring the delay may not set anything needed or set by
- INSN. */
- || insn_sets_resource_p (target_main, &needed, 1)
- || insn_sets_resource_p (target_main, &set, 1))
- return 0;
-
- /* Insns we pass may not set either NEEDED or SET, so merge them for
- simpler tests. */
- needed.memory |= set.memory;
- needed.unch_memory |= set.unch_memory;
- IOR_HARD_REG_SET (needed.regs, set.regs);
-
- /* This insn isn't redundant if it conflicts with an insn that either is
- or will be in a delay slot of TARGET. */
-
- while (delay_list)
- {
- if (insn_sets_resource_p (XEXP (delay_list, 0), &needed, 1))
- return 0;
- delay_list = XEXP (delay_list, 1);
- }
-
- if (GET_CODE (target) == INSN && GET_CODE (PATTERN (target)) == SEQUENCE)
- for (i = 1; i < XVECLEN (PATTERN (target), 0); i++)
- if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i), &needed, 1))
- return 0;
-
- /* Scan backwards until we reach a label or an insn that uses something
- INSN sets or sets something INSN uses or sets. */
-
- for (trial = PREV_INSN (target);
- trial && GET_CODE (trial) != CODE_LABEL;
- trial = PREV_INSN (trial))
- {
- if (GET_CODE (trial) != INSN && GET_CODE (trial) != CALL_INSN
- && GET_CODE (trial) != JUMP_INSN)
- continue;
-
- pat = PATTERN (trial);
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- if (GET_CODE (pat) == SEQUENCE)
- {
- /* If this is a CALL_INSN and its delay slots, it is hard to track
- the resource needs properly, so give up. */
- if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL_INSN)
- return 0;
-
- /* If this is an INSN or JUMP_INSN with delayed effects, it
- is hard to track the resource needs properly, so give up. */
-
-#ifdef INSN_SETS_ARE_DELAYED
- if (INSN_SETS_ARE_DELAYED (XVECEXP (pat, 0, 0)))
- return 0;
-#endif
-
-#ifdef INSN_REFERENCES_ARE_DELAYED
- if (INSN_REFERENCES_ARE_DELAYED (XVECEXP (pat, 0, 0)))
- return 0;
-#endif
-
- /* See if any of the insns in the delay slot match, updating
- resource requirements as we go. */
- for (i = XVECLEN (pat, 0) - 1; i > 0; i--)
- {
- rtx candidate = XVECEXP (pat, 0, i);
-
- /* If an insn will be annulled if the branch is false, it isn't
- considered as a possible duplicate insn. */
- if (rtx_equal_p (PATTERN (candidate), ipat)
- && ! (INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
- && INSN_FROM_TARGET_P (candidate)))
- {
- /* Show that this insn will be used in the sequel. */
- INSN_FROM_TARGET_P (candidate) = 0;
- return candidate;
- }
-
- /* Unless this is an annulled insn from the target of a branch,
- we must stop if it sets anything needed or set by INSN. */
- if ((! INSN_ANNULLED_BRANCH_P (XVECEXP (pat, 0, 0))
- || ! INSN_FROM_TARGET_P (candidate))
- && insn_sets_resource_p (candidate, &needed, 1))
- return 0;
- }
-
-
- /* If the insn requiring the delay slot conflicts with INSN, we
- must stop. */
- if (insn_sets_resource_p (XVECEXP (pat, 0, 0), &needed, 1))
- return 0;
- }
- else
- {
- /* See if TRIAL is the same as INSN. */
- pat = PATTERN (trial);
- if (rtx_equal_p (pat, ipat))
- return trial;
-
- /* Can't go any further if TRIAL conflicts with INSN. */
- if (insn_sets_resource_p (trial, &needed, 1))
- return 0;
- }
- }
-
- return 0;
-}
-
-/* Return 1 if THREAD can only be executed in one way. If LABEL is non-zero,
- it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH
- is non-zero, we are allowed to fall into this thread; otherwise, we are
- not.
-
- If LABEL is used more than once or we pass a label other than LABEL before
- finding an active insn, we do not own this thread. */
-
-static int
-own_thread_p (thread, label, allow_fallthrough)
- rtx thread;
- rtx label;
- int allow_fallthrough;
-{
- rtx active_insn;
- rtx insn;
-
- /* We don't own the function end. */
- if (thread == 0)
- return 0;
-
- /* Get the first active insn, or THREAD, if it is an active insn. */
- active_insn = next_active_insn (PREV_INSN (thread));
-
- for (insn = thread; insn != active_insn; insn = NEXT_INSN (insn))
- if (GET_CODE (insn) == CODE_LABEL
- && (insn != label || LABEL_NUSES (insn) != 1))
- return 0;
-
- if (allow_fallthrough)
- return 1;
-
- /* Ensure that we reach a BARRIER before any insn or label. */
- for (insn = prev_nonnote_insn (thread);
- insn == 0 || GET_CODE (insn) != BARRIER;
- insn = prev_nonnote_insn (insn))
- if (insn == 0
- || GET_CODE (insn) == CODE_LABEL
- || (GET_CODE (insn) == INSN
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER))
- return 0;
-
- return 1;
-}
-
-/* Called when INSN is being moved from a location near the target of a jump.
- We leave a marker of the form (use (INSN)) immediately in front
- of WHERE for mark_target_live_regs. These markers will be deleted when
- reorg finishes.
-
- We used to try to update the live status of registers if WHERE is at
- the start of a basic block, but that can't work since we may remove a
- BARRIER in relax_delay_slots. */
-
-static void
-update_block (insn, where)
- rtx insn;
- rtx where;
-{
- /* Ignore if this was in a delay slot and it came from the target of
- a branch. */
- if (INSN_FROM_TARGET_P (insn))
- return;
-
- emit_insn_before (gen_rtx_USE (VOIDmode, insn), where);
-
- /* INSN might be making a value live in a block where it didn't use to
- be. So recompute liveness information for this block. */
-
- incr_ticks_for_insn (insn);
-}
-
-/* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for
- the basic block containing the jump. */
-
-static int
-reorg_redirect_jump (jump, nlabel)
- rtx jump;
- rtx nlabel;
-{
- incr_ticks_for_insn (jump);
- return redirect_jump (jump, nlabel);
-}
-
-/* Called when INSN is being moved forward into a delay slot of DELAYED_INSN.
- We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes
- that reference values used in INSN. If we find one, then we move the
- REG_DEAD note to INSN.
-
- This is needed to handle the case where a later insn (after INSN) has a
- REG_DEAD note for a register used by INSN, and this later insn subsequently
- gets moved before a CODE_LABEL because it is a redundant insn. In this
- case, mark_target_live_regs may be confused into thinking the register
- is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */
-
-static void
-update_reg_dead_notes (insn, delayed_insn)
- rtx insn, delayed_insn;
-{
- rtx p, link, next;
-
- for (p = next_nonnote_insn (insn); p != delayed_insn;
- p = next_nonnote_insn (p))
- for (link = REG_NOTES (p); link; link = next)
- {
- next = XEXP (link, 1);
-
- if (REG_NOTE_KIND (link) != REG_DEAD
- || GET_CODE (XEXP (link, 0)) != REG)
- continue;
-
- if (reg_referenced_p (XEXP (link, 0), PATTERN (insn)))
- {
- /* Move the REG_DEAD note from P to INSN. */
- remove_note (p, link);
- XEXP (link, 1) = REG_NOTES (insn);
- REG_NOTES (insn) = link;
- }
- }
-}
-
-/* Called when an insn redundant with start_insn is deleted. If there
- is a REG_DEAD note for the target of start_insn between start_insn
- and stop_insn, then the REG_DEAD note needs to be deleted since the
- value no longer dies there.
-
- If the REG_DEAD note isn't deleted, then mark_target_live_regs may be
- confused into thinking the register is dead. */
-
-static void
-fix_reg_dead_note (start_insn, stop_insn)
- rtx start_insn, stop_insn;
-{
- rtx p, link, next;
-
- for (p = next_nonnote_insn (start_insn); p != stop_insn;
- p = next_nonnote_insn (p))
- for (link = REG_NOTES (p); link; link = next)
- {
- next = XEXP (link, 1);
-
- if (REG_NOTE_KIND (link) != REG_DEAD
- || GET_CODE (XEXP (link, 0)) != REG)
- continue;
-
- if (reg_set_p (XEXP (link, 0), PATTERN (start_insn)))
- {
- remove_note (p, link);
- return;
- }
- }
-}
-
-/* Delete any REG_UNUSED notes that exist on INSN but not on REDUNDANT_INSN.
-
- This handles the case of udivmodXi4 instructions which optimize their
- output depending on whether any REG_UNUSED notes are present.
- We must make sure that INSN calculates as many results as REDUNDANT_INSN
- does. */
-
-static void
-update_reg_unused_notes (insn, redundant_insn)
- rtx insn, redundant_insn;
-{
- rtx link, next;
-
- for (link = REG_NOTES (insn); link; link = next)
- {
- next = XEXP (link, 1);
-
- if (REG_NOTE_KIND (link) != REG_UNUSED
- || GET_CODE (XEXP (link, 0)) != REG)
- continue;
-
- if (! find_regno_note (redundant_insn, REG_UNUSED,
- REGNO (XEXP (link, 0))))
- remove_note (insn, link);
- }
-}
-
-/* Scan a function looking for insns that need a delay slot and find insns to
- put into the delay slot.
-
- NON_JUMPS_P is non-zero if we are to only try to fill non-jump insns (such
- as calls). We do these first since we don't want jump insns (that are
- easier to fill) to get the only insns that could be used for non-jump insns.
- When it is zero, only try to fill JUMP_INSNs.
-
- When slots are filled in this manner, the insns (including the
- delay_insn) are put together in a SEQUENCE rtx. In this fashion,
- it is possible to tell whether a delay slot has really been filled
- or not. `final' knows how to deal with this, by communicating
- through FINAL_SEQUENCE. */
-
-static void
-fill_simple_delay_slots (non_jumps_p)
- int non_jumps_p;
-{
- register rtx insn, pat, trial, next_trial;
- register int i;
- int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
- struct resources needed, set;
- int slots_to_fill, slots_filled;
- rtx delay_list;
-
- for (i = 0; i < num_unfilled_slots; i++)
- {
- int flags;
- /* Get the next insn to fill. If it has already had any slots assigned,
- we can't do anything with it. Maybe we'll improve this later. */
-
- insn = unfilled_slots_base[i];
- if (insn == 0
- || INSN_DELETED_P (insn)
- || (GET_CODE (insn) == INSN
- && GET_CODE (PATTERN (insn)) == SEQUENCE)
- || (GET_CODE (insn) == JUMP_INSN && non_jumps_p)
- || (GET_CODE (insn) != JUMP_INSN && ! non_jumps_p))
- continue;
-
- if (GET_CODE (insn) == JUMP_INSN)
- flags = get_jump_flags (insn, JUMP_LABEL (insn));
- else
- flags = get_jump_flags (insn, NULL_RTX);
- slots_to_fill = num_delay_slots (insn);
-
- /* Some machine descriptions have defined instructions to have
- delay slots only in certain circumstances which may depend on
- nearby insns (which change due to reorg's actions).
-
- For example, the PA port normally has delay slots for unconditional
- jumps.
-
- However, the PA port claims such jumps do not have a delay slot
- if they are immediate successors of certain CALL_INSNs. This
- allows the port to favor filling the delay slot of the call with
- the unconditional jump. */
- if (slots_to_fill == 0)
- continue;
-
- /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL
- says how many. After initialization, first try optimizing
-
- call _foo call _foo
- nop add %o7,.-L1,%o7
- b,a L1
- nop
-
- If this case applies, the delay slot of the call is filled with
- the unconditional jump. This is done first to avoid having the
- delay slot of the call filled in the backward scan. Also, since
- the unconditional jump is likely to also have a delay slot, that
- insn must exist when it is subsequently scanned.
-
- This is tried on each insn with delay slots as some machines
- have insns which perform calls, but are not represented as
- CALL_INSNs. */
-
- slots_filled = 0;
- delay_list = 0;
-
- if ((trial = next_active_insn (insn))
- && GET_CODE (trial) == JUMP_INSN
- && simplejump_p (trial)
- && eligible_for_delay (insn, slots_filled, trial, flags)
- && no_labels_between_p (insn, trial))
- {
- rtx *tmp;
- slots_filled++;
- delay_list = add_to_delay_list (trial, delay_list);
-
- /* TRIAL may have had its delay slot filled, then unfilled. When
- the delay slot is unfilled, TRIAL is placed back on the unfilled
- slots obstack. Unfortunately, it is placed on the end of the
- obstack, not in its original location. Therefore, we must search
- from entry i + 1 to the end of the unfilled slots obstack to
- try and find TRIAL. */
- tmp = &unfilled_slots_base[i + 1];
- while (*tmp != trial && tmp != unfilled_slots_next)
- tmp++;
-
- /* Remove the unconditional jump from consideration for delay slot
- filling and unthread it. */
- if (*tmp == trial)
- *tmp = 0;
- {
- rtx next = NEXT_INSN (trial);
- rtx prev = PREV_INSN (trial);
- if (prev)
- NEXT_INSN (prev) = next;
- if (next)
- PREV_INSN (next) = prev;
- }
- }
-
- /* Now, scan backwards from the insn to search for a potential
- delay-slot candidate. Stop searching when a label or jump is hit.
-
- For each candidate, if it is to go into the delay slot (moved
- forward in execution sequence), it must not need or set any resources
- that were set by later insns and must not set any resources that
- are needed for those insns.
-
- The delay slot insn itself sets resources unless it is a call
- (in which case the called routine, not the insn itself, is doing
- the setting). */
-
- if (slots_filled < slots_to_fill)
- {
- CLEAR_RESOURCE (&needed);
- CLEAR_RESOURCE (&set);
- mark_set_resources (insn, &set, 0, 0);
- mark_referenced_resources (insn, &needed, 0);
-
- for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1);
- trial = next_trial)
- {
- next_trial = prev_nonnote_insn (trial);
-
- /* This must be an INSN or CALL_INSN. */
- pat = PATTERN (trial);
-
- /* USE and CLOBBER at this level are just for flow; ignore them. */
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- /* Check for resource conflict first, to avoid unnecessary
- splitting. */
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
-#ifdef HAVE_cc0
- /* Can't separate set of cc0 from its use. */
- && ! (reg_mentioned_p (cc0_rtx, pat)
- && ! sets_cc0_p (pat))
-#endif
- )
- {
- trial = try_split (pat, trial, 1);
- next_trial = prev_nonnote_insn (trial);
- if (eligible_for_delay (insn, slots_filled, trial, flags))
- {
- /* In this case, we are searching backward, so if we
- find insns to put on the delay list, we want
- to put them at the head, rather than the
- tail, of the list. */
-
- update_reg_dead_notes (trial, insn);
- delay_list = gen_rtx_INSN_LIST (VOIDmode,
- trial, delay_list);
- update_block (trial, trial);
- delete_insn (trial);
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
- }
-
- mark_set_resources (trial, &set, 0, 1);
- mark_referenced_resources (trial, &needed, 1);
- }
- }
-
- /* If all needed slots haven't been filled, we come here. */
-
- /* Try to optimize case of jumping around a single insn. */
-#if defined(ANNUL_IFFALSE_SLOTS) || defined(ANNUL_IFTRUE_SLOTS)
- if (slots_filled != slots_to_fill
- && delay_list == 0
- && GET_CODE (insn) == JUMP_INSN
- && (condjump_p (insn) || condjump_in_parallel_p (insn)))
- {
- delay_list = optimize_skip (insn);
- if (delay_list)
- slots_filled += 1;
- }
-#endif
-
- /* Try to get insns from beyond the insn needing the delay slot.
- These insns can neither set nor reference resources set in insns being
- skipped, cannot set resources in the insn being skipped, and, if this
- is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the
- call might not return).
-
- There used to be code which continued past the target label if
- we saw all uses of the target label. This code did not work,
- because it failed to account for some instructions which were
- both annulled and marked as from the target. This can happen as a
- result of optimize_skip. Since this code was redundant with
- fill_eager_delay_slots anyway, it was just deleted. */
-
- if (slots_filled != slots_to_fill
- && (GET_CODE (insn) != JUMP_INSN
- || ((condjump_p (insn) || condjump_in_parallel_p (insn))
- && ! simplejump_p (insn)
- && JUMP_LABEL (insn) != 0)))
- {
- rtx target = 0;
- int maybe_never = 0;
- struct resources needed_at_jump;
-
- CLEAR_RESOURCE (&needed);
- CLEAR_RESOURCE (&set);
-
- if (GET_CODE (insn) == CALL_INSN)
- {
- mark_set_resources (insn, &set, 0, 1);
- mark_referenced_resources (insn, &needed, 1);
- maybe_never = 1;
- }
- else
- {
- mark_set_resources (insn, &set, 0, 1);
- mark_referenced_resources (insn, &needed, 1);
- if (GET_CODE (insn) == JUMP_INSN)
- target = JUMP_LABEL (insn);
- }
-
- for (trial = next_nonnote_insn (insn); trial; trial = next_trial)
- {
- rtx pat, trial_delay;
-
- next_trial = next_nonnote_insn (trial);
-
- if (GET_CODE (trial) == CODE_LABEL
- || GET_CODE (trial) == BARRIER)
- break;
-
- /* We must have an INSN, JUMP_INSN, or CALL_INSN. */
- pat = PATTERN (trial);
-
- /* Stand-alone USE and CLOBBER are just for flow. */
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- /* If this already has filled delay slots, get the insn needing
- the delay slots. */
- if (GET_CODE (pat) == SEQUENCE)
- trial_delay = XVECEXP (pat, 0, 0);
- else
- trial_delay = trial;
-
- /* If this is a jump insn to our target, indicate that we have
- seen another jump to it. If we aren't handling a conditional
- jump, stop our search. Otherwise, compute the needs at its
- target and add them to NEEDED. */
- if (GET_CODE (trial_delay) == JUMP_INSN)
- {
- if (target == 0)
- break;
- else if (JUMP_LABEL (trial_delay) != target)
- {
- rtx ninsn =
- next_active_insn (JUMP_LABEL (trial_delay));
-
- mark_target_live_regs (get_insns (), ninsn,
- &needed_at_jump);
- needed.memory |= needed_at_jump.memory;
- needed.unch_memory |= needed_at_jump.unch_memory;
- IOR_HARD_REG_SET (needed.regs, needed_at_jump.regs);
- }
- }
-
- /* See if we have a resource problem before we try to
- split. */
- if (target == 0
- && GET_CODE (pat) != SEQUENCE
- && ! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
-#ifdef HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))
-#endif
- && ! (maybe_never && may_trap_p (pat))
- && (trial = try_split (pat, trial, 0))
- && eligible_for_delay (insn, slots_filled, trial, flags))
- {
- next_trial = next_nonnote_insn (trial);
- delay_list = add_to_delay_list (trial, delay_list);
-
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
- link_cc0_insns (trial);
-#endif
-
- delete_insn (trial);
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
-
- mark_set_resources (trial, &set, 0, 1);
- mark_referenced_resources (trial, &needed, 1);
-
- /* Ensure we don't put insns between the setting of cc and the
- comparison by moving a setting of cc into an earlier delay
- slot since these insns could clobber the condition code. */
- set.cc = 1;
-
- /* If this is a call or jump, we might not get here. */
- if (GET_CODE (trial_delay) == CALL_INSN
- || GET_CODE (trial_delay) == JUMP_INSN)
- maybe_never = 1;
- }
-
- /* If there are slots left to fill and our search was stopped by an
- unconditional branch, try the insn at the branch target. We can
- redirect the branch if it works.
-
- Don't do this if the insn at the branch target is a branch. */
- if (slots_to_fill != slots_filled
- && trial
- && GET_CODE (trial) == JUMP_INSN
- && simplejump_p (trial)
- && (target == 0 || JUMP_LABEL (trial) == target)
- && (next_trial = next_active_insn (JUMP_LABEL (trial))) != 0
- && ! (GET_CODE (next_trial) == INSN
- && GET_CODE (PATTERN (next_trial)) == SEQUENCE)
- && GET_CODE (next_trial) != JUMP_INSN
- && ! insn_references_resource_p (next_trial, &set, 1)
- && ! insn_sets_resource_p (next_trial, &set, 1)
- && ! insn_sets_resource_p (next_trial, &needed, 1)
-#ifdef HAVE_cc0
- && ! reg_mentioned_p (cc0_rtx, PATTERN (next_trial))
-#endif
- && ! (maybe_never && may_trap_p (PATTERN (next_trial)))
- && (next_trial = try_split (PATTERN (next_trial), next_trial, 0))
- && eligible_for_delay (insn, slots_filled, next_trial, flags))
- {
- rtx new_label = next_active_insn (next_trial);
-
- if (new_label != 0)
- new_label = get_label_before (new_label);
- else
- new_label = find_end_label ();
-
- delay_list
- = add_to_delay_list (copy_rtx (next_trial), delay_list);
- slots_filled++;
- reorg_redirect_jump (trial, new_label);
-
- /* If we merged because we both jumped to the same place,
- redirect the original insn also. */
- if (target)
- reorg_redirect_jump (insn, new_label);
- }
- }
-
- /* If this is an unconditional jump, then try to get insns from the
- target of the jump. */
- if (GET_CODE (insn) == JUMP_INSN
- && simplejump_p (insn)
- && slots_filled != slots_to_fill)
- delay_list
- = fill_slots_from_thread (insn, const_true_rtx,
- next_active_insn (JUMP_LABEL (insn)),
- NULL, 1, 1,
- own_thread_p (JUMP_LABEL (insn),
- JUMP_LABEL (insn), 0),
- slots_to_fill, &slots_filled,
- delay_list);
-
- if (delay_list)
- unfilled_slots_base[i]
- = emit_delay_sequence (insn, delay_list, slots_filled);
-
- if (slots_to_fill == slots_filled)
- unfilled_slots_base[i] = 0;
-
- note_delay_statistics (slots_filled, 0);
- }
-
-#ifdef DELAY_SLOTS_FOR_EPILOGUE
- /* See if the epilogue needs any delay slots. Try to fill them if so.
- The only thing we can do is scan backwards from the end of the
- function. If we did this in a previous pass, it is incorrect to do it
- again. */
- if (current_function_epilogue_delay_list)
- return;
-
- slots_to_fill = DELAY_SLOTS_FOR_EPILOGUE;
- if (slots_to_fill == 0)
- return;
-
- slots_filled = 0;
- CLEAR_RESOURCE (&set);
-
- /* The frame pointer and stack pointer are needed at the beginning of
- the epilogue, so instructions setting them can not be put in the
- epilogue delay slot. However, everything else needed at function
- end is safe, so we don't want to use end_of_function_needs here. */
- CLEAR_RESOURCE (&needed);
- if (frame_pointer_needed)
- {
- SET_HARD_REG_BIT (needed.regs, FRAME_POINTER_REGNUM);
-#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
- SET_HARD_REG_BIT (needed.regs, HARD_FRAME_POINTER_REGNUM);
-#endif
-#ifdef EXIT_IGNORE_STACK
- if (! EXIT_IGNORE_STACK
- || current_function_sp_is_unchanging)
-#endif
- SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
- }
- else
- SET_HARD_REG_BIT (needed.regs, STACK_POINTER_REGNUM);
-
-#ifdef EPILOGUE_USES
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- if (EPILOGUE_USES (i))
- SET_HARD_REG_BIT (needed.regs, i);
- }
-#endif
-
- for (trial = get_last_insn (); ! stop_search_p (trial, 1);
- trial = PREV_INSN (trial))
- {
- if (GET_CODE (trial) == NOTE)
- continue;
- pat = PATTERN (trial);
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
-#ifdef HAVE_cc0
- /* Don't want to mess with cc0 here. */
- && ! reg_mentioned_p (cc0_rtx, pat)
-#endif
- )
- {
- trial = try_split (pat, trial, 1);
- if (ELIGIBLE_FOR_EPILOGUE_DELAY (trial, slots_filled))
- {
- /* Here as well we are searching backward, so put the
- insns we find on the head of the list. */
-
- current_function_epilogue_delay_list
- = gen_rtx_INSN_LIST (VOIDmode, trial,
- current_function_epilogue_delay_list);
- mark_end_of_function_resources (trial, 1);
- update_block (trial, trial);
- delete_insn (trial);
-
- /* Clear deleted bit so final.c will output the insn. */
- INSN_DELETED_P (trial) = 0;
-
- if (slots_to_fill == ++slots_filled)
- break;
- continue;
- }
- }
-
- mark_set_resources (trial, &set, 0, 1);
- mark_referenced_resources (trial, &needed, 1);
- }
-
- note_delay_statistics (slots_filled, 0);
-#endif
-}
-
-/* Try to find insns to place in delay slots.
-
- INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION
- or is an unconditional branch if CONDITION is const_true_rtx.
- *PSLOTS_FILLED is updated with the number of slots that we have filled.
-
- THREAD is a flow of control: either the insns to be executed if the
- branch is true or those if the branch is false; THREAD_IF_TRUE says which.
-
- OPPOSITE_THREAD is the thread in the opposite direction. It is used
- to see if any potential delay slot insns set things needed there.
-
- LIKELY is non-zero if it is extremely likely that the branch will be
- taken and THREAD_IF_TRUE is set. This is used for the branch at the
- end of a loop back up to the top.
-
- OWN_THREAD and OWN_OPPOSITE_THREAD are true if we are the only user of the
- thread. I.e., it is the fallthrough code of our jump or the target of the
- jump when we are the only jump going there.
-
- If OWN_THREAD is false, it must be the "true" thread of a jump. In that
- case, we can only take insns from the head of the thread for our delay
- slot. We then adjust the jump to point after the insns we have taken. */
-
-static rtx
-fill_slots_from_thread (insn, condition, thread, opposite_thread, likely,
- thread_if_true, own_thread,
- slots_to_fill, pslots_filled, delay_list)
- rtx insn;
- rtx condition;
- rtx thread, opposite_thread;
- int likely;
- int thread_if_true;
- int own_thread;
- int slots_to_fill, *pslots_filled;
- rtx delay_list;
-{
- rtx new_thread;
- struct resources opposite_needed, set, needed;
- rtx trial;
- int lose = 0;
- int must_annul = 0;
- int flags;
-
- /* Validate our arguments. */
- if ((condition == const_true_rtx && ! thread_if_true)
- || (! own_thread && ! thread_if_true))
- abort ();
-
- flags = get_jump_flags (insn, JUMP_LABEL (insn));
-
- /* If our thread is the end of the subroutine, we can't get any delay
- insns from that. */
- if (thread == 0)
- return delay_list;
-
- /* If this is an unconditional branch, nothing is needed at the
- opposite thread. Otherwise, compute what is needed there. */
- if (condition == const_true_rtx)
- CLEAR_RESOURCE (&opposite_needed);
- else
- mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed);
-
- /* If the insn at THREAD can be split, do it here to avoid having to
- update THREAD and NEW_THREAD if it is done in the loop below. Also
- initialize NEW_THREAD. */
-
- new_thread = thread = try_split (PATTERN (thread), thread, 0);
-
- /* Scan insns at THREAD. We are looking for an insn that can be removed
- from THREAD (it neither sets nor references resources that were set
- ahead of it and it doesn't set anything needed by the insns ahead of
- it) and that can either be placed in an annulling insn or isn't
- needed at OPPOSITE_THREAD. */
-
- CLEAR_RESOURCE (&needed);
- CLEAR_RESOURCE (&set);
-
- /* If we do not own this thread, we must stop as soon as we find
- something that we can't put in a delay slot, since all we can do
- is branch into THREAD at a later point. Therefore, labels stop
- the search if this is not the `true' thread. */
-
- for (trial = thread;
- ! stop_search_p (trial, ! thread_if_true) && (! lose || own_thread);
- trial = next_nonnote_insn (trial))
- {
- rtx pat, old_trial;
-
- /* If we have passed a label, we no longer own this thread. */
- if (GET_CODE (trial) == CODE_LABEL)
- {
- own_thread = 0;
- continue;
- }
-
- pat = PATTERN (trial);
- if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
- continue;
-
- /* If TRIAL conflicts with the insns ahead of it, we lose. Also,
- don't separate or copy insns that set and use CC0. */
- if (! insn_references_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &set, 1)
- && ! insn_sets_resource_p (trial, &needed, 1)
-#ifdef HAVE_cc0
- && ! (reg_mentioned_p (cc0_rtx, pat)
- && (! own_thread || ! sets_cc0_p (pat)))
-#endif
- )
- {
- rtx prior_insn;
-
- /* If TRIAL is redundant with some insn before INSN, we don't
- actually need to add it to the delay list; we can merely pretend
- we did. */
- if ((prior_insn = redundant_insn (trial, insn, delay_list)))
- {
- fix_reg_dead_note (prior_insn, insn);
- if (own_thread)
- {
- update_block (trial, thread);
- if (trial == thread)
- {
- thread = next_active_insn (thread);
- if (new_thread == trial)
- new_thread = thread;
- }
-
- delete_insn (trial);
- }
- else
- {
- update_reg_unused_notes (prior_insn, trial);
- new_thread = next_active_insn (trial);
- }
-
- continue;
- }
-
- /* There are two ways we can win: If TRIAL doesn't set anything
- needed at the opposite thread and can't trap, or if it can
- go into an annulled delay slot. */
- if (!must_annul
- && (condition == const_true_rtx
- || (! insn_sets_resource_p (trial, &opposite_needed, 1)
- && ! may_trap_p (pat))))
- {
- old_trial = trial;
- trial = try_split (pat, trial, 0);
- if (new_thread == old_trial)
- new_thread = trial;
- if (thread == old_trial)
- thread = trial;
- pat = PATTERN (trial);
- if (eligible_for_delay (insn, *pslots_filled, trial, flags))
- goto winner;
- }
- else if (0
-#ifdef ANNUL_IFTRUE_SLOTS
- || ! thread_if_true
-#endif
-#ifdef ANNUL_IFFALSE_SLOTS
- || thread_if_true
-#endif
- )
- {
- old_trial = trial;
- trial = try_split (pat, trial, 0);
- if (new_thread == old_trial)
- new_thread = trial;
- if (thread == old_trial)
- thread = trial;
- pat = PATTERN (trial);
- if ((must_annul || delay_list == NULL) && (thread_if_true
- ? check_annul_list_true_false (0, delay_list)
- && eligible_for_annul_false (insn, *pslots_filled, trial, flags)
- : check_annul_list_true_false (1, delay_list)
- && eligible_for_annul_true (insn, *pslots_filled, trial, flags)))
- {
- rtx temp;
-
- must_annul = 1;
- winner:
-
-#ifdef HAVE_cc0
- if (reg_mentioned_p (cc0_rtx, pat))
- link_cc0_insns (trial);
-#endif
-
- /* If we own this thread, delete the insn. If this is the
- destination of a branch, show that a basic block status
- may have been updated. In any case, mark the new
- starting point of this thread. */
- if (own_thread)
- {
- update_block (trial, thread);
- if (trial == thread)
- {
- thread = next_active_insn (thread);
- if (new_thread == trial)
- new_thread = thread;
- }
- delete_insn (trial);
- }
- else
- new_thread = next_active_insn (trial);
-
- temp = own_thread ? trial : copy_rtx (trial);
- if (thread_if_true)
- INSN_FROM_TARGET_P (temp) = 1;
-
- delay_list = add_to_delay_list (temp, delay_list);
-
- if (slots_to_fill == ++(*pslots_filled))
- {
- /* Even though we have filled all the slots, we
- may be branching to a location that has a
- redundant insn. Skip any if so. */
- while (new_thread && ! own_thread
- && ! insn_sets_resource_p (new_thread, &set, 1)
- && ! insn_sets_resource_p (new_thread, &needed, 1)
- && ! insn_references_resource_p (new_thread,
- &set, 1)
- && (prior_insn
- = redundant_insn (new_thread, insn,
- delay_list)))
- {
- /* We know we do not own the thread, so no need
- to call update_block and delete_insn. */
- fix_reg_dead_note (prior_insn, insn);
- update_reg_unused_notes (prior_insn, new_thread);
- new_thread = next_active_insn (new_thread);
- }
- break;
- }
-
- continue;
- }
- }
- }
-
- /* This insn can't go into a delay slot. */
- lose = 1;
- mark_set_resources (trial, &set, 0, 1);
- mark_referenced_resources (trial, &needed, 1);
-
- /* Ensure we don't put insns between the setting of cc and the comparison
- by moving a setting of cc into an earlier delay slot since these insns
- could clobber the condition code. */
- set.cc = 1;
-
- /* If this insn is a register-register copy and the next insn has
- a use of our destination, change it to use our source. That way,
- it will become a candidate for our delay slot the next time
- through this loop. This case occurs commonly in loops that
- scan a list.
-
- We could check for more complex cases than those tested below,
- but it doesn't seem worth it. It might also be a good idea to try
- to swap the two insns. That might do better.
-
- We can't do this if the next insn modifies our destination, because
- that would make the replacement into the insn invalid. We also can't
- do this if it modifies our source, because it might be an earlyclobber
- operand. This latter test also prevents updating the contents of
- a PRE_INC. */
-
- if (GET_CODE (trial) == INSN && GET_CODE (pat) == SET
- && GET_CODE (SET_SRC (pat)) == REG
- && GET_CODE (SET_DEST (pat)) == REG)
- {
- rtx next = next_nonnote_insn (trial);
-
- if (next && GET_CODE (next) == INSN
- && GET_CODE (PATTERN (next)) != USE
- && ! reg_set_p (SET_DEST (pat), next)
- && ! reg_set_p (SET_SRC (pat), next)
- && reg_referenced_p (SET_DEST (pat), PATTERN (next)))
- validate_replace_rtx (SET_DEST (pat), SET_SRC (pat), next);
- }
- }
-
- /* If we stopped on a branch insn that has delay slots, see if we can
- steal some of the insns in those slots. */
- if (trial && GET_CODE (trial) == INSN
- && GET_CODE (PATTERN (trial)) == SEQUENCE
- && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN)
- {
- /* If this is the `true' thread, we will want to follow the jump,
- so we can only do this if we have taken everything up to here. */
- if (thread_if_true && trial == new_thread)
- delay_list
- = steal_delay_list_from_target (insn, condition, PATTERN (trial),
- delay_list, &set, &needed,
- &opposite_needed, slots_to_fill,
- pslots_filled, &must_annul,
- &new_thread);
- else if (! thread_if_true)
- delay_list
- = steal_delay_list_from_fallthrough (insn, condition,
- PATTERN (trial),
- delay_list, &set, &needed,
- &opposite_needed, slots_to_fill,
- pslots_filled, &must_annul);
- }
-
- /* If we haven't found anything for this delay slot and it is very
- likely that the branch will be taken, see if the insn at our target
- increments or decrements a register with an increment that does not
- depend on the destination register. If so, try to place the opposite
- arithmetic insn after the jump insn and put the arithmetic insn in the
- delay slot. If we can't do this, return. */
- if (delay_list == 0 && likely && new_thread
- && GET_CODE (new_thread) == INSN
- && GET_CODE (PATTERN (new_thread)) != ASM_INPUT
- && asm_noperands (PATTERN (new_thread)) < 0)
- {
- rtx pat = PATTERN (new_thread);
- rtx dest;
- rtx src;
-
- trial = new_thread;
- pat = PATTERN (trial);
-
- if (GET_CODE (trial) != INSN || GET_CODE (pat) != SET
- || ! eligible_for_delay (insn, 0, trial, flags))
- return 0;
-
- dest = SET_DEST (pat), src = SET_SRC (pat);
- if ((GET_CODE (src) == PLUS || GET_CODE (src) == MINUS)
- && rtx_equal_p (XEXP (src, 0), dest)
- && ! reg_overlap_mentioned_p (dest, XEXP (src, 1)))
- {
- rtx other = XEXP (src, 1);
- rtx new_arith;
- rtx ninsn;
-
- /* If this is a constant adjustment, use the same code with
- the negated constant. Otherwise, reverse the sense of the
- arithmetic. */
- if (GET_CODE (other) == CONST_INT)
- new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,
- negate_rtx (GET_MODE (src), other));
- else
- new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,
- GET_MODE (src), dest, other);
-
- ninsn = emit_insn_after (gen_rtx_SET (VOIDmode, dest, new_arith),
- insn);
-
- if (recog_memoized (ninsn) < 0
- || (extract_insn (ninsn), ! constrain_operands (1)))
- {
- delete_insn (ninsn);
- return 0;
- }
-
- if (own_thread)
- {
- update_block (trial, thread);
- if (trial == thread)
- {
- thread = next_active_insn (thread);
- if (new_thread == trial)
- new_thread = thread;
- }
- delete_insn (trial);
- }
- else
- new_thread = next_active_insn (trial);
-
- ninsn = own_thread ? trial : copy_rtx (trial);
- if (thread_if_true)
- INSN_FROM_TARGET_P (ninsn) = 1;
-
- delay_list = add_to_delay_list (ninsn, NULL_RTX);
- (*pslots_filled)++;
- }
- }
-
- if (delay_list && must_annul)
- INSN_ANNULLED_BRANCH_P (insn) = 1;
-
- /* If we are to branch into the middle of this thread, find an appropriate
- label or make a new one if none, and redirect INSN to it. If we hit the
- end of the function, use the end-of-function label. */
- if (new_thread != thread)
- {
- rtx label;
-
- if (! thread_if_true)
- abort ();
-
- if (new_thread && GET_CODE (new_thread) == JUMP_INSN
- && (simplejump_p (new_thread)
- || GET_CODE (PATTERN (new_thread)) == RETURN)
- && redirect_with_delay_list_safe_p (insn,
- JUMP_LABEL (new_thread),
- delay_list))
- new_thread = follow_jumps (JUMP_LABEL (new_thread));
-
- if (new_thread == 0)
- label = find_end_label ();
- else if (GET_CODE (new_thread) == CODE_LABEL)
- label = new_thread;
- else
- label = get_label_before (new_thread);
-
- reorg_redirect_jump (insn, label);
- }
-
- return delay_list;
-}
-
-/* Make another attempt to find insns to place in delay slots.
-
- We previously looked for insns located in front of the delay insn
- and, for non-jump delay insns, located behind the delay insn.
-
- Here only try to schedule jump insns and try to move insns from either
- the target or the following insns into the delay slot. If annulling is
- supported, we will be likely to do this. Otherwise, we can do this only
- if safe. */
-
-static void
-fill_eager_delay_slots ()
-{
- register rtx insn;
- register int i;
- int num_unfilled_slots = unfilled_slots_next - unfilled_slots_base;
-
- for (i = 0; i < num_unfilled_slots; i++)
- {
- rtx condition;
- rtx target_label, insn_at_target, fallthrough_insn;
- rtx delay_list = 0;
- int own_target;
- int own_fallthrough;
- int prediction, slots_to_fill, slots_filled;
-
- insn = unfilled_slots_base[i];
- if (insn == 0
- || INSN_DELETED_P (insn)
- || GET_CODE (insn) != JUMP_INSN
- || ! (condjump_p (insn) || condjump_in_parallel_p (insn)))
- continue;
-
- slots_to_fill = num_delay_slots (insn);
- /* Some machine descriptions have defined instructions to have
- delay slots only in certain circumstances which may depend on
- nearby insns (which change due to reorg's actions).
-
- For example, the PA port normally has delay slots for unconditional
- jumps.
-
- However, the PA port claims such jumps do not have a delay slot
- if they are immediate successors of certain CALL_INSNs. This
- allows the port to favor filling the delay slot of the call with
- the unconditional jump. */
- if (slots_to_fill == 0)
- continue;
-
- slots_filled = 0;
- target_label = JUMP_LABEL (insn);
- condition = get_branch_condition (insn, target_label);
-
- if (condition == 0)
- continue;
-
- /* Get the next active fallthrough and target insns and see if we own
- them. Then see whether the branch is likely true. We don't need
- to do a lot of this for unconditional branches. */
-
- insn_at_target = next_active_insn (target_label);
- own_target = own_thread_p (target_label, target_label, 0);
-
- if (condition == const_true_rtx)
- {
- own_fallthrough = 0;
- fallthrough_insn = 0;
- prediction = 2;
- }
- else
- {
- fallthrough_insn = next_active_insn (insn);
- own_fallthrough = own_thread_p (NEXT_INSN (insn), NULL_RTX, 1);
- prediction = mostly_true_jump (insn, condition);
- }
-
- /* If this insn is expected to branch, first try to get insns from our
- target, then our fallthrough insns. If it is not expected to branch,
- try the other order. */
-
- if (prediction > 0)
- {
- delay_list
- = fill_slots_from_thread (insn, condition, insn_at_target,
- fallthrough_insn, prediction == 2, 1,
- own_target,
- slots_to_fill, &slots_filled, delay_list);
-
- if (delay_list == 0 && own_fallthrough)
- {
- /* Even though we didn't find anything for delay slots,
- we might have found a redundant insn which we deleted
- from the thread that was filled. So we have to recompute
- the next insn at the target. */
- target_label = JUMP_LABEL (insn);
- insn_at_target = next_active_insn (target_label);
-
- delay_list
- = fill_slots_from_thread (insn, condition, fallthrough_insn,
- insn_at_target, 0, 0,
- own_fallthrough,
- slots_to_fill, &slots_filled,
- delay_list);
- }
- }
- else
- {
- if (own_fallthrough)
- delay_list
- = fill_slots_from_thread (insn, condition, fallthrough_insn,
- insn_at_target, 0, 0,
- own_fallthrough,
- slots_to_fill, &slots_filled,
- delay_list);
-
- if (delay_list == 0)
- delay_list
- = fill_slots_from_thread (insn, condition, insn_at_target,
- next_active_insn (insn), 0, 1,
- own_target,
- slots_to_fill, &slots_filled,
- delay_list);
- }
-
- if (delay_list)
- unfilled_slots_base[i]
- = emit_delay_sequence (insn, delay_list, slots_filled);
-
- if (slots_to_fill == slots_filled)
- unfilled_slots_base[i] = 0;
-
- note_delay_statistics (slots_filled, 1);
- }
-}
-
-/* Once we have tried two ways to fill a delay slot, make a pass over the
- code to try to improve the results and to do such things as more jump
- threading. */
-
-static void
-relax_delay_slots (first)
- rtx first;
-{
- register rtx insn, next, pat;
- register rtx trial, delay_insn, target_label;
-
- /* Look at every JUMP_INSN and see if we can improve it. */
- for (insn = first; insn; insn = next)
- {
- rtx other;
-
- next = next_active_insn (insn);
-
- /* If this is a jump insn, see if it now jumps to a jump, jumps to
- the next insn, or jumps to a label that is not the last of a
- group of consecutive labels. */
- if (GET_CODE (insn) == JUMP_INSN
- && (condjump_p (insn) || condjump_in_parallel_p (insn))
- && (target_label = JUMP_LABEL (insn)) != 0)
- {
- target_label = follow_jumps (target_label);
- target_label = prev_label (next_active_insn (target_label));
-
- if (target_label == 0)
- target_label = find_end_label ();
-
- if (next_active_insn (target_label) == next
- && ! condjump_in_parallel_p (insn))
- {
- delete_jump (insn);
- continue;
- }
-
- if (target_label != JUMP_LABEL (insn))
- reorg_redirect_jump (insn, target_label);
-
- /* See if this jump branches around an unconditional jump.
- If so, invert this jump and point it to the target of the
- second jump. */
- if (next && GET_CODE (next) == JUMP_INSN
- && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
- && next_active_insn (target_label) == next_active_insn (next)
- && no_labels_between_p (insn, next))
- {
- rtx label = JUMP_LABEL (next);
-
- /* Be careful how we do this to avoid deleting code or
- labels that are momentarily dead. See similar optimization
- in jump.c.
-
- We also need to ensure we properly handle the case when
- invert_jump fails. */
-
- ++LABEL_NUSES (target_label);
- if (label)
- ++LABEL_NUSES (label);
-
- if (invert_jump (insn, label))
- {
- delete_insn (next);
- next = insn;
- }
-
- if (label)
- --LABEL_NUSES (label);
-
- if (--LABEL_NUSES (target_label) == 0)
- delete_insn (target_label);
-
- continue;
- }
- }
-
- /* If this is an unconditional jump and the previous insn is a
- conditional jump, try reversing the condition of the previous
- insn and swapping our targets. The next pass might be able to
- fill the slots.
-
- Don't do this if we expect the conditional branch to be true, because
- we would then be making the more common case longer. */
-
- if (GET_CODE (insn) == JUMP_INSN
- && (simplejump_p (insn) || GET_CODE (PATTERN (insn)) == RETURN)
- && (other = prev_active_insn (insn)) != 0
- && (condjump_p (other) || condjump_in_parallel_p (other))
- && no_labels_between_p (other, insn)
- && 0 > mostly_true_jump (other,
- get_branch_condition (other,
- JUMP_LABEL (other))))
- {
- rtx other_target = JUMP_LABEL (other);
- target_label = JUMP_LABEL (insn);
-
- /* Increment the count of OTHER_TARGET, so it doesn't get deleted
- as we move the label. */
- if (other_target)
- ++LABEL_NUSES (other_target);
-
- if (invert_jump (other, target_label))
- reorg_redirect_jump (insn, other_target);
-
- if (other_target)
- --LABEL_NUSES (other_target);
- }
-
- /* Now look only at cases where we have filled a delay slot. */
- if (GET_CODE (insn) != INSN
- || GET_CODE (PATTERN (insn)) != SEQUENCE)
- continue;
-
- pat = PATTERN (insn);
- delay_insn = XVECEXP (pat, 0, 0);
-
- /* See if the first insn in the delay slot is redundant with some
- previous insn. Remove it from the delay slot if so; then set up
- to reprocess this insn. */
- if (redundant_insn (XVECEXP (pat, 0, 1), delay_insn, 0))
- {
- delete_from_delay_slot (XVECEXP (pat, 0, 1));
- next = prev_active_insn (next);
- continue;
- }
-
- /* See if we have a RETURN insn with a filled delay slot followed
- by a RETURN insn with an unfilled delay slot. If so, we can delete
- the first RETURN (but not its delay insn). This gives the same
- effect in fewer instructions.
-
- Only do so if optimizing for size since this results in slower, but
- smaller code. */
- if (optimize_size
- && GET_CODE (PATTERN (delay_insn)) == RETURN
- && next
- && GET_CODE (next) == JUMP_INSN
- && GET_CODE (PATTERN (next)) == RETURN)
- {
- int i;
-
- /* Delete the RETURN and just execute the delay list insns.
-
- We do this by deleting the INSN containing the SEQUENCE, then
- re-emitting the insns separately, and then deleting the RETURN.
- This allows the count of the jump target to be properly
- decremented. */
-
- /* Clear the from target bit, since these insns are no longer
- in delay slots. */
- for (i = 0; i < XVECLEN (pat, 0); i++)
- INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
-
- trial = PREV_INSN (insn);
- delete_insn (insn);
- emit_insn_after (pat, trial);
- delete_scheduled_jump (delay_insn);
- continue;
- }
-
- /* Now look only at the cases where we have a filled JUMP_INSN. */
- if (GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) != JUMP_INSN
- || ! (condjump_p (XVECEXP (PATTERN (insn), 0, 0))
- || condjump_in_parallel_p (XVECEXP (PATTERN (insn), 0, 0))))
- continue;
-
- target_label = JUMP_LABEL (delay_insn);
-
- if (target_label)
- {
- /* If this jump goes to another unconditional jump, thread it, but
- don't convert a jump into a RETURN here. */
- trial = follow_jumps (target_label);
- /* We use next_real_insn instead of next_active_insn, so that
- the special USE insns emitted by reorg won't be ignored.
- If they are ignored, then they will get deleted if target_label
- is now unreachable, and that would cause mark_target_live_regs
- to fail. */
- trial = prev_label (next_real_insn (trial));
- if (trial == 0 && target_label != 0)
- trial = find_end_label ();
-
- if (trial != target_label
- && redirect_with_delay_slots_safe_p (delay_insn, trial, insn))
- {
- reorg_redirect_jump (delay_insn, trial);
- target_label = trial;
- }
-
- /* If the first insn at TARGET_LABEL is redundant with a previous
- insn, redirect the jump to the following insn and process it again. */
- trial = next_active_insn (target_label);
- if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
- && redundant_insn (trial, insn, 0))
- {
- rtx tmp;
-
- /* Figure out where to emit the special USE insn so we don't
- later incorrectly compute register live/death info. */
- tmp = next_active_insn (trial);
- if (tmp == 0)
- tmp = find_end_label ();
-
- /* Insert the special USE insn and update dataflow info. */
- update_block (trial, tmp);
-
- /* Now emit a label before the special USE insn, and
- redirect our jump to the new label. */
- target_label = get_label_before (PREV_INSN (tmp));
- reorg_redirect_jump (delay_insn, target_label);
- next = insn;
- continue;
- }
-
- /* Similarly, if it is an unconditional jump with one insn in its
- delay list and that insn is redundant, thread the jump. */
- if (trial && GET_CODE (PATTERN (trial)) == SEQUENCE
- && XVECLEN (PATTERN (trial), 0) == 2
- && GET_CODE (XVECEXP (PATTERN (trial), 0, 0)) == JUMP_INSN
- && (simplejump_p (XVECEXP (PATTERN (trial), 0, 0))
- || GET_CODE (PATTERN (XVECEXP (PATTERN (trial), 0, 0))) == RETURN)
- && redundant_insn (XVECEXP (PATTERN (trial), 0, 1), insn, 0))
- {
- target_label = JUMP_LABEL (XVECEXP (PATTERN (trial), 0, 0));
- if (target_label == 0)
- target_label = find_end_label ();
-
- if (redirect_with_delay_slots_safe_p (delay_insn, target_label,
- insn))
- {
- reorg_redirect_jump (delay_insn, target_label);
- next = insn;
- continue;
- }
- }
- }
-
- if (! INSN_ANNULLED_BRANCH_P (delay_insn)
- && prev_active_insn (target_label) == insn
- && ! condjump_in_parallel_p (delay_insn)
-#ifdef HAVE_cc0
- /* If the last insn in the delay slot sets CC0 for some insn,
- various code assumes that it is in a delay slot. We could
- put it back where it belonged and delete the register notes,
- but it doesn't seem worthwhile in this uncommon case. */
- && ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1),
- REG_CC_USER, NULL_RTX)
-#endif
- )
- {
- int i;
-
- /* All this insn does is execute its delay list and jump to the
- following insn. So delete the jump and just execute the delay
- list insns.
-
- We do this by deleting the INSN containing the SEQUENCE, then
- re-emitting the insns separately, and then deleting the jump.
- This allows the count of the jump target to be properly
- decremented. */
-
- /* Clear the from target bit, since these insns are no longer
- in delay slots. */
- for (i = 0; i < XVECLEN (pat, 0); i++)
- INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
-
- trial = PREV_INSN (insn);
- delete_insn (insn);
- emit_insn_after (pat, trial);
- delete_scheduled_jump (delay_insn);
- continue;
- }
-
- /* See if this is an unconditional jump around a single insn which is
- identical to the one in its delay slot. In this case, we can just
- delete the branch and the insn in its delay slot. */
- if (next && GET_CODE (next) == INSN
- && prev_label (next_active_insn (next)) == target_label
- && simplejump_p (insn)
- && XVECLEN (pat, 0) == 2
- && rtx_equal_p (PATTERN (next), PATTERN (XVECEXP (pat, 0, 1))))
- {
- delete_insn (insn);
- continue;
- }
-
- /* See if this jump (with its delay slots) branches around another
- jump (without delay slots). If so, invert this jump and point
- it to the target of the second jump. We cannot do this for
- annulled jumps, though. Again, don't convert a jump to a RETURN
- here. */
- if (! INSN_ANNULLED_BRANCH_P (delay_insn)
- && next && GET_CODE (next) == JUMP_INSN
- && (simplejump_p (next) || GET_CODE (PATTERN (next)) == RETURN)
- && next_active_insn (target_label) == next_active_insn (next)
- && no_labels_between_p (insn, next))
- {
- rtx label = JUMP_LABEL (next);
- rtx old_label = JUMP_LABEL (delay_insn);
-
- if (label == 0)
- label = find_end_label ();
-
- if (redirect_with_delay_slots_safe_p (delay_insn, label, insn))
- {
- /* Be careful how we do this to avoid deleting code or labels
- that are momentarily dead. See similar optimization in
- jump.c */
- if (old_label)
- ++LABEL_NUSES (old_label);
-
- if (invert_jump (delay_insn, label))
- {
- int i;
-
- /* Must update the INSN_FROM_TARGET_P bits now that
- the branch is reversed, so that mark_target_live_regs
- will handle the delay slot insn correctly. */
- for (i = 1; i < XVECLEN (PATTERN (insn), 0); i++)
- {
- rtx slot = XVECEXP (PATTERN (insn), 0, i);
- INSN_FROM_TARGET_P (slot) = ! INSN_FROM_TARGET_P (slot);
- }
-
- delete_insn (next);
- next = insn;
- }
-
- if (old_label && --LABEL_NUSES (old_label) == 0)
- delete_insn (old_label);
- continue;
- }
- }
-
- /* If we own the thread opposite the way this insn branches, see if we
- can merge its delay slots with following insns. */
- if (INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
- && own_thread_p (NEXT_INSN (insn), 0, 1))
- try_merge_delay_insns (insn, next);
- else if (! INSN_FROM_TARGET_P (XVECEXP (pat, 0, 1))
- && own_thread_p (target_label, target_label, 0))
- try_merge_delay_insns (insn, next_active_insn (target_label));
-
- /* If we get here, we haven't deleted INSN. But we may have deleted
- NEXT, so recompute it. */
- next = next_active_insn (insn);
- }
-}
-
-
-/* Try to find insns to place in delay slots. */
-
-void
-dbr_schedule (first, file)
- rtx first;
- FILE *file;
-{
- rtx insn, next, epilogue_insn = 0;
- int i;
-#if 0
- int old_flag_no_peephole = flag_no_peephole;
-
- /* Execute `final' once in prescan mode to delete any insns that won't be
- used. Don't let final try to do any peephole optimization--it will
- ruin dataflow information for this pass. */
-
- flag_no_peephole = 1;
- final (first, 0, NO_DEBUG, 1, 1);
- flag_no_peephole = old_flag_no_peephole;
-#endif
-
- /* If the current function has no insns other than the prologue and
- epilogue, then do not try to fill any delay slots. */
- if (n_basic_blocks == 0)
- return;
-
- /* Find the highest INSN_UID and allocate and initialize our map from
- INSN_UID's to position in code. */
- for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
- {
- if (INSN_UID (insn) > max_uid)
- max_uid = INSN_UID (insn);
- if (GET_CODE (insn) == NOTE
- && NOTE_LINE_NUMBER (insn) == NOTE_INSN_EPILOGUE_BEG)
- epilogue_insn = insn;
- }
-
- uid_to_ruid = (int *) alloca ((max_uid + 1) * sizeof (int));
- for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
- uid_to_ruid[INSN_UID (insn)] = i;
-
- /* Initialize the list of insns that need filling. */
- if (unfilled_firstobj == 0)
- {
- gcc_obstack_init (&unfilled_slots_obstack);
- unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
- }
-
- for (insn = next_active_insn (first); insn; insn = next_active_insn (insn))
- {
- rtx target;
-
- INSN_ANNULLED_BRANCH_P (insn) = 0;
- INSN_FROM_TARGET_P (insn) = 0;
-
- /* Skip vector tables. We can't get attributes for them. */
- if (GET_CODE (insn) == JUMP_INSN
- && (GET_CODE (PATTERN (insn)) == ADDR_VEC
- || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
- continue;
-
- if (num_delay_slots (insn) > 0)
- obstack_ptr_grow (&unfilled_slots_obstack, insn);
-
- /* Ensure all jumps go to the last of a set of consecutive labels. */
- if (GET_CODE (insn) == JUMP_INSN
- && (condjump_p (insn) || condjump_in_parallel_p (insn))
- && JUMP_LABEL (insn) != 0
- && ((target = prev_label (next_active_insn (JUMP_LABEL (insn))))
- != JUMP_LABEL (insn)))
- redirect_jump (insn, target);
- }
-
- init_resource_info (epilogue_insn);
-
- /* Show we haven't computed an end-of-function label yet. */
- end_of_function_label = 0;
-
- /* Initialize the statistics for this function. */
- zero_memory ((char *) num_insns_needing_delays, sizeof num_insns_needing_delays);
- zero_memory ((char *) num_filled_delays, sizeof num_filled_delays);
-
- /* Now do the delay slot filling. Try everything twice in case earlier
- changes make more slots fillable. */
-
- for (reorg_pass_number = 0;
- reorg_pass_number < MAX_REORG_PASSES;
- reorg_pass_number++)
- {
- fill_simple_delay_slots (1);
- fill_simple_delay_slots (0);
- fill_eager_delay_slots ();
- relax_delay_slots (first);
- }
-
- /* Delete any USE insns made by update_block; subsequent passes don't need
- them or know how to deal with them. */
- for (insn = first; insn; insn = next)
- {
- next = NEXT_INSN (insn);
-
- if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == USE
- && GET_RTX_CLASS (GET_CODE (XEXP (PATTERN (insn), 0))) == 'i')
- next = delete_insn (insn);
- }
-
- /* If we made an end of function label, indicate that it is now
- safe to delete it by undoing our prior adjustment to LABEL_NUSES.
- If it is now unused, delete it. */
- if (end_of_function_label && --LABEL_NUSES (end_of_function_label) == 0)
- delete_insn (end_of_function_label);
-
-
- obstack_free (&unfilled_slots_obstack, unfilled_firstobj);
-
- /* It is not clear why the line below is needed, but it does seem to be. */
- unfilled_firstobj = (rtx *) obstack_alloc (&unfilled_slots_obstack, 0);
-
- /* Reposition the prologue and epilogue notes in case we moved the
- prologue/epilogue insns. */
- reposition_prologue_and_epilogue_notes (first);
-
- if (file)
- {
- register int i, j, need_comma;
-
- for (reorg_pass_number = 0;
- reorg_pass_number < MAX_REORG_PASSES;
- reorg_pass_number++)
- {
- fprintf (file, ";; Reorg pass #%d:\n", reorg_pass_number + 1);
- for (i = 0; i < NUM_REORG_FUNCTIONS; i++)
- {
- need_comma = 0;
- fprintf (file, ";; Reorg function #%d\n", i);
-
- fprintf (file, ";; %d insns needing delay slots\n;; ",
- num_insns_needing_delays[i][reorg_pass_number]);
-
- for (j = 0; j < MAX_DELAY_HISTOGRAM; j++)
- if (num_filled_delays[i][j][reorg_pass_number])
- {
- if (need_comma)
- fprintf (file, ", ");
- need_comma = 1;
- fprintf (file, "%d got %d delays",
- num_filled_delays[i][j][reorg_pass_number], j);
- }
- fprintf (file, "\n");
- }
- }
- }
-
- /* For all JUMP insns, fill in branch prediction notes, so that during
- assembler output a target can set branch prediction bits in the code.
- We have to do this now, as up until this point the destinations of
- JUMPS can be moved around and changed, but beyond this point that cannot
- happen. */
- for (insn = first; insn; insn = NEXT_INSN (insn))
- {
- int pred_flags;
-
- if (GET_CODE (insn) == INSN)
- {
- rtx pat = PATTERN (insn);
-
- if (GET_CODE (pat) == SEQUENCE)
- insn = XVECEXP (pat, 0, 0);
- }
- if (GET_CODE (insn) != JUMP_INSN)
- continue;
-
- pred_flags = get_jump_flags (insn, JUMP_LABEL (insn));
- REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_BR_PRED,
- GEN_INT (pred_flags),
- REG_NOTES (insn));
- }
- free_resource_info ();
-}
-#endif /* DELAY_SLOTS */
diff --git a/gcc/rtl.h b/gcc/rtl.h
index eed0476..49b9847 100755
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1477,11 +1477,6 @@ extern void fix_register PROTO ((char *, int, int));
extern void regmove_optimize PROTO ((rtx, int, FILE *));
#endif
-/* In reorg.c */
-#ifdef BUFSIZ
-extern void dbr_schedule PROTO ((rtx, FILE *));
-#endif
-
/* In optabs.c */
extern void init_optabs PROTO ((void));
@@ -1503,12 +1498,6 @@ extern int reload PROTO ((rtx, int, FILE *));
/* In caller-save.c */
extern void init_caller_save PROTO ((void));
-/* In reg-stack.c */
-#ifdef BUFSIZ
-extern void reg_to_stack PROTO ((rtx, FILE *));
-#endif
-extern int stack_regs_mentioned_p PROTO ((rtx));
-
/* In fold-const.c */
extern int add_double PROTO ((HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, HOST_WIDE_INT,
diff --git a/gcc/stupid.c b/gcc/stupid.c
index e68f239..1ba8881 100755
--- a/gcc/stupid.c
+++ b/gcc/stupid.c
@@ -531,11 +531,6 @@ stupid_find_reg (call_preserved, class, mode,
for (ins = born_insn; ins < dead_insn; ins++)
IOR_HARD_REG_SET (used, after_insn_hard_regs[ins]);
-#ifdef STACK_REGS
- if (current_function_has_computed_jump)
- for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
- SET_HARD_REG_BIT (used, i);
-#endif
IOR_COMPL_HARD_REG_SET (used, reg_class_contents[(int) class]);
diff --git a/gcc/toplev.c b/gcc/toplev.c
index a9bf402..8c8112c 100755
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -219,13 +219,7 @@ int local_reg_dump = 0;
int global_reg_dump = 0;
int sched2_dump = 0;
int jump2_opt_dump = 0;
-#ifdef DELAY_SLOTS
-int dbr_sched_dump = 0;
-#endif
int flag_print_asm_name = 0;
-#ifdef STACK_REGS
-int stack_reg_dump = 0;
-#endif
#ifdef MACHINE_DEPENDENT_REORG
int mach_dep_reorg_dump = 0;
#endif
@@ -1148,9 +1142,6 @@ int sched_time;
int local_alloc_time;
int global_alloc_time;
int sched2_time;
-#ifdef DELAY_SLOTS
-int dbr_sched_time;
-#endif
int shorten_branch_time;
int stack_reg_time;
int final_time;
@@ -2451,9 +2442,6 @@ compile_file (name)
local_alloc_time = 0;
global_alloc_time = 0;
sched2_time = 0;
-#ifdef DELAY_SLOTS
- dbr_sched_time = 0;
-#endif
shorten_branch_time = 0;
stack_reg_time = 0;
final_time = 0;
@@ -2581,28 +2569,12 @@ compile_file (name)
if (graph_dump_format != no_graph)
clean_graph_dump_file (dump_base_name, ".jump2");
}
-#ifdef DELAY_SLOTS
- if (dbr_sched_dump)
- {
- clean_dump_file (".dbr");
- if (graph_dump_format != no_graph)
- clean_graph_dump_file (dump_base_name, ".dbr");
- }
-#endif
if (gcse_dump)
{
clean_dump_file (".gcse");
if (graph_dump_format != no_graph)
clean_graph_dump_file (dump_base_name, ".gcse");
}
-#ifdef STACK_REGS
- if (stack_reg_dump)
- {
- clean_dump_file (".stack");
- if (graph_dump_format != no_graph)
- clean_graph_dump_file (dump_base_name, ".stack");
- }
-#endif
#ifdef MACHINE_DEPENDENT_REORG
if (mach_dep_reorg_dump)
{
@@ -3021,16 +2993,8 @@ compile_file (name)
finish_graph_dump_file (dump_base_name, ".sched2");
if (jump2_opt_dump)
finish_graph_dump_file (dump_base_name, ".jump2");
-#ifdef DELAY_SLOTS
- if (dbr_sched_dump)
- finish_graph_dump_file (dump_base_name, ".dbr");
-#endif
if (gcse_dump)
finish_graph_dump_file (dump_base_name, ".gcse");
-#ifdef STACK_REGS
- if (stack_reg_dump)
- finish_graph_dump_file (dump_base_name, ".stack");
-#endif
#ifdef MACHINE_DEPENDENT_REORG
if (mach_dep_reorg_dump)
finish_graph_dump_file (dump_base_name, ".mach");
@@ -3060,9 +3024,6 @@ compile_file (name)
print_time ("local-alloc", local_alloc_time);
print_time ("global-alloc", global_alloc_time);
print_time ("sched2", sched2_time);
-#ifdef DELAY_SLOTS
- print_time ("dbranch", dbr_sched_time);
-#endif
print_time ("shorten-branch", shorten_branch_time);
print_time ("stack-reg", stack_reg_time);
print_time ("final", final_time);
@@ -3806,12 +3767,6 @@ rest_of_compilation (decl)
}
}
-#ifdef LEAF_REGISTERS
- leaf_function = 0;
- if (optimize > 0 && only_leaf_regs_used () && leaf_function_p ())
- leaf_function = 1;
-#endif
-
/* One more attempt to remove jumps to .+1
left by dead-store-elimination.
Also do cross-jumping this time
@@ -3845,42 +3800,12 @@ rest_of_compilation (decl)
}
#endif
- /* If a scheduling pass for delayed branches is to be done,
- call the scheduling code. */
-
-#ifdef DELAY_SLOTS
- if (optimize > 0 && flag_delayed_branch)
- {
- TIMEVAR (dbr_sched_time, dbr_schedule (insns, rtl_dump_file));
-
- if (dbr_sched_dump)
- {
- dump_rtl (".dbr", decl, print_rtl_with_bb, insns);
- if (graph_dump_format != no_graph)
- print_rtl_graph_with_bb (dump_base_name, ".dbr", insns);
- }
- }
-#endif
-
/* Shorten branches. */
TIMEVAR (shorten_branch_time,
{
shorten_branches (get_insns ());
});
-#ifdef STACK_REGS
- if (stack_reg_dump)
- open_dump_file (".stack", decl_printable_name (decl, 2));
-
- TIMEVAR (stack_reg_time, reg_to_stack (insns, rtl_dump_file));
-
- if (stack_reg_dump)
- {
- dump_rtl (".stack", decl, print_rtl_with_bb, insns);
- if (graph_dump_format != no_graph)
- print_rtl_graph_with_bb (dump_base_name, ".stack", insns);
- }
-#endif
/* Now turn the rtl into assembler code. */
@@ -4326,9 +4251,6 @@ main (argc, argv)
{
flag_defer_pop = 1;
flag_thread_jumps = 1;
-#ifdef DELAY_SLOTS
- flag_delayed_branch = 1;
-#endif
#ifdef CAN_DEBUG_WITHOUT_FP
flag_omit_frame_pointer = 1;
#endif
@@ -4414,9 +4336,6 @@ main (argc, argv)
{
case 'a':
combine_dump = 1;
-#ifdef DELAY_SLOTS
- dbr_sched_dump = 1;
-#endif
flow_dump = 1;
global_reg_dump = 1;
jump_opt_dump = 1;
@@ -4430,9 +4349,6 @@ main (argc, argv)
gcse_dump = 1;
sched_dump = 1;
sched2_dump = 1;
-#ifdef STACK_REGS
- stack_reg_dump = 1;
-#endif
#ifdef MACHINE_DEPENDENT_REORG
mach_dep_reorg_dump = 1;
#endif
@@ -4443,11 +4359,6 @@ main (argc, argv)
case 'c':
combine_dump = 1;
break;
-#ifdef DELAY_SLOTS
- case 'd':
- dbr_sched_dump = 1;
- break;
-#endif
case 'f':
flow_dump = 1;
break;
@@ -4466,11 +4377,6 @@ main (argc, argv)
case 'J':
jump2_opt_dump = 1;
break;
-#ifdef STACK_REGS
- case 'k':
- stack_reg_dump = 1;
- break;
-#endif
case 'l':
local_reg_dump = 1;
break;
@@ -4869,10 +4775,8 @@ main (argc, argv)
if (flag_schedule_insns || flag_schedule_insns_after_reload)
warning ("instruction scheduling not supported on this target machine");
#endif
-#ifndef DELAY_SLOTS
if (flag_delayed_branch)
warning ("this target machine does not have delayed branches");
-#endif
user_label_prefix = USER_LABEL_PREFIX;
if (flag_leading_underscore != -1)
diff --git a/gcc/varasm.c b/gcc/varasm.c
index bddb28d..5037b43 100755
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -895,12 +895,7 @@ assemble_start_function (decl, fnname)
/* CYGNUS LOCAL law */
if (align > 0)
{
-#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
- ASM_OUTPUT_MAX_SKIP_ALIGN (asm_out_file, align,
- FUNCTION_BOUNDARY_MAX_SKIP);
-#else
ASM_OUTPUT_ALIGN (asm_out_file, align);
-#endif
}
/* END CYGNUS LOCAL */