path: root/libgcc
author	YamaArashi <shadow962@live.com>	2016-04-27 00:00:40 -0700
committer	YamaArashi <shadow962@live.com>	2016-04-27 00:00:40 -0700
commit	9dc75fe3b4be91d6066c8e870eacec954117cc08 (patch)
tree	0ff095d5dd600f23a98952682edf5226f40f0391 /libgcc
parent	9e5f6a79618cb7df0b7d3816472b530c3b7d9c1a (diff)
reorganize files
Diffstat (limited to 'libgcc')
-rwxr-xr-x	libgcc/fp-bit.c	1507
-rwxr-xr-x	libgcc/lib1thumb.asm	736
-rwxr-xr-x	libgcc/libgcc1-test.c	117
-rwxr-xr-x	libgcc/libgcc1.c	596
-rwxr-xr-x	libgcc/libgcc2.c	946
5 files changed, 3902 insertions, 0 deletions
diff --git a/libgcc/fp-bit.c b/libgcc/fp-bit.c
new file mode 100755
index 0000000..6b8bd70
--- /dev/null
+++ b/libgcc/fp-bit.c
@@ -0,0 +1,1507 @@
+/* This is a software floating point library which can be used instead of
+ the floating point routines in libgcc1.c for targets without hardware
+ floating point.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+/* This implements IEEE 754 format arithmetic, but does not provide a
+ mechanism for setting the rounding mode, or for generating or handling
+ exceptions.
+
+ The original code by Steve Chamberlain, hacked by Mark Eichin and Jim
+ Wilson, all of Cygnus Support. */
+
+/* The intended way to use this file is to make two copies, add `#define FLOAT'
+ to one copy, then compile both copies and add them to libgcc.a. */
+
+/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines
+ from this file are compiled via additional -D options.
+
+ This avoids the need to pull in the entire fp emulation library
+ when only a small number of functions are needed.
+
+ If FINE_GRAINED_LIBRARIES is not defined, then compile every
+ suitable routine. */
+#ifndef FINE_GRAINED_LIBRARIES
+#define L_pack_df
+#define L_unpack_df
+#define L_pack_sf
+#define L_unpack_sf
+#define L_addsub_sf
+#define L_addsub_df
+#define L_mul_sf
+#define L_mul_df
+#define L_div_sf
+#define L_div_df
+#define L_fpcmp_parts_sf
+#define L_fpcmp_parts_df
+#define L_compare_sf
+#define L_compare_df
+#define L_eq_sf
+#define L_eq_df
+#define L_ne_sf
+#define L_ne_df
+#define L_gt_sf
+#define L_gt_df
+#define L_ge_sf
+#define L_ge_df
+#define L_lt_sf
+#define L_lt_df
+#define L_le_sf
+#define L_le_df
+#define L_si_to_sf
+#define L_si_to_df
+#define L_sf_to_si
+#define L_df_to_si
+#define L_sf_to_usi
+#define L_df_to_usi
+#define L_negate_sf
+#define L_negate_df
+#define L_make_sf
+#define L_make_df
+#define L_sf_to_df
+#define L_df_to_sf
+#endif
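+
+/* Illustrative build sketch, not part of the original source: the
+   two-copies scheme described above might be driven by commands along
+   these lines (the object file names are hypothetical):
+
+       gcc -c fp-bit.c -o dp-bit.o           # double (DFmode) routines
+       gcc -c -DFLOAT fp-bit.c -o fp-bit.o   # float (SFmode) routines
+
+   With FINE_GRAINED_LIBRARIES, one object per routine can be built
+   instead, e.g.:
+
+       gcc -c -DFINE_GRAINED_LIBRARIES -DFLOAT -DL_mul_sf fp-bit.c -o _mul_sf.o
+*/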
+
+/* The following macros can be defined to change the behaviour of this file:
+ FLOAT: Implement a `float', aka SFmode, fp library. If this is not
+ defined, then this file implements a `double', aka DFmode, fp library.
+ FLOAT_ONLY: Used with FLOAT, to implement a `float' only library, i.e.
+ don't include float->double conversion which requires the double library.
+ This is useful only for machines which can't support doubles, e.g. some
+ 8-bit processors.
+ CMPtype: Specify the type that floating point compares should return.
+ This defaults to SItype, aka int.
+ US_SOFTWARE_GOFAST: This makes all entry points use the same names as the
+ US Software goFast library. If this is not defined, the entry points use
+ the same names as libgcc1.c.
+ _DEBUG_BITFLOAT: This makes debugging the code a little easier, by adding
+ two integers to the FLO_union_type.
+ NO_NANS: Disable nan and infinity handling
+ SMALL_MACHINE: Useful when operations on QIs and HIs are faster
+ than on an SI */
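+
+/* For example, a hypothetical float-only configuration for a small
+   8-bit target (purely illustrative, not taken from any real port)
+   might be:
+
+       #define FLOAT
+       #define FLOAT_ONLY
+       #define SMALL_MACHINE
+       #define CMPtype HItype
+*/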
+
+/* We don't currently support extended floats (long doubles) on machines
+ without hardware to deal with them.
+
+   These stubs are just to keep the linker from complaining about unresolved
+   references which can be pulled in from libio & libstdc++, even if the
+   user isn't using long doubles.  However, they may generate an unresolved
+   external reference to abort if abort is not otherwise used by the
+   function, and the stubs are referenced from within libc, since libgcc
+   goes before and after the system library.  */
+
+#ifdef EXTENDED_FLOAT_STUBS
+__truncxfsf2 (){ abort(); }
+__extendsfxf2 (){ abort(); }
+__addxf3 (){ abort(); }
+__divxf3 (){ abort(); }
+__eqxf2 (){ abort(); }
+__extenddfxf2 (){ abort(); }
+__gtxf2 (){ abort(); }
+__lexf2 (){ abort(); }
+__ltxf2 (){ abort(); }
+__mulxf3 (){ abort(); }
+__negxf2 (){ abort(); }
+__nexf2 (){ abort(); }
+__subxf3 (){ abort(); }
+__truncxfdf2 (){ abort(); }
+
+__trunctfsf2 (){ abort(); }
+__extendsftf2 (){ abort(); }
+__addtf3 (){ abort(); }
+__divtf3 (){ abort(); }
+__eqtf2 (){ abort(); }
+__extenddftf2 (){ abort(); }
+__gttf2 (){ abort(); }
+__letf2 (){ abort(); }
+__lttf2 (){ abort(); }
+__multf3 (){ abort(); }
+__negtf2 (){ abort(); }
+__netf2 (){ abort(); }
+__subtf3 (){ abort(); }
+__trunctfdf2 (){ abort(); }
+__gexf2 (){ abort(); }
+__fixxfsi (){ abort(); }
+__floatsixf (){ abort(); }
+#else /* !EXTENDED_FLOAT_STUBS, rest of file */
+
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+typedef int HItype __attribute__ ((mode (HI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+
+/* The type of the result of a fp compare */
+#ifndef CMPtype
+#define CMPtype SItype
+#endif
+
+typedef unsigned int UHItype __attribute__ ((mode (HI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+
+#define MAX_SI_INT ((SItype) ((unsigned) (~0)>>1))
+#define MAX_USI_INT ((USItype) ~0)
+
+
+#ifdef FLOAT_ONLY
+#define NO_DI_MODE
+#endif
+
+#ifdef FLOAT
+# define NGARDS 7L
+# define GARDROUND 0x3f
+# define GARDMASK 0x7f
+# define GARDMSB 0x40
+# define EXPBITS 8
+# define EXPBIAS 127
+# define FRACBITS 23
+# define EXPMAX (0xff)
+# define QUIET_NAN 0x100000L
+# define FRAC_NBITS 32
+# define FRACHIGH 0x80000000L
+# define FRACHIGH2 0xc0000000L
+# define pack_d __pack_f
+# define unpack_d __unpack_f
+# define __fpcmp_parts __fpcmp_parts_f
+ typedef USItype fractype;
+ typedef UHItype halffractype;
+ typedef SFtype FLO_type;
+ typedef SItype intfrac;
+
+#else
+# define PREFIXFPDP dp
+# define PREFIXSFDF df
+# define NGARDS 8L
+# define GARDROUND 0x7f
+# define GARDMASK 0xff
+# define GARDMSB 0x80
+# define EXPBITS 11
+# define EXPBIAS 1023
+# define FRACBITS 52
+# define EXPMAX (0x7ff)
+# define QUIET_NAN 0x8000000000000LL
+# define FRAC_NBITS 64
+# define FRACHIGH 0x8000000000000000LL
+# define FRACHIGH2 0xc000000000000000LL
+# define pack_d __pack_d
+# define unpack_d __unpack_d
+# define __fpcmp_parts __fpcmp_parts_d
+ typedef UDItype fractype;
+ typedef USItype halffractype;
+ typedef DFtype FLO_type;
+ typedef DItype intfrac;
+#endif
+
+#ifdef US_SOFTWARE_GOFAST
+# ifdef FLOAT
+# define add fpadd
+# define sub fpsub
+# define multiply fpmul
+# define divide fpdiv
+# define compare fpcmp
+# define si_to_float sitofp
+# define float_to_si fptosi
+# define float_to_usi fptoui
+# define negate __negsf2
+# define sf_to_df fptodp
+# define dptofp dptofp
+#else
+# define add dpadd
+# define sub dpsub
+# define multiply dpmul
+# define divide dpdiv
+# define compare dpcmp
+# define si_to_float litodp
+# define float_to_si dptoli
+# define float_to_usi dptoul
+# define negate __negdf2
+# define df_to_sf dptofp
+#endif
+#else
+# ifdef FLOAT
+# define add __addsf3
+# define sub __subsf3
+# define multiply __mulsf3
+# define divide __divsf3
+# define compare __cmpsf2
+# define _eq_f2 __eqsf2
+# define _ne_f2 __nesf2
+# define _gt_f2 __gtsf2
+# define _ge_f2 __gesf2
+# define _lt_f2 __ltsf2
+# define _le_f2 __lesf2
+# define si_to_float __floatsisf
+# define float_to_si __fixsfsi
+# define float_to_usi __fixunssfsi
+# define negate __negsf2
+# define sf_to_df __extendsfdf2
+#else
+# define add __adddf3
+# define sub __subdf3
+# define multiply __muldf3
+# define divide __divdf3
+# define compare __cmpdf2
+# define _eq_f2 __eqdf2
+# define _ne_f2 __nedf2
+# define _gt_f2 __gtdf2
+# define _ge_f2 __gedf2
+# define _lt_f2 __ltdf2
+# define _le_f2 __ledf2
+# define si_to_float __floatsidf
+# define float_to_si __fixdfsi
+# define float_to_usi __fixunsdfsi
+# define negate __negdf2
+# define df_to_sf __truncdfsf2
+# endif
+#endif
+
+
+#ifndef INLINE
+#define INLINE __inline__
+#endif
+
+/* Preserve the sticky bit when shifting a fraction right by one.
+   (Despite the name, LSHIFT is a right shift: the old low bit is OR-ed
+   back in so that a set bit is never completely lost.) */
+#define LSHIFT(a) { a = (a & 1) | (a >> 1); }
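+/* For example, applying LSHIFT to binary ...0101 yields ...0011: the
+   discarded low 1 is OR-ed back into the new low bit, so once any 1 has
+   been shifted out, the bottom (sticky) bit stays set for rounding. */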
+
+/* numeric parameters */
+/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa
+ of a float and of a double. Assumes there are only two float types.
+ (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS))
+ */
+#define F_D_BITOFF (52+8-(23+7))
+
+
+#define NORMAL_EXPMIN (-(EXPBIAS)+1)
+#define IMPLICIT_1 (1LL<<(FRACBITS+NGARDS))
+#define IMPLICIT_2 (1LL<<(FRACBITS+1+NGARDS))
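+
+/* For the float case (FRACBITS 23, NGARDS 7), IMPLICIT_1 is 1<<30 and
+   IMPLICIT_2 is 1<<31; a normalized working fraction f always satisfies
+   IMPLICIT_1 <= f < IMPLICIT_2. */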
+
+/* common types */
+
+typedef enum
+{
+ CLASS_SNAN,
+ CLASS_QNAN,
+ CLASS_ZERO,
+ CLASS_NUMBER,
+ CLASS_INFINITY
+} fp_class_type;
+
+typedef struct
+{
+#ifdef SMALL_MACHINE
+ char class;
+ unsigned char sign;
+ short normal_exp;
+#else
+ fp_class_type class;
+ unsigned int sign;
+ int normal_exp;
+#endif
+
+ union
+ {
+ fractype ll;
+ halffractype l[2];
+ } fraction;
+} fp_number_type;
+
+typedef union
+{
+ FLO_type value;
+ fractype value_raw;
+
+#ifndef FLOAT
+ halffractype words[2];
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits;
+#endif
+
+#ifdef _DEBUG_BITFLOAT
+ struct
+ {
+ unsigned int sign:1 __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ }
+ bits_big_endian;
+
+ struct
+ {
+ fractype fraction:FRACBITS __attribute__ ((packed));
+ unsigned int exp:EXPBITS __attribute__ ((packed));
+ unsigned int sign:1 __attribute__ ((packed));
+ }
+ bits_little_endian;
+#endif
+}
+FLO_union_type;
+
+
+/* end of header */
+
+/* IEEE "special" number predicates */
+
+#ifdef NO_NANS
+
+#define nan() 0
+#define isnan(x) 0
+#define isinf(x) 0
+#else
+
+INLINE
+static fp_number_type *
+nan ()
+{
+ static fp_number_type thenan;
+
+ return &thenan;
+}
+
+INLINE
+static int
+isnan ( fp_number_type * x)
+{
+ return x->class == CLASS_SNAN || x->class == CLASS_QNAN;
+}
+
+INLINE
+static int
+isinf ( fp_number_type * x)
+{
+ return x->class == CLASS_INFINITY;
+}
+
+#endif
+
+INLINE
+static int
+iszero ( fp_number_type * x)
+{
+ return x->class == CLASS_ZERO;
+}
+
+INLINE
+static void
+flip_sign ( fp_number_type * x)
+{
+ x->sign = !x->sign;
+}
+
+extern FLO_type pack_d ( fp_number_type * );
+
+#if defined(L_pack_df) || defined(L_pack_sf)
+FLO_type
+pack_d ( fp_number_type * src)
+{
+ FLO_union_type dst;
+ fractype fraction = src->fraction.ll; /* wasn't unsigned before? */
+ int sign = src->sign;
+ int exp = 0;
+
+ if (isnan (src))
+ {
+ exp = EXPMAX;
+ if (src->class == CLASS_QNAN || 1)
+ {
+ fraction |= QUIET_NAN;
+ }
+ }
+ else if (isinf (src))
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else if (iszero (src))
+ {
+ exp = 0;
+ fraction = 0;
+ }
+ else if (fraction == 0)
+ {
+ exp = 0;
+ }
+ else
+ {
+ if (src->normal_exp < NORMAL_EXPMIN)
+ {
+ /* This number's exponent is too low to fit into the bits
+ available in the number, so we'll store 0 in the exponent and
+ shift the fraction to the right to make up for it. */
+
+ int shift = NORMAL_EXPMIN - src->normal_exp;
+
+ exp = 0;
+
+ if (shift > FRAC_NBITS - NGARDS)
+ {
+	      /* No point in shifting, since every significant bit of the
+		 fraction would be shifted out. */
+ fraction = 0;
+ }
+ else
+ {
+	      /* Shift the fraction right by the exponent deficit. */
+ fraction >>= shift;
+ }
+ fraction >>= NGARDS;
+ }
+ else if (src->normal_exp > EXPBIAS)
+ {
+ exp = EXPMAX;
+ fraction = 0;
+ }
+ else
+ {
+ exp = src->normal_exp + EXPBIAS;
+	  /* If the guard bits are all zero except the most significant
+	     one, then we're exactly halfway between two numbers; choose
+	     the one which makes the lsb of the answer 0 (round to even). */
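+	  /* Worked example for float (NGARDS 7): guard bits 1000000, i.e.
+	     (fraction & GARDMASK) == GARDMSB, is the exact halfway case;
+	     GARDROUND + 1 is added only when bit NGARDS (the future lsb)
+	     is already 1, which rounds the result to even. */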
+ if ((fraction & GARDMASK) == GARDMSB)
+ {
+ if (fraction & (1 << NGARDS))
+ fraction += GARDROUND + 1;
+ }
+ else
+ {
+	      /* Not exactly halfway: add GARDROUND (one less than half) so
+		 guard bits above the halfway point carry out and round up. */
+ fraction += GARDROUND;
+ }
+ if (fraction >= IMPLICIT_2)
+ {
+ fraction >>= 1;
+ exp += 1;
+ }
+ fraction >>= NGARDS;
+ }
+ }
+
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ dst.bits.fraction = fraction;
+ dst.bits.exp = exp;
+ dst.bits.sign = sign;
+#else
+ dst.value_raw = fraction & ((((fractype)1) << FRACBITS) - (fractype)1);
+ dst.value_raw |= ((fractype) (exp & ((1 << EXPBITS) - 1))) << FRACBITS;
+  dst.value_raw |= ((fractype) (sign & 1)) << (FRACBITS + EXPBITS);
+#endif
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ {
+ halffractype tmp = dst.words[0];
+ dst.words[0] = dst.words[1];
+ dst.words[1] = tmp;
+ }
+#endif
+
+ return dst.value;
+}
+#endif
+
+extern void unpack_d (FLO_union_type *, fp_number_type *);
+
+#if defined(L_unpack_df) || defined(L_unpack_sf)
+void
+unpack_d (FLO_union_type * src, fp_number_type * dst)
+{
+ /* We previously used bitfields to store the number, but this doesn't
+ handle little/big endian systems conveniently, so use shifts and
+ masks */
+ fractype fraction;
+ int exp;
+ int sign;
+
+#if defined(FLOAT_WORD_ORDER_MISMATCH) && !defined(FLOAT)
+ FLO_union_type swapped;
+
+ swapped.words[0] = src->words[1];
+ swapped.words[1] = src->words[0];
+ src = &swapped;
+#endif
+
+#ifdef FLOAT_BIT_ORDER_MISMATCH
+ fraction = src->bits.fraction;
+ exp = src->bits.exp;
+ sign = src->bits.sign;
+#else
+ fraction = src->value_raw & ((((fractype)1) << FRACBITS) - (fractype)1);
+ exp = ((int)(src->value_raw >> FRACBITS)) & ((1 << EXPBITS) - 1);
+ sign = ((int)(src->value_raw >> (FRACBITS + EXPBITS))) & 1;
+#endif
+
+ dst->sign = sign;
+ if (exp == 0)
+ {
+ /* Hmm. Looks like 0 */
+ if (fraction == 0)
+ {
+ /* tastes like zero */
+ dst->class = CLASS_ZERO;
+ }
+ else
+ {
+ /* Zero exponent with non zero fraction - it's denormalized,
+ so there isn't a leading implicit one - we'll shift it so
+ it gets one. */
+ dst->normal_exp = exp - EXPBIAS + 1;
+ fraction <<= NGARDS;
+
+ dst->class = CLASS_NUMBER;
+#if 1
+ while (fraction < IMPLICIT_1)
+ {
+ fraction <<= 1;
+ dst->normal_exp--;
+ }
+#endif
+ dst->fraction.ll = fraction;
+ }
+ }
+ else if (exp == EXPMAX)
+ {
+      /* Huge exponent. */
+ if (fraction == 0)
+ {
+ /* Attached to a zero fraction - means infinity */
+ dst->class = CLASS_INFINITY;
+ }
+ else
+ {
+	  /* Non-zero fraction means NaN. */
+ if (fraction & QUIET_NAN)
+ {
+ dst->class = CLASS_QNAN;
+ }
+ else
+ {
+ dst->class = CLASS_SNAN;
+ }
+	  /* Keep the fraction part as the NaN payload. */
+ dst->fraction.ll = fraction;
+ }
+ }
+ else
+ {
+ /* Nothing strange about this number */
+ dst->normal_exp = exp - EXPBIAS;
+ dst->class = CLASS_NUMBER;
+ dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1;
+ }
+}
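+
+/* Worked example (float case): unpacking the raw bits 0x3f800000 (1.0f)
+   gives sign 0, stored exponent 127 (so normal_exp 0), stored fraction 0,
+   and a working fraction of (0 << NGARDS) | IMPLICIT_1 == 1<<30. */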
+#endif
+
+#if defined(L_addsub_sf) || defined(L_addsub_df)
+static fp_number_type *
+_fpadd_parts (fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ intfrac tfraction;
+
+ /* Put commonly used fields in local variables. */
+ int a_normal_exp;
+ int b_normal_exp;
+ fractype a_fraction;
+ fractype b_fraction;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+ if (isinf (a))
+ {
+ /* Adding infinities with opposite signs yields a NaN. */
+ if (isinf (b) && a->sign != b->sign)
+ return nan ();
+ return a;
+ }
+ if (isinf (b))
+ {
+ return b;
+ }
+ if (iszero (b))
+ {
+ if (iszero (a))
+ {
+ *tmp = *a;
+ tmp->sign = a->sign & b->sign;
+ return tmp;
+ }
+ return a;
+ }
+ if (iszero (a))
+ {
+ return b;
+ }
+
+  /* We have two numbers: shift the smaller one right, incrementing its
+     exponent, until both exponents are the same. */
+ {
+ int diff;
+
+ a_normal_exp = a->normal_exp;
+ b_normal_exp = b->normal_exp;
+ a_fraction = a->fraction.ll;
+ b_fraction = b->fraction.ll;
+
+ diff = a_normal_exp - b_normal_exp;
+
+ if (diff < 0)
+ diff = -diff;
+ if (diff < FRAC_NBITS)
+ {
+	  /* ??? This shifts one bit at a time.  Optimize.  */
+ while (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp++;
+ LSHIFT (b_fraction);
+ }
+ while (b_normal_exp > a_normal_exp)
+ {
+ a_normal_exp++;
+ LSHIFT (a_fraction);
+ }
+ }
+ else
+ {
+	  /* Something's up: the exponents differ by a full fraction's
+	     width, so just keep the bigger number and zero the other. */
+ if (a_normal_exp > b_normal_exp)
+ {
+ b_normal_exp = a_normal_exp;
+ b_fraction = 0;
+ }
+ else
+ {
+ a_normal_exp = b_normal_exp;
+ a_fraction = 0;
+ }
+ }
+ }
+
+ if (a->sign != b->sign)
+ {
+ if (a->sign)
+ {
+ tfraction = -a_fraction + b_fraction;
+ }
+ else
+ {
+ tfraction = a_fraction - b_fraction;
+ }
+ if (tfraction >= 0)
+ {
+ tmp->sign = 0;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = tfraction;
+ }
+ else
+ {
+ tmp->sign = 1;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = -tfraction;
+ }
+ /* and renormalize it */
+
+ while (tmp->fraction.ll < IMPLICIT_1 && tmp->fraction.ll)
+ {
+ tmp->fraction.ll <<= 1;
+ tmp->normal_exp--;
+ }
+ }
+ else
+ {
+ tmp->sign = a->sign;
+ tmp->normal_exp = a_normal_exp;
+ tmp->fraction.ll = a_fraction + b_fraction;
+ }
+ tmp->class = CLASS_NUMBER;
+  /* Now that the fractions have been added, we may have to shift down
+     by one to renormalize the number. */
+
+ if (tmp->fraction.ll >= IMPLICIT_2)
+ {
+ LSHIFT (tmp->fraction.ll);
+ tmp->normal_exp++;
+ }
+ return tmp;
+
+}
+
+FLO_type
+add (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+
+FLO_type
+sub (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ b.sign ^= 1;
+
+ res = _fpadd_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif
+
+#if defined(L_mul_sf) || defined(L_mul_df)
+static INLINE fp_number_type *
+_fpmul_parts ( fp_number_type * a,
+ fp_number_type * b,
+ fp_number_type * tmp)
+{
+ fractype low = 0;
+ fractype high = 0;
+
+ if (isnan (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isnan (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (isinf (a))
+ {
+ if (iszero (b))
+ return nan ();
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (isinf (b))
+ {
+ if (iszero (a))
+ {
+ return nan ();
+ }
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+ if (iszero (a))
+ {
+ a->sign = a->sign != b->sign;
+ return a;
+ }
+ if (iszero (b))
+ {
+ b->sign = a->sign != b->sign;
+ return b;
+ }
+
+  /* Calculate the mantissa by multiplying the two fractions to get a
+     double-width (e.g. 64x64 -> 128 bit) product. */
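+  /* In schoolbook terms, with a = ah*2^H + al and b = bh*2^H + bl (H
+     being half the fraction width), a*b = ah*bh*2^(2H)
+     + (ah*bl + al*bh)*2^H + al*bl; the 64x64 branch below computes
+     exactly these partial products with carry propagation, while the
+     NO_DI_MODE branch forms the same product one bit at a time. */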
+ {
+#if defined(NO_DI_MODE)
+ {
+ fractype x = a->fraction.ll;
+ fractype ylow = b->fraction.ll;
+ fractype yhigh = 0;
+ int bit;
+
+      /* ??? This multiplies one bit at a time.  Optimize.  */
+ for (bit = 0; bit < FRAC_NBITS; bit++)
+ {
+ int carry;
+
+ if (x & 1)
+ {
+ carry = (low += ylow) < ylow;
+ high += yhigh + carry;
+ }
+ yhigh <<= 1;
+ if (ylow & FRACHIGH)
+ {
+ yhigh |= 1;
+ }
+ ylow <<= 1;
+ x >>= 1;
+ }
+ }
+#elif defined(FLOAT)
+ {
+ /* Multiplying two 32 bit numbers to get a 64 bit number on
+ a machine with DI, so we're safe */
+
+ DItype answer = (DItype)(a->fraction.ll) * (DItype)(b->fraction.ll);
+
+ high = answer >> 32;
+ low = answer;
+ }
+#else
+ /* Doing a 64*64 to 128 */
+ {
+ UDItype nl = a->fraction.ll & 0xffffffff;
+ UDItype nh = a->fraction.ll >> 32;
+ UDItype ml = b->fraction.ll & 0xffffffff;
+ UDItype mh = b->fraction.ll >>32;
+ UDItype pp_ll = ml * nl;
+ UDItype pp_hl = mh * nl;
+ UDItype pp_lh = ml * nh;
+ UDItype pp_hh = mh * nh;
+ UDItype res2 = 0;
+ UDItype res0 = 0;
+ UDItype ps_hh__ = pp_hl + pp_lh;
+ if (ps_hh__ < pp_hl)
+ res2 += 0x100000000LL;
+ pp_hl = (ps_hh__ << 32) & 0xffffffff00000000LL;
+ res0 = pp_ll + pp_hl;
+ if (res0 < pp_ll)
+ res2++;
+ res2 += ((ps_hh__ >> 32) & 0xffffffffL) + pp_hh;
+ high = res2;
+ low = res0;
+ }
+#endif
+ }
+
+ tmp->normal_exp = a->normal_exp + b->normal_exp;
+ tmp->sign = a->sign != b->sign;
+#ifdef FLOAT
+ tmp->normal_exp += 2; /* ??????????????? */
+#else
+ tmp->normal_exp += 4; /* ??????????????? */
+#endif
+ while (high >= IMPLICIT_2)
+ {
+ tmp->normal_exp++;
+ if (high & 1)
+ {
+ low >>= 1;
+ low |= FRACHIGH;
+ }
+ high >>= 1;
+ }
+ while (high < IMPLICIT_1)
+ {
+ tmp->normal_exp--;
+
+ high <<= 1;
+ if (low & FRACHIGH)
+ high |= 1;
+ low <<= 1;
+ }
+  /* Rounding is tricky: ideally we would round here only when doing so
+     won't force another rounding later (see the disabled code below). */
+#if 0
+ if (low & FRACHIGH2)
+ {
+ if (((high & GARDMASK) != GARDMSB)
+ && (((high + 1) & GARDMASK) == GARDMSB))
+ {
+ /* don't round, it gets done again later. */
+ }
+ else
+ {
+ high++;
+ }
+ }
+#endif
+ if ((high & GARDMASK) == GARDMSB)
+ {
+ if (high & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ high += GARDROUND + 1;
+ }
+ else if (low)
+ {
+ /* but we really weren't half way */
+ high += GARDROUND + 1;
+ }
+ }
+ tmp->fraction.ll = high;
+ tmp->class = CLASS_NUMBER;
+ return tmp;
+}
+
+FLO_type
+multiply (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type tmp;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpmul_parts (&a, &b, &tmp);
+
+ return pack_d (res);
+}
+#endif
+
+#if defined(L_div_sf) || defined(L_div_df)
+static INLINE fp_number_type *
+_fpdiv_parts (fp_number_type * a,
+ fp_number_type * b)
+{
+ fractype bit;
+ fractype numerator;
+ fractype denominator;
+ fractype quotient;
+
+ if (isnan (a))
+ {
+ return a;
+ }
+ if (isnan (b))
+ {
+ return b;
+ }
+
+ a->sign = a->sign ^ b->sign;
+
+ if (isinf (a) || iszero (a))
+ {
+ if (a->class == b->class)
+ return nan ();
+ return a;
+ }
+
+ if (isinf (b))
+ {
+ a->fraction.ll = 0;
+ a->normal_exp = 0;
+ return a;
+ }
+ if (iszero (b))
+ {
+ a->class = CLASS_INFINITY;
+ return a;
+ }
+
+  /* Calculate the quotient mantissa by long division of the two
+     fractions. */
+ {
+ /* quotient =
+ ( numerator / denominator) * 2^(numerator exponent - denominator exponent)
+ */
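+
+    /* This is plain restoring long division: starting from the quotient
+       bit at IMPLICIT_1 (1 << (FRACBITS + NGARDS)) and working down to
+       bit 0, it generates FRACBITS + NGARDS + 1 quotient bits, guard
+       bits included. */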
+
+ a->normal_exp = a->normal_exp - b->normal_exp;
+ numerator = a->fraction.ll;
+ denominator = b->fraction.ll;
+
+ if (numerator < denominator)
+ {
+ /* Fraction will be less than 1.0 */
+ numerator *= 2;
+ a->normal_exp--;
+ }
+ bit = IMPLICIT_1;
+ quotient = 0;
+    /* ??? This divides one bit at a time.  Optimize.  */
+ while (bit)
+ {
+ if (numerator >= denominator)
+ {
+ quotient |= bit;
+ numerator -= denominator;
+ }
+ bit >>= 1;
+ numerator *= 2;
+ }
+
+ if ((quotient & GARDMASK) == GARDMSB)
+ {
+ if (quotient & (1 << NGARDS))
+ {
+ /* half way, so round to even */
+ quotient += GARDROUND + 1;
+ }
+ else if (numerator)
+ {
+ /* but we really weren't half way, more bits exist */
+ quotient += GARDROUND + 1;
+ }
+ }
+
+ a->fraction.ll = quotient;
+ return (a);
+ }
+}
+
+FLO_type
+divide (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+ fp_number_type *res;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ res = _fpdiv_parts (&a, &b);
+
+ return pack_d (res);
+}
+#endif
+
+int __fpcmp_parts (fp_number_type * a, fp_number_type *b);
+
+#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df)
+/* according to the demo, fpcmp returns a comparison with 0... thus
+ a<b -> -1
+ a==b -> 0
+ a>b -> +1
+ */
+
+int
+__fpcmp_parts (fp_number_type * a, fp_number_type * b)
+{
+#if 0
+ /* either nan -> unordered. Must be checked outside of this routine. */
+ if (isnan (a) && isnan (b))
+ {
+ return 1; /* still unordered! */
+ }
+#endif
+
+ if (isnan (a) || isnan (b))
+ {
+ return 1; /* how to indicate unordered compare? */
+ }
+ if (isinf (a) && isinf (b))
+ {
+      /* +inf > -inf, and +inf == +inf, as the table shows: */
+ /* b \a| +inf(0)| -inf(1)
+ ______\+--------+--------
+ +inf(0)| a==b(0)| a<b(-1)
+ -------+--------+--------
+ -inf(1)| a>b(1) | a==b(0)
+ -------+--------+--------
+ So since unordered must be non zero, just line up the columns...
+ */
+ return b->sign - a->sign;
+ }
+ /* but not both... */
+ if (isinf (a))
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (isinf (b))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (a) && iszero (b))
+ {
+ return 0;
+ }
+ if (iszero (a))
+ {
+ return b->sign ? 1 : -1;
+ }
+ if (iszero (b))
+ {
+ return a->sign ? -1 : 1;
+ }
+ /* now both are "normal". */
+ if (a->sign != b->sign)
+ {
+ /* opposite signs */
+ return a->sign ? -1 : 1;
+ }
+ /* same sign; exponents? */
+ if (a->normal_exp > b->normal_exp)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->normal_exp < b->normal_exp)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* same exponents; check size. */
+ if (a->fraction.ll > b->fraction.ll)
+ {
+ return a->sign ? -1 : 1;
+ }
+ if (a->fraction.ll < b->fraction.ll)
+ {
+ return a->sign ? 1 : -1;
+ }
+ /* after all that, they're equal. */
+ return 0;
+}
+#endif
+
+#if defined(L_compare_sf) || defined(L_compare_df)
+CMPtype
+compare (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#ifndef US_SOFTWARE_GOFAST
+
+/* These should be optimized for their specific tasks someday. */
+
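+/* A note on the (assumed) caller-side convention: the compiler tests the
+   returned value against zero using the same relation, so `a > b' becomes
+   `__gtsf2 (a, b) > 0' and `a == b' becomes `__eqsf2 (a, b) == 0'.  That
+   is why each NaN case below returns a value on the "false" side of zero
+   for its particular comparison. */
+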
+#if defined(L_eq_sf) || defined(L_eq_df)
+CMPtype
+_eq_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth == 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_ne_sf) || defined(L_ne_df)
+CMPtype
+_ne_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* true, truth != 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_gt_sf) || defined(L_gt_df)
+CMPtype
+_gt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth > 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#if defined(L_ge_sf) || defined(L_ge_df)
+CMPtype
+_ge_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return -1; /* false, truth >= 0 */
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#if defined(L_lt_sf) || defined(L_lt_df)
+CMPtype
+_lt_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth < 0 */
+
+ return __fpcmp_parts (&a, &b);
+}
+#endif
+
+#if defined(L_le_sf) || defined(L_le_df)
+CMPtype
+_le_f2 (FLO_type arg_a, FLO_type arg_b)
+{
+ fp_number_type a;
+ fp_number_type b;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ unpack_d ((FLO_union_type *) & arg_b, &b);
+
+ if (isnan (&a) || isnan (&b))
+ return 1; /* false, truth <= 0 */
+
+ return __fpcmp_parts (&a, &b) ;
+}
+#endif
+
+#endif /* ! US_SOFTWARE_GOFAST */
+
+#if defined(L_si_to_sf) || defined(L_si_to_df)
+FLO_type
+si_to_float (SItype arg_a)
+{
+ fp_number_type in;
+
+ in.class = CLASS_NUMBER;
+ in.sign = arg_a < 0;
+ if (!arg_a)
+ {
+ in.class = CLASS_ZERO;
+ }
+ else
+ {
+ in.normal_exp = FRACBITS + NGARDS;
+ if (in.sign)
+ {
+	  /* Special case for INT_MIN, since its magnitude has no
+	     positive SItype representation. */
+ if (arg_a == (SItype) 0x80000000)
+ {
+ return -2147483648.0;
+ }
+ in.fraction.ll = (-arg_a);
+ }
+ else
+ in.fraction.ll = arg_a;
+
+ while (in.fraction.ll < (1LL << (FRACBITS + NGARDS)))
+ {
+ in.fraction.ll <<= 1;
+ in.normal_exp -= 1;
+ }
+ }
+ return pack_d (&in);
+}
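+
+/* Worked example: si_to_float (1) starts with fraction 1 and normal_exp
+   FRACBITS + NGARDS; the loop then shifts the fraction up to IMPLICIT_1
+   while stepping normal_exp down to 0, so pack_d returns exactly 1.0. */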
+#endif
+
+#if defined(L_sf_to_si) || defined(L_df_to_si)
+SItype
+float_to_si (FLO_type arg_a)
+{
+ fp_number_type a;
+ SItype tmp;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* get reasonable MAX_SI_INT... */
+ if (isinf (&a))
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > 30)
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
+ tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+ return a.sign ? (-tmp) : (tmp);
+}
+#endif
+
+#if defined(L_sf_to_usi) || defined(L_df_to_usi)
+#ifdef US_SOFTWARE_GOFAST
+/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines,
+ we also define them for GOFAST because the ones in libgcc2.c have the
+ wrong names and I'd rather define these here and keep GOFAST CYG-LOC's
+ out of libgcc2.c. We can't define these here if not GOFAST because then
+ there'd be duplicate copies. */
+
+USItype
+float_to_usi (FLO_type arg_a)
+{
+ fp_number_type a;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ if (iszero (&a))
+ return 0;
+ if (isnan (&a))
+ return 0;
+ /* it is a negative number */
+ if (a.sign)
+ return 0;
+ /* get reasonable MAX_USI_INT... */
+ if (isinf (&a))
+ return MAX_USI_INT;
+ /* it is a number, but a small one */
+ if (a.normal_exp < 0)
+ return 0;
+ if (a.normal_exp > 31)
+ return MAX_USI_INT;
+ else if (a.normal_exp > (FRACBITS + NGARDS))
+ return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS));
+ else
+ return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
+}
+#endif
+#endif
+
+#if defined(L_negate_sf) || defined(L_negate_df)
+FLO_type
+negate (FLO_type arg_a)
+{
+ fp_number_type a;
+
+ unpack_d ((FLO_union_type *) & arg_a, &a);
+ flip_sign (&a);
+ return pack_d (&a);
+}
+#endif
+
+#ifdef FLOAT
+
+#if defined(L_make_sf)
+SFtype
+__make_fp(fp_class_type class,
+ unsigned int sign,
+ int exp,
+ USItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif
+
+#ifndef FLOAT_ONLY
+
+/* This enables one to build an fp library that supports float but not double.
+ Otherwise, we would get an undefined reference to __make_dp.
+ This is needed for some 8-bit ports that can't handle well values that
+ are 8-bytes in size, so we just don't support double for them at all. */
+
+extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype frac);
+
+#if defined(L_sf_to_df)
+DFtype
+sf_to_df (SFtype arg_a)
+{
+ fp_number_type in;
+
+ unpack_d ((FLO_union_type *) & arg_a, &in);
+ return __make_dp (in.class, in.sign, in.normal_exp,
+ ((UDItype) in.fraction.ll) << F_D_BITOFF);
+}
+#endif
+
+#endif
+#endif
+
+#ifndef FLOAT
+
+extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+
+#if defined(L_make_df)
+DFtype
+__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
+{
+ fp_number_type in;
+
+ in.class = class;
+ in.sign = sign;
+ in.normal_exp = exp;
+ in.fraction.ll = frac;
+ return pack_d (&in);
+}
+#endif
+
+#if defined(L_df_to_sf)
+SFtype
+df_to_sf (DFtype arg_a)
+{
+ fp_number_type in;
+ USItype sffrac;
+
+ unpack_d ((FLO_union_type *) & arg_a, &in);
+
+ sffrac = in.fraction.ll >> F_D_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
+}
+#endif
+
+#endif
+#endif /* !EXTENDED_FLOAT_STUBS */
diff --git a/libgcc/lib1thumb.asm b/libgcc/lib1thumb.asm
new file mode 100755
index 0000000..e0ff746
--- /dev/null
+++ b/libgcc/lib1thumb.asm
@@ -0,0 +1,736 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifdef __elf__
+#define __PLT__ (PLT)
+#define TYPE(x) .type SYM(x),function
+#define SIZE(x) .size SYM(x), . - SYM(x)
+#else
+#define __PLT__
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+#define RET mov pc, lr
+
+#define SYM(x) x
+
+work .req r4 @ XXXX is this safe ?
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ TYPE (__udivsi3)
+ .align 0
+ .thumb_func
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ @ Load the constant 0x10000000 into our work register
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ bcc Over1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+Over3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+Over4:
+ cmp dividend, #0 @ Early termination?
+ beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+Lgot_result:
+ mov r0, result
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__udivsi3)
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__umodsi3)
+ TYPE (__umodsi3)
+ .align 0
+ .thumb_func
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ bcs Over1
+ RET
+
+Over1:
+ @ Load the constant 0x10000000 into our work register
+ push { work }
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+	@ Test for possible subtractions.  On the final pass, this may
+	@ subtract too much from the dividend, so keep track of which
+	@ subtractions are done so that we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over2
+ sub dividend, dividend, divisor
+Over2:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over3:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over6
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over6
+ lsr divisor, #4
+ b Loop3
+
+Over6:
+	@ Any subtractions that we should not have done will be recorded in
+	@ the top three bits of "overdone".  Exactly which ones were not
+	@ needed is determined by the position of the bit, stored in ip.
+	@ If we terminated early because the dividend became zero, then
+	@ none of the tests below will match, since the bit in ip will not
+	@ be in the bottom nibble.
+
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ bne Over7
+ pop { work }
+ RET @ No fixups needed
+Over7:
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Over10
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__umodsi3)
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__divsi3)
+ TYPE (__divsi3)
+ .align 0
+ .thumb_func
+SYM (__divsi3):
+ cmp divisor, #0
+ beq Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ cmp dividend, #0
+ bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+	bcs	Lbignum
+	cmp	divisor, dividend
+	bcs	Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ lsl work, #3
+Loop2:
+ cmp divisor, work
+	bcs	Loop3
+	cmp	divisor, dividend
+	bcs	Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+	bcc	Over3
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+	bcc	Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+	bcc	Over5
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, result, work
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+	bcc	Over6
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, result, work
+Over6:
+ cmp dividend, #0 @ Early termination?
+	beq	Lgot_result
+	lsr	curbit, #4	@ No, any more bits to do?
+	beq	Lgot_result
+ lsr divisor, #4
+ b Loop3
+
+Lgot_result:
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+	bpl	Over7
+ neg r0, r0
+Over7:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__divsi3)
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__modsi3)
+ TYPE (__modsi3)
+ .align 0
+ .thumb_func
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ beq Ldiv0
+	bpl	Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ ip later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+	bpl	Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+	@ Test for possible subtractions.  On the final pass, this may
+	@ subtract too much from the dividend, so keep track of which
+	@ subtractions are done so that we can fix them up afterwards...
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over3
+ sub dividend, dividend, divisor
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over6
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over6:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over7
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over7
+ lsr divisor, #4
+ b Loop3
+
+Over7:
+	@ Any subtractions that we should not have done will be recorded in
+	@ the top three bits of "overdone".  Exactly which ones were not
+	@ needed is determined by the position of the bit, stored in ip.
+	@ If we terminated early because the dividend became zero, then
+	@ none of the tests below will match, since the bit in ip will not
+	@ be in the bottom nibble.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Lgot_result
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Lgot_result:
+ pop { work }
+ cmp work, #0
+ bpl Over10
+ neg dividend, dividend
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0) __PLT__
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+ SIZE (__modsi3)
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ TYPE (__div0)
+ .align 0
+ .thumb_func
+SYM (__div0):
+ RET
+
+ SIZE (__div0)
+
+#endif /* L_dvmd_tls */
+
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
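+
+@ A typical (assumed) indirect call from Thumb code would look like:
+@
+@	ldr	r3, =some_function	@ hypothetical target address
+@	bl	_call_via_r3		@ the bx below completes the call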
+
+ .text
+ .align 0
+
+.macro call_via register
+ .globl SYM (_call_via_\register)
+ TYPE (_call_via_\register)
+ .thumb_func
+SYM (_call_via_\register):
+ bx \register
+ nop
+
+ SIZE (_call_via_\register)
+.endm
+
+ call_via r0
+ call_via r1
+ call_via r2
+ call_via r3
+ call_via r4
+ call_via r5
+ call_via r6
+ call_via r7
+ call_via r8
+ call_via r9
+ call_via sl
+ call_via fp
+ call_via ip
+ call_via sp
+ call_via lr
+
+#endif /* L_call_via_rX */
+
+#ifdef L_interwork_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code,
+ when the target address is in an unknown instruction set. The address
+ of function to be called is loaded into a register and then one of these
+ labels is called via a BL instruction. This puts the return address
+ into the link register with the bottom bit set, and the code here
+ switches to the correct mode before executing the function. Unfortunately
+ the target code cannot be relied upon to return via a BX instruction, so
+   instead we have to store the return address on the stack and allow the
+ called function to return here instead. Upon return we recover the real
+ return address and use a BX to get back to Thumb mode. */
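+
+@ Control-flow sketch (a summary, not extra code): the Thumb entry point
+@ executes "bx pc", which lands in ARM mode at .Lchange_<reg>; if the
+@ target is ARM code (bottom bit clear), the real return address is
+@ pushed and lr is pointed at _arm_return, so the callee returns through
+@ the "ldmia r13!, {r12}; bx r12" stub above and back into Thumb mode.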
+
+ .text
+ .align 0
+
+ .code 32
+ .globl _arm_return
+_arm_return:
+ ldmia r13!, {r12}
+ bx r12
+
+.macro interwork register
+ .code 16
+
+ .globl SYM (_interwork_call_via_\register)
+ TYPE (_interwork_call_via_\register)
+ .thumb_func
+SYM (_interwork_call_via_\register):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_\register
+.Lchange_\register:
+ tst \register, #1
+ stmeqdb r13!, {lr}
+ adreq lr, _arm_return
+ bx \register
+
+ SIZE (_interwork_call_via_\register)
+.endm
+
+ interwork r0
+ interwork r1
+ interwork r2
+ interwork r3
+ interwork r4
+ interwork r5
+ interwork r6
+ interwork r7
+ interwork r8
+ interwork r9
+ interwork sl
+ interwork fp
+ interwork ip
+ interwork sp
+
+ /* The lr case has to be handled a little differently...*/
+ .code 16
+ .globl SYM (_interwork_call_via_lr)
+ TYPE (_interwork_call_via_lr)
+ .thumb_func
+SYM (_interwork_call_via_lr):
+ bx pc
+ nop
+
+ .code 32
+ .globl .Lchange_lr
+.Lchange_lr:
+ tst lr, #1
+ stmeqdb r13!, {lr}
+ mov ip, lr
+ adreq lr, _arm_return
+ bx ip
+
+ SIZE (_interwork_call_via_lr)
+
+#endif /* L_interwork_call_via_rX */
diff --git a/libgcc/libgcc1-test.c b/libgcc/libgcc1-test.c
new file mode 100755
index 0000000..0f59cbe
--- /dev/null
+++ b/libgcc/libgcc1-test.c
@@ -0,0 +1,117 @@
+/* This small function uses all the arithmetic operators that
+ libgcc1.c can handle. If you can link it, then
+ you have provided replacements for all the libgcc1.c functions that
+ your target machine needs. */
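+
+/* A build might exercise this roughly as follows; the commands and the
+   library name are illustrative, not taken from any actual makefile:
+
+       cc -c libgcc1-test.c
+       ld -o libgcc1-test libgcc1-test.o libgcc1.a
+*/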
+
+int foo ();
+double dfoo ();
+
+/* We don't want __main here because that can drag in atexit (among other
+ things) which won't necessarily exist yet. */
+
+main_without__main ()
+{
+ int a = foo (), b = foo ();
+ unsigned int au = foo (), bu = foo ();
+ float af = dfoo (), bf = dfoo ();
+ double ad = dfoo (), bd = dfoo ();
+
+ discard (a * b);
+ discard (a / b);
+ discard (a % b);
+
+ discard (au / bu);
+ discard (au % bu);
+
+ discard (a >> b);
+ discard (a << b);
+
+ discard (au >> bu);
+ discard (au << bu);
+
+ ddiscard (ad + bd);
+ ddiscard (ad - bd);
+ ddiscard (ad * bd);
+ ddiscard (ad / bd);
+ ddiscard (-ad);
+
+ ddiscard (af + bf);
+ ddiscard (af - bf);
+ ddiscard (af * bf);
+ ddiscard (af / bf);
+ ddiscard (-af);
+
+ discard ((int) ad);
+ discard ((int) af);
+
+ ddiscard ((double) a);
+ ddiscard ((float) a);
+ ddiscard ((float) ad);
+
+ discard (ad == bd);
+ discard (ad < bd);
+ discard (ad > bd);
+ discard (ad != bd);
+ discard (ad <= bd);
+ discard (ad >= bd);
+
+ discard (af == bf);
+ discard (af < bf);
+ discard (af > bf);
+ discard (af != bf);
+ discard (af <= bf);
+ discard (af >= bf);
+
+ return 0;
+}
+
+discard (x)
+ int x;
+{}
+
+ddiscard (x)
+ double x;
+{}
+
+foo ()
+{
+ static int table[] = {20, 69, 4, 12};
+ static int idx;
+
+ return table[idx++];
+}
+
+double
+dfoo ()
+{
+ static double table[] = {20.4, 69.96, 4.4, 202.202};
+ static int idx;
+
+ return table[idx++];
+}
+
+/* Provide functions that some versions of the linker use to default
+ the start address if -e symbol is not used, to avoid the warning
+ message saying the start address is defaulted. */
+extern void start() __asm__("start");
+extern void _start() __asm__("_start");
+extern void __start() __asm__("__start");
+
+/* Provide functions that might be needed by soft-float emulation routines. */
+void memcpy() {}
+
+void start() {}
+void _start() {}
+void __start() {}
+void mainCRTStartup() {}
+
+/* CYGNUS LOCAL - duplicate definition of memcpy() removed. */
+
+/* CYGNUS LOCAL v850 */
+#if defined __v850e__ || defined __v850ea__
+/* We need to use the symbol __ctbp in order to force the linker to define it. */
+extern int _ctbp;
+
+void _func() { _ctbp = 1; }
+#endif
+/* END CYGNUS LOCAL */
diff --git a/libgcc/libgcc1.c b/libgcc/libgcc1.c
new file mode 100755
index 0000000..bece500
--- /dev/null
+++ b/libgcc/libgcc1.c
@@ -0,0 +1,596 @@
+/* Subroutines needed by GCC output code on some machines. */
+/* Compile this file with the Unix C compiler! */
+/* Copyright (C) 1987, 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#include "config.h"
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+/* On some machines, cc is really GCC. For these machines, we can't
+ expect these functions to be properly compiled unless GCC open codes
+ the operation (which is precisely when the function won't be used).
+ So allow tm.h to specify ways of accomplishing the operations
+ by defining the macros perform_*.
+
+ On a machine where cc is some other compiler, there is usually no
+ reason to define perform_*. The other compiler normally has other ways
+ of implementing all of these operations.
+
+ In some cases a certain machine may come with GCC installed as cc
+ or may have some other compiler. Then it may make sense for tm.h
+ to define perform_* only if __GNUC__ is defined. */
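+
+/* For instance, a hypothetical tm.h for a machine with a millicode
+   multiply routine might override the default like this (the helper
+   name is made up for illustration):
+
+       #define perform_mulsi3(a, b) return _hw_mulsi3 (a, b)
+*/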
+
+#ifndef perform_mulsi3
+#define perform_mulsi3(a, b) return a * b
+#endif
+
+#ifndef perform_divsi3
+#define perform_divsi3(a, b) return a / b
+#endif
+
+#ifndef perform_udivsi3
+#define perform_udivsi3(a, b) return a / b
+#endif
+
+#ifndef perform_modsi3
+#define perform_modsi3(a, b) return a % b
+#endif
+
+#ifndef perform_umodsi3
+#define perform_umodsi3(a, b) return a % b
+#endif
+
+#ifndef perform_lshrsi3
+#define perform_lshrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_ashrsi3
+#define perform_ashrsi3(a, b) return a >> b
+#endif
+
+#ifndef perform_ashlsi3
+#define perform_ashlsi3(a, b) return a << b
+#endif
+
+#ifndef perform_adddf3
+#define perform_adddf3(a, b) return a + b
+#endif
+
+#ifndef perform_subdf3
+#define perform_subdf3(a, b) return a - b
+#endif
+
+#ifndef perform_muldf3
+#define perform_muldf3(a, b) return a * b
+#endif
+
+#ifndef perform_divdf3
+#define perform_divdf3(a, b) return a / b
+#endif
+
+#ifndef perform_addsf3
+#define perform_addsf3(a, b) return INTIFY (a + b)
+#endif
+
+#ifndef perform_subsf3
+#define perform_subsf3(a, b) return INTIFY (a - b)
+#endif
+
+#ifndef perform_mulsf3
+#define perform_mulsf3(a, b) return INTIFY (a * b)
+#endif
+
+#ifndef perform_divsf3
+#define perform_divsf3(a, b) return INTIFY (a / b)
+#endif
+
+#ifndef perform_negdf2
+#define perform_negdf2(a) return -a
+#endif
+
+#ifndef perform_negsf2
+#define perform_negsf2(a) return INTIFY (-a)
+#endif
+
+#ifndef perform_fixdfsi
+#define perform_fixdfsi(a) return (nongcc_SI_type) a
+#endif
+
+#ifndef perform_fixsfsi
+#define perform_fixsfsi(a) return (nongcc_SI_type) a
+#endif
+
+#ifndef perform_floatsidf
+#define perform_floatsidf(a) return (double) a
+#endif
+
+#ifndef perform_floatsisf
+#define perform_floatsisf(a) return INTIFY ((float) a)
+#endif
+
+#ifndef perform_extendsfdf2
+#define perform_extendsfdf2(a) return a
+#endif
+
+#ifndef perform_truncdfsf2
+#define perform_truncdfsf2(a) return INTIFY (a)
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
+
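+/* Compiler-generated code therefore tests the result against zero:
+   `a == b' compiles to `__eqdf2 (a, b) == 0', `a < b' to
+   `__ltdf2 (a, b) < 0', and so on, which is why the defaults below
+   fold the truth value into the required sign.  */
+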
+#ifndef perform_eqdf2
+#define perform_eqdf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nedf2
+#define perform_nedf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtdf2
+#define perform_gtdf2(a, b) return a > b
+#endif
+
+#ifndef perform_gedf2
+#define perform_gedf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltdf2
+#define perform_ltdf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_ledf2
+#define perform_ledf2(a, b) return 1 - (a <= b)
+#endif
+
+#ifndef perform_eqsf2
+#define perform_eqsf2(a, b) return !(a == b)
+#endif
+
+#ifndef perform_nesf2
+#define perform_nesf2(a, b) return a != b
+#endif
+
+#ifndef perform_gtsf2
+#define perform_gtsf2(a, b) return a > b
+#endif
+
+#ifndef perform_gesf2
+#define perform_gesf2(a, b) return (a >= b) - 1
+#endif
+
+#ifndef perform_ltsf2
+#define perform_ltsf2(a, b) return -(a < b)
+#endif
+
+#ifndef perform_lesf2
+#define perform_lesf2(a, b) return 1 - (a <= b)
+#endif
+
+/* Define the C data type to use for an SImode value. */
+
+#ifndef nongcc_SI_type
+#define nongcc_SI_type long int
+#endif
+
+/* Define the C data type to use for a value of word size.  */
+#ifndef nongcc_word_type
+#define nongcc_word_type nongcc_SI_type
+#endif
+
+/* Define the type to be used for returning an SF mode value
+ and the method for turning a float into that type.
+ These definitions work for machines where an SF value is
+ returned in the same register as an int. */
+
+#ifndef FLOAT_VALUE_TYPE
+#define FLOAT_VALUE_TYPE int
+#endif
+
+#ifndef INTIFY
+#define INTIFY(FLOATVAL) (intify.f = (FLOATVAL), intify.i)
+#endif
+
+#ifndef FLOATIFY
+#define FLOATIFY(INTVAL) ((INTVAL).f)
+#endif
+
+#ifndef FLOAT_ARG_TYPE
+#define FLOAT_ARG_TYPE union flt_or_int
+#endif
+
+union flt_or_value { FLOAT_VALUE_TYPE i; float f; };
+
+union flt_or_int { int i; float f; };
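+
+/* Note that INTIFY expands in terms of a local union named `intify',
+   which each SFmode routine below declares.  The float result is
+   stored through `intify.f' and returned as `intify.i', so it travels
+   back in an integer register; FLOATIFY does the reverse for incoming
+   FLOAT_ARG_TYPE arguments.  */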
+
+
+#ifdef L_mulsi3
+nongcc_SI_type
+__mulsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_mulsi3 (a, b);
+}
+#endif
+
+#ifdef L_udivsi3
+nongcc_SI_type
+__udivsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_udivsi3 (a, b);
+}
+#endif
+
+#ifdef L_divsi3
+nongcc_SI_type
+__divsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_divsi3 (a, b);
+}
+#endif
+
+#ifdef L_umodsi3
+nongcc_SI_type
+__umodsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_umodsi3 (a, b);
+}
+#endif
+
+#ifdef L_modsi3
+nongcc_SI_type
+__modsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_modsi3 (a, b);
+}
+#endif
+
+#ifdef L_lshrsi3
+nongcc_SI_type
+__lshrsi3 (a, b)
+ unsigned nongcc_SI_type a, b;
+{
+ perform_lshrsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashrsi3
+nongcc_SI_type
+__ashrsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashrsi3 (a, b);
+}
+#endif
+
+#ifdef L_ashlsi3
+nongcc_SI_type
+__ashlsi3 (a, b)
+ nongcc_SI_type a, b;
+{
+ perform_ashlsi3 (a, b);
+}
+#endif
+
+#ifdef L_divdf3
+double
+__divdf3 (a, b)
+ double a, b;
+{
+ perform_divdf3 (a, b);
+}
+#endif
+
+#ifdef L_muldf3
+double
+__muldf3 (a, b)
+ double a, b;
+{
+ perform_muldf3 (a, b);
+}
+#endif
+
+#ifdef L_negdf2
+double
+__negdf2 (a)
+ double a;
+{
+ perform_negdf2 (a);
+}
+#endif
+
+#ifdef L_adddf3
+double
+__adddf3 (a, b)
+ double a, b;
+{
+ perform_adddf3 (a, b);
+}
+#endif
+
+#ifdef L_subdf3
+double
+__subdf3 (a, b)
+ double a, b;
+{
+ perform_subdf3 (a, b);
+}
+#endif
+
+/* Note that eqdf2 returns a value for "true" that is == 0,
+ nedf2 returns a value for "true" that is != 0,
+ gtdf2 returns a value for "true" that is > 0,
+ and so on. */
+
+#ifdef L_eqdf2
+nongcc_word_type
+__eqdf2 (a, b)
+ double a, b;
+{
+ /* Value == 0 iff a == b. */
+ perform_eqdf2 (a, b);
+}
+#endif
+
+#ifdef L_nedf2
+nongcc_word_type
+__nedf2 (a, b)
+ double a, b;
+{
+ /* Value != 0 iff a != b. */
+ perform_nedf2 (a, b);
+}
+#endif
+
+#ifdef L_gtdf2
+nongcc_word_type
+__gtdf2 (a, b)
+ double a, b;
+{
+ /* Value > 0 iff a > b. */
+ perform_gtdf2 (a, b);
+}
+#endif
+
+#ifdef L_gedf2
+nongcc_word_type
+__gedf2 (a, b)
+ double a, b;
+{
+ /* Value >= 0 iff a >= b. */
+ perform_gedf2 (a, b);
+}
+#endif
+
+#ifdef L_ltdf2
+nongcc_word_type
+__ltdf2 (a, b)
+ double a, b;
+{
+ /* Value < 0 iff a < b. */
+ perform_ltdf2 (a, b);
+}
+#endif
+
+#ifdef L_ledf2
+nongcc_word_type
+__ledf2 (a, b)
+ double a, b;
+{
+ /* Value <= 0 iff a <= b. */
+ perform_ledf2 (a, b);
+}
+#endif
+
+#ifdef L_fixdfsi
+nongcc_SI_type
+__fixdfsi (a)
+ double a;
+{
+ perform_fixdfsi (a);
+}
+#endif
+
+#ifdef L_fixsfsi
+nongcc_SI_type
+__fixsfsi (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_fixsfsi (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_floatsidf
+double
+__floatsidf (a)
+ nongcc_SI_type a;
+{
+ perform_floatsidf (a);
+}
+#endif
+
+#ifdef L_floatsisf
+FLOAT_VALUE_TYPE
+__floatsisf (a)
+ nongcc_SI_type a;
+{
+ union flt_or_value intify;
+ perform_floatsisf (a);
+}
+#endif
+
+#ifdef L_addsf3
+FLOAT_VALUE_TYPE
+__addsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_addsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_negsf2
+FLOAT_VALUE_TYPE
+__negsf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_negsf2 (FLOATIFY (a));
+}
+#endif
+
+#ifdef L_subsf3
+FLOAT_VALUE_TYPE
+__subsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_subsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_eqsf2
+nongcc_word_type
+__eqsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value == 0 iff a == b. */
+ perform_eqsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_nesf2
+nongcc_word_type
+__nesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value != 0 iff a != b. */
+ perform_nesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gtsf2
+nongcc_word_type
+__gtsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value > 0 iff a > b. */
+ perform_gtsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_gesf2
+nongcc_word_type
+__gesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value >= 0 iff a >= b. */
+ perform_gesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_ltsf2
+nongcc_word_type
+__ltsf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value < 0 iff a < b. */
+ perform_ltsf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_lesf2
+nongcc_word_type
+__lesf2 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_int intify;
+ /* Value <= 0 iff a <= b. */
+ perform_lesf2 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_mulsf3
+FLOAT_VALUE_TYPE
+__mulsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_mulsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_divsf3
+FLOAT_VALUE_TYPE
+__divsf3 (a, b)
+ FLOAT_ARG_TYPE a, b;
+{
+ union flt_or_value intify;
+ perform_divsf3 (FLOATIFY (a), FLOATIFY (b));
+}
+#endif
+
+#ifdef L_truncdfsf2
+FLOAT_VALUE_TYPE
+__truncdfsf2 (a)
+ double a;
+{
+ union flt_or_value intify;
+ perform_truncdfsf2 (a);
+}
+#endif
+
+#ifdef L_extendsfdf2
+double
+__extendsfdf2 (a)
+ FLOAT_ARG_TYPE a;
+{
+ union flt_or_value intify;
+ perform_extendsfdf2 (FLOATIFY (a));
+}
+#endif
diff --git a/libgcc/libgcc2.c b/libgcc/libgcc2.c
new file mode 100755
index 0000000..cf7231f
--- /dev/null
+++ b/libgcc/libgcc2.c
@@ -0,0 +1,946 @@
+/* More subroutines needed by GCC output code on some machines. */
+/* Compile this one with gcc. */
+/* Copyright (C) 1989, 92-97, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#include <stddef.h>
+
+/* Don't use `fancy_abort' here even if config.h says to use it. */
+#ifdef abort
+#undef abort
+#endif
+
+/* In the first part of this file, we are interfacing to calls generated
+ by the compiler itself. These calls pass values into these routines
+ which have very specific modes (rather than very specific types), and
+ these compiler-generated calls also expect any return values to have
+ very specific modes (rather than very specific types). Thus, we need
+ to avoid using regular C language type names in this part of the file
+ because the sizes for those types can be configured to be anything.
+ Instead we use the following special type names. */
+
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+
+typedef float SFtype __attribute__ ((mode (SF)));
+typedef float DFtype __attribute__ ((mode (DF)));
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+/* Make sure that we don't accidentally use any normal C language built-in
+ type names in the first part of this file. Instead we want to use *only*
+   the type names defined above.  The following macro definitions ensure
+ that if we *do* accidentally use some normal C language built-in type name,
+ we will get a syntax error. */
+
+#define char bogus_type
+#define short bogus_type
+#define int bogus_type
+#define long bogus_type
+#define unsigned bogus_type
+#define float bogus_type
+#define double bogus_type
+
+#define SI_TYPE_SIZE (sizeof (SItype) * 8)
+
+struct DIstruct {SItype low, high;};
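+
+/* Low word first: this layout matches a target with little-endian
+   word order.  A big-endian port would swap the two fields (the full
+   GCC sources select this with LIBGCC2_WORDS_BIG_ENDIAN).  */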
+
+/* We need this union to unpack/pack DImode values, since we don't have
+ any arithmetic yet. Incoming DImode parameters are stored into the
+ `ll' field, and the unpacked result is read from the struct `s'. */
+
+typedef union
+{
+ struct DIstruct s;
+ DItype ll;
+} DIunion;
+
+#if (defined (L_udivmoddi4) || defined (L_muldi3) || defined (L_udiv_w_sdiv)\
+ || defined (L_divdi3) || defined (L_udivdi3) \
+ || defined (L_moddi3) || defined (L_umoddi3))
+
+#include "longlong.h"
+
+#endif /* udiv or mul */
+
+extern DItype __fixunssfdi (SFtype a);
+extern DItype __fixunsdfdi (DFtype a);
+
+#if defined (L_negdi2) || defined (L_divdi3) || defined (L_moddi3)
+#if defined (L_divdi3) || defined (L_moddi3)
+static inline
+#endif
+DItype
+__negdi2 (DItype u)
+{
+ DIunion w;
+ DIunion uu;
+
+ uu.ll = u;
+
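+  /* Negate word by word: negate the low word, and subtract a borrow
+     from the negated high word when the low word was nonzero (its
+     negation wrapped).  */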
+ w.s.low = -uu.s.low;
+ w.s.high = -uu.s.high - ((USItype) w.s.low > 0);
+
+ return w.ll;
+}
+#endif
+
+/* Unless shift functions are defined with full ANSI prototypes,
+ parameter b will be promoted to int if word_type is smaller than an int. */
+#ifdef L_lshrdi3
+DItype
+__lshrdi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
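+  /* bm is how far the shift falls short of one word.  bm <= 0 means
+     the shift crosses the word boundary: the high result word is zero
+     and the low result word is the old high word shifted by the
+     excess.  Otherwise bits that fall out of the high word are
+     carried into the low word.  */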
+ bm = (sizeof (SItype) * 8) - b;
+ if (bm <= 0)
+ {
+ w.s.high = 0;
+ w.s.low = (USItype)uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = (USItype)uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashldi3
+DItype
+__ashldi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
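+  /* Mirror image of __lshrdi3: bits move from the low word up into
+     the high word.  */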
+ bm = (sizeof (SItype) * 8) - b;
+ if (bm <= 0)
+ {
+ w.s.low = 0;
+ w.s.high = (USItype)uu.s.low << -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.low >> bm;
+ w.s.low = (USItype)uu.s.low << b;
+ w.s.high = ((USItype)uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ashrdi3
+DItype
+__ashrdi3 (DItype u, word_type b)
+{
+ DIunion w;
+ word_type bm;
+ DIunion uu;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+
+ bm = (sizeof (SItype) * 8) - b;
+ if (bm <= 0)
+ {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high = uu.s.high >> (sizeof (SItype) * 8 - 1);
+ w.s.low = uu.s.high >> -bm;
+ }
+ else
+ {
+ USItype carries = (USItype)uu.s.high << bm;
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((USItype)uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_ffsdi2
+DItype
+__ffsdi2 (DItype u)
+{
+ DIunion uu, w;
+ uu.ll = u;
+ w.s.high = 0;
+ w.s.low = ffs (uu.s.low);
+ if (w.s.low != 0)
+ return w.ll;
+ w.s.low = ffs (uu.s.high);
+ if (w.s.low != 0)
+ {
+ w.s.low += 8 * sizeof (SItype);
+ return w.ll;
+ }
+ return w.ll;
+}
+#endif
+
+#ifdef L_muldi3
+DItype
+__muldi3 (DItype u, DItype v)
+{
+ DIunion w;
+ DIunion uu, vv;
+
+ uu.ll = u,
+ vv.ll = v;
+
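+  /* (u1*2^32 + u0) * (v1*2^32 + v0) mod 2^64
+       = u0*v0 + ((u0*v1 + u1*v0) << 32);
+     the u1*v1 term is shifted entirely out of range and dropped.
+     __umulsidi3 (from longlong.h) supplies the full 64-bit u0*v0.  */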
+ w.ll = __umulsidi3 (uu.s.low, vv.s.low);
+ w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ + (USItype) uu.s.high * (USItype) vv.s.low);
+
+ return w.ll;
+}
+#endif
+
+#ifdef L_udiv_w_sdiv
+#if defined (sdiv_qrnnd)
+USItype
+__udiv_w_sdiv (USItype *rp, USItype a1, USItype a0, USItype d)
+{
+ USItype q, r;
+ USItype c0, c1, b1;
+
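+  /* Unsigned two-word by one-word division built out of the signed
+     primitive sdiv_qrnnd.  The cases split on whether the divisor,
+     and then the dividend, are small enough for a signed divide to
+     produce the right answer directly; otherwise the operands are
+     halved or biased first and the quotient and remainder fixed up.  */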
+ if ((SItype) d >= 0)
+ {
+ if (a1 < d - a1 - (a0 >> (SI_TYPE_SIZE - 1)))
+ {
+ /* dividend, divisor, and quotient are nonnegative */
+ sdiv_qrnnd (q, r, a1, a0, d);
+ }
+ else
+ {
+ /* Compute c1*2^32 + c0 = a1*2^32 + a0 - 2^31*d */
+ sub_ddmmss (c1, c0, a1, a0, d >> 1, d << (SI_TYPE_SIZE - 1));
+ /* Divide (c1*2^32 + c0) by d */
+ sdiv_qrnnd (q, r, c1, c0, d);
+ /* Add 2^31 to quotient */
+ q += (USItype) 1 << (SI_TYPE_SIZE - 1);
+ }
+ }
+ else
+ {
+ b1 = d >> 1; /* d/2, between 2^30 and 2^31 - 1 */
+ c1 = a1 >> 1; /* A/2 */
+ c0 = (a1 << (SI_TYPE_SIZE - 1)) + (a0 >> 1);
+
+ if (a1 < b1) /* A < 2^32*b1, so A/2 < 2^31*b1 */
+ {
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ r = 2*r + (a0 & 1); /* Remainder from A/(2*b1) */
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else if (c1 < b1) /* So 2^31 <= (A/2)/b1 < 2^32 */
+ {
+ c1 = (b1 - 1) - c1;
+ c0 = ~c0; /* logical NOT */
+
+ sdiv_qrnnd (q, r, c1, c0, b1); /* (A/2) / (d/2) */
+
+ q = ~q; /* (A/2)/b1 */
+ r = (b1 - 1) - r;
+
+ r = 2*r + (a0 & 1); /* A/(2*b1) */
+
+ if ((d & 1) != 0)
+ {
+ if (r >= q)
+ r = r - q;
+ else if (q - r <= d)
+ {
+ r = r - q + d;
+ q--;
+ }
+ else
+ {
+ r = r - q + 2*d;
+ q -= 2;
+ }
+ }
+ }
+ else /* Implies c1 = b1 */
+ { /* Hence a1 = d - 1 = 2*b1 - 1 */
+ if (a0 >= -d)
+ {
+ q = -1;
+ r = a0 + d;
+ }
+ else
+ {
+ q = -2;
+ r = a0 + 2*d;
+ }
+ }
+ }
+
+ *rp = r;
+ return q;
+}
+#else
+/* If sdiv_qrnnd doesn't exist, define dummy __udiv_w_sdiv. */
+USItype
+__udiv_w_sdiv (USItype *rp __attribute__ ((__unused__)),
+ USItype a1 __attribute__ ((__unused__)),
+ USItype a0 __attribute__ ((__unused__)),
+ USItype d __attribute__ ((__unused__)))
+{
+ return 0;
+}
+#endif
+#endif
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+#define L_udivmoddi4
+#endif
+
+#ifdef L_udivmoddi4
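+/* __clz_tab[i] is the bit length of i (floor(log2(i)) + 1, and 0 for
+   i == 0); the generic count_leading_zeros fallback in longlong.h
+   uses it as a lookup table.  */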
+static const UQItype __clz_tab[] =
+{
+ 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+ 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+};
+
+#if (defined (L_udivdi3) || defined (L_divdi3) || \
+ defined (L_umoddi3) || defined (L_moddi3))
+static inline
+#endif
+UDItype
+__udivmoddi4 (UDItype n, UDItype d, UDItype *rp)
+{
+ DIunion ww;
+ DIunion nn, dd;
+ DIunion rr;
+ USItype d0, d1, n0, n1, n2;
+ USItype q0, q1;
+ USItype b, bm;
+
+ nn.ll = n;
+ dd.ll = d;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
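+  /* In the case comments below each letter is one word: `n'/`d' for a
+     dividend/divisor word, `q' for a quotient word, and `0' for a word
+     known to be zero; e.g. `0q = nn / 0D' is a two-word dividend over
+     a one-word divisor giving a one-word quotient.  */
+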
+#if !UDIV_NEEDS_NORMALIZATION
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ udiv_qrnnd (q1, n1, 0, n1, d0);
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0)
+ {
+ if (d0 > n1)
+ {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm != 0)
+ {
+ /* Normalize, i.e. make the most significant bit of the
+ denominator set. */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ }
+ else
+ {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ d0 = 1 / d0; /* Divide intentionally by zero. */
+
+ count_leading_zeros (bm, d0);
+
+ if (bm == 0)
+ {
+ /* From (n1 >= d0) /\ (the most significant bit of d0 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ leading quotient digit q1 = 1).
+
+ This special case is necessary, not an optimization.
+ (Shifts counts of SI_TYPE_SIZE are undefined.) */
+
+ n1 -= d0;
+ q1 = 1;
+ }
+ else
+ {
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd (q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0)
+ {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+ }
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ else
+ {
+ if (d1 > n1)
+ {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ /* 0q = NN / dd */
+
+ count_leading_zeros (bm, d1);
+ if (bm == 0)
+ {
+ /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+ conclude (the most significant bit of n1 is set) /\ (the
+ quotient digit q0 = 0 or 1).
+
+ This special case is necessary, not an optimization. */
+
+	      /* The condition on the next line takes advantage of the
+		 fact that n1 >= d1 (true due to program flow).  */
+ if (n1 > d1 || n0 >= d0)
+ {
+ q0 = 1;
+ sub_ddmmss (n1, n0, n1, n0, d1, d0);
+ }
+ else
+ q0 = 0;
+
+ q1 = 0;
+
+ if (rp != 0)
+ {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ }
+ else
+ {
+ USItype m1, m0;
+ /* Normalize. */
+
+ b = SI_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd (q0, n1, n2, n1, d1);
+ umul_ppmm (m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0))
+ {
+ q0--;
+ sub_ddmmss (m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0)
+ {
+ sub_ddmmss (n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+ return ww.ll;
+}
+#endif
+
+#ifdef L_divdi3
+UDItype __udivmoddi4 ();
+
+DItype
+__divdi3 (DItype u, DItype v)
+{
+ word_type c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ c = ~c,
+ vv.ll = __negdi2 (vv.ll);
+
+ w = __udivmoddi4 (uu.ll, vv.ll, (UDItype *) 0);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_moddi3
+UDItype __udivmoddi4 ();
+DItype
+__moddi3 (DItype u, DItype v)
+{
+ word_type c = 0;
+ DIunion uu, vv;
+ DItype w;
+
+ uu.ll = u;
+ vv.ll = v;
+
+ if (uu.s.high < 0)
+ c = ~c,
+ uu.ll = __negdi2 (uu.ll);
+ if (vv.s.high < 0)
+ vv.ll = __negdi2 (vv.ll);
+
+  (void) __udivmoddi4 (uu.ll, vv.ll, (UDItype *) &w);
+ if (c)
+ w = __negdi2 (w);
+
+ return w;
+}
+#endif
+
+#ifdef L_umoddi3
+UDItype __udivmoddi4 ();
+UDItype
+__umoddi3 (UDItype u, UDItype v)
+{
+ UDItype w;
+
+ (void) __udivmoddi4 (u, v, &w);
+
+ return w;
+}
+#endif
+
+#ifdef L_udivdi3
+UDItype __udivmoddi4 ();
+UDItype
+__udivdi3 (UDItype n, UDItype d)
+{
+ return __udivmoddi4 (n, d, (UDItype *) 0);
+}
+#endif
+
+#ifdef L_cmpdi2
+word_type
+__cmpdi2 (DItype a, DItype b)
+{
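+  /* Returns 0, 1, or 2 for a < b, a == b, a > b respectively.  */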
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if (au.s.high < bu.s.high)
+ return 0;
+ else if (au.s.high > bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#ifdef L_ucmpdi2
+word_type
+__ucmpdi2 (DItype a, DItype b)
+{
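+  /* Same 0/1/2 convention as __cmpdi2, but both words compare
+     unsigned.  */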
+ DIunion au, bu;
+
+ au.ll = a, bu.ll = b;
+
+ if ((USItype) au.s.high < (USItype) bu.s.high)
+ return 0;
+ else if ((USItype) au.s.high > (USItype) bu.s.high)
+ return 2;
+ if ((USItype) au.s.low < (USItype) bu.s.low)
+ return 0;
+ else if ((USItype) au.s.low > (USItype) bu.s.low)
+ return 2;
+ return 1;
+}
+#endif
+
+#ifdef L_fixunsdfdi
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunsdfdi (DFtype a)
+{
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixdfdi
+DItype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - __fixunsdfdi (-a);
+ return __fixunsdfdi (a);
+}
+#endif
+
+#ifdef L_fixunssfdi
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DItype
+__fixunssfdi (SFtype original_a)
+{
+ /* Convert the SFtype to a DFtype, because that is surely not going
+ to lose any bits. Some day someone else can write a faster version
+ that avoids converting to DFtype, and verify it really works right. */
+ DFtype a = original_a;
+ DFtype b;
+ UDItype v;
+
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ b = (a / HIGH_WORD_COEFF);
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ v = (USItype) b;
+ v <<= WORD_SIZE;
+ /* Remove high part from the DFtype, leaving the low part as flonum. */
+ a -= (DFtype)v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (- a);
+ else
+ v += (USItype) a;
+ return v;
+}
+#endif
+
+#ifdef L_fixsfdi
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - __fixunssfdi (-a);
+ return __fixunssfdi (a);
+}
+#endif
+
+#ifdef L_floatdidf
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+
+DFtype
+__floatdidf (DItype u)
+{
+ DFtype d;
+
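+  /* Build high * 2^WORD_SIZE + low in DFtype.  The scaling is done as
+     two multiplies by 2^(WORD_SIZE/2), each a power of two that the
+     DFtype represents exactly.  */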
+ d = (SItype) (u >> WORD_SIZE);
+ d *= HIGH_HALFWORD_COEFF;
+ d *= HIGH_HALFWORD_COEFF;
+ d += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return d;
+}
+#endif
+
+#ifdef L_floatdisf
+#define WORD_SIZE (sizeof (SItype) * 8)
+#define HIGH_HALFWORD_COEFF (((UDItype) 1) << (WORD_SIZE / 2))
+#define HIGH_WORD_COEFF (((UDItype) 1) << WORD_SIZE)
+#define DI_SIZE (sizeof (DItype) * 8)
+
+/* Define codes for all the float formats that we know of. Note
+ that this is copied from real.h. */
+
+#define UNKNOWN_FLOAT_FORMAT 0
+#define IEEE_FLOAT_FORMAT 1
+#define VAX_FLOAT_FORMAT 2
+#define IBM_FLOAT_FORMAT 3
+
+/* Default to IEEE float if not specified. Nearly all machines use it. */
+#ifndef HOST_FLOAT_FORMAT
+#define HOST_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+#endif
+
+#if HOST_FLOAT_FORMAT == IEEE_FLOAT_FORMAT
+#define DF_SIZE 53
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == IBM_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+#if HOST_FLOAT_FORMAT == VAX_FLOAT_FORMAT
+#define DF_SIZE 56
+#define SF_SIZE 24
+#endif
+
+SFtype
+__floatdisf (DItype u)
+{
+ /* Do the calculation in DFmode
+ so that we don't lose any of the precision of the high word
+ while multiplying it. */
+ DFtype f;
+
+ /* Protect against double-rounding error.
+ Represent any low-order bits, that might be truncated in DFmode,
+ by a bit that won't be lost. The bit can go in anywhere below the
+ rounding position of the SFmode. A fixed mask and bit position
+ handles all usual configurations. It doesn't handle the case
+ of 128-bit DImode, however. */
+ if (DF_SIZE < DI_SIZE
+ && DF_SIZE > (DI_SIZE - DF_SIZE + SF_SIZE))
+ {
+#define REP_BIT ((USItype) 1 << (DI_SIZE - DF_SIZE))
+ if (! (- ((DItype) 1 << DF_SIZE) < u
+ && u < ((DItype) 1 << DF_SIZE)))
+ {
+ if ((USItype) u & (REP_BIT - 1))
+ u |= REP_BIT;
+ }
+ }
+ f = (SItype) (u >> WORD_SIZE);
+ f *= HIGH_HALFWORD_COEFF;
+ f *= HIGH_HALFWORD_COEFF;
+ f += (USItype) (u & (HIGH_WORD_COEFF - 1));
+
+ return (SFtype) f;
+}
+#endif
+
+#ifdef L_fixunsdfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunsdfsi (DFtype a)
+{
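+  /* When a doesn't fit in a signed SItype, bias it down by 2^31
+     (LONG_MIN is -2^31 here, assuming a 32-bit long), convert, and
+     restore the bias in integer arithmetic so the high bit survives.  */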
+ if (a >= - (DFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+#ifdef L_fixunssfsi
+/* Reenable the normal types, in case limits.h needs them. */
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double
+#undef MIN
+#undef MAX
+#include <limits.h>
+
+USItype
+__fixunssfsi (SFtype a)
+{
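+  /* Same 2^31 bias trick as __fixunsdfsi above.  */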
+ if (a >= - (SFtype) LONG_MIN)
+ return (SItype) (a + LONG_MIN) - LONG_MIN;
+ return (SItype) a;
+}
+#endif
+
+/* From here on down, the routines use normal data types. */
+
+#define SItype bogus_type
+#define USItype bogus_type
+#define DItype bogus_type
+#define UDItype bogus_type
+#define SFtype bogus_type
+#define DFtype bogus_type
+
+#undef char
+#undef short
+#undef int
+#undef long
+#undef unsigned
+#undef float
+#undef double