* GMP code updated from gmp-2.0 release.
* stdlib/Makefile (mpn-routines): Removed add_1, added inlines.
* sysdeps/generic/add_1.c: File removed.
* stdlib/strtod.c: mp_limb is now mp_limb_t.
* stdlib/fpioconst.c, stdlib/fpioconst.h: Likewise.
* stdio-common/_itoa.c: Likewise.
* stdio-common/printf_fp.c: Likewise.
Don't include ansidecl.h.
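The hunks that follow are mechanical: gmp-2.0 renames the limb type from mp_limb to mp_limb_t, so only the spelling of declarations and casts changes while the underlying integer type stays the same. A minimal sketch of the typedefs gmp-2.0's gmp.h is expected to supply (the exact preprocessor conditionals here are an assumption, not copied from gmp.h):

  /* Sketch only: how the limb type is presumed to be defined by gmp-2.0.  */
  #ifdef _LONG_LONG_LIMB
  typedef unsigned long long int mp_limb_t;
  #else
  typedef unsigned long int mp_limb_t;
  #endif

  /* Before this update:  mp_limb cy;
     After this update:   mp_limb_t cy;  */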
struct base_table_t
{
#if (UDIV_TIME > 2 * UMUL_TIME)
- mp_limb base_multiplier;
+ mp_limb_t base_multiplier;
#endif
char flag;
char post_shift;
{
char normalization_steps;
char ndigits;
- mp_limb base PACK;
+ mp_limb_t base PACK;
#if UDIV_TIME > 2 * UMUL_TIME
- mp_limb base_ninv PACK;
+ mp_limb_t base_ninv PACK;
#endif
} big;
#endif
do \
{ \
/* `unsigned long long int' always has 64 bits. */ \
- mp_limb work_hi = value >> (64 - BITS_PER_MP_LIMB); \
+ mp_limb_t work_hi = value >> (64 - BITS_PER_MP_LIMB); \
\
if (BITS_PER_MP_LIMB == 32) \
if (work_hi != 0) \
{ \
- mp_limb work_lo; \
+ mp_limb_t work_lo; \
int cnt; \
\
work_lo = value & 0xfffffffful; \
default:
{
#if BITS_PER_MP_LIMB == 64
- mp_limb base_multiplier = brec->base_multiplier;
+ mp_limb_t base_multiplier = brec->base_multiplier;
if (brec->flag)
while (value != 0)
{
- mp_limb quo, rem, x, dummy;
+ mp_limb_t quo, rem, x, dummy;
umul_ppmm (x, dummy, value, base_multiplier);
quo = (x + ((value - x) >> 1)) >> (brec->post_shift - 1);
else
while (value != 0)
{
- mp_limb quo, rem, x, dummy;
+ mp_limb_t quo, rem, x, dummy;
umul_ppmm (x, dummy, value, base_multiplier);
quo = x >> brec->post_shift;
}
#endif
#if BITS_PER_MP_LIMB == 32
- mp_limb t[3];
+ mp_limb_t t[3];
int n;
/* First convert x0 to 1-3 words in base s->big.base.
Optimize for frequent cases of 32 bit numbers. */
- if ((mp_limb) (value >> 32) >= 1)
+ if ((mp_limb_t) (value >> 32) >= 1)
{
int big_normalization_steps = brec->big.normalization_steps;
- mp_limb big_base_norm = brec->big.base << big_normalization_steps;
+ mp_limb_t big_base_norm = brec->big.base << big_normalization_steps;
- if ((mp_limb) (value >> 32) >= brec->big.base)
+ if ((mp_limb_t) (value >> 32) >= brec->big.base)
{
- mp_limb x1hi, x1lo, r;
+ mp_limb_t x1hi, x1lo, r;
/* If you want to optimize this, take advantage of
that the quotient in the first udiv_qrnnd will
always be very small. It might be faster just to
subtract in a tight loop. */
#if UDIV_TIME > 2 * UMUL_TIME
- mp_limb x, xh, xl;
+ mp_limb_t x, xh, xl;
if (big_normalization_steps == 0)
xh = 0;
else
- xh = (mp_limb) (value >> 64 - big_normalization_steps);
- xl = (mp_limb) (value >> 32 - big_normalization_steps);
+ xh = (mp_limb_t) (value >> 64 - big_normalization_steps);
+ xl = (mp_limb_t) (value >> 32 - big_normalization_steps);
udiv_qrnnd_preinv (x1hi, r, xh, xl, big_base_norm,
brec->big.base_ninv);
- xl = ((mp_limb) value) << big_normalization_steps;
+ xl = ((mp_limb_t) value) << big_normalization_steps;
udiv_qrnnd_preinv (x1lo, x, r, xl, big_base_norm,
brec->big.base_ninv);
t[2] = x >> big_normalization_steps;
brec->big.base_ninv);
t[1] = x >> big_normalization_steps;
#elif UDIV_NEEDS_NORMALIZATION
- mp_limb x, xh, xl;
+ mp_limb_t x, xh, xl;
if (big_normalization_steps == 0)
xh = 0;
else
- xh = (mp_limb) (value >> 64 - big_normalization_steps);
- xl = (mp_limb) (value >> 32 - big_normalization_steps);
+ xh = (mp_limb_t) (value >> 64 - big_normalization_steps);
+ xl = (mp_limb_t) (value >> 32 - big_normalization_steps);
udiv_qrnnd (x1hi, r, xh, xl, big_base_norm);
- xl = ((mp_limb) value) << big_normalization_steps;
+ xl = ((mp_limb_t) value) << big_normalization_steps;
udiv_qrnnd (x1lo, x, r, xl, big_base_norm);
t[2] = x >> big_normalization_steps;
udiv_qrnnd (t[0], x, xh, xl, big_base_norm);
t[1] = x >> big_normalization_steps;
#else
- udiv_qrnnd (x1hi, r, 0, (mp_limb) (value >> 32),
+ udiv_qrnnd (x1hi, r, 0, (mp_limb_t) (value >> 32),
brec->big.base);
- udiv_qrnnd (x1lo, t[2], r, (mp_limb) value, brec->big.base);
+ udiv_qrnnd (x1lo, t[2], r, (mp_limb_t) value, brec->big.base);
udiv_qrnnd (t[0], t[1], x1hi, x1lo, brec->big.base);
#endif
n = 3;
else
{
#if (UDIV_TIME > 2 * UMUL_TIME)
- mp_limb x;
+ mp_limb_t x;
value <<= brec->big.normalization_steps;
- udiv_qrnnd_preinv (t[0], x, (mp_limb) (value >> 32),
- (mp_limb) value, big_base_norm,
+ udiv_qrnnd_preinv (t[0], x, (mp_limb_t) (value >> 32),
+ (mp_limb_t) value, big_base_norm,
brec->big.base_ninv);
t[1] = x >> brec->big.normalization_steps;
#elif UDIV_NEEDS_NORMALIZATION
- mp_limb x;
+ mp_limb_t x;
value <<= big_normalization_steps;
- udiv_qrnnd (t[0], x, (mp_limb) (value >> 32),
- (mp_limb) value, big_base_norm);
+ udiv_qrnnd (t[0], x, (mp_limb_t) (value >> 32),
+ (mp_limb_t) value, big_base_norm);
t[1] = x >> big_normalization_steps;
#else
- udiv_qrnnd (t[0], t[1], (mp_limb) (value >> 32),
- (mp_limb) value, brec->big.base);
+ udiv_qrnnd (t[0], t[1], (mp_limb_t) (value >> 32),
+ (mp_limb_t) value, brec->big.base);
#endif
n = 2;
}
/* Convert the 1-3 words in t[], word by word, to ASCII. */
do
{
- mp_limb ti = t[--n];
+ mp_limb_t ti = t[--n];
int ndig_for_this_limb = 0;
#if UDIV_TIME > 2 * UMUL_TIME
- mp_limb base_multiplier = brec->base_multiplier;
+ mp_limb_t base_multiplier = brec->base_multiplier;
if (brec->flag)
while (ti != 0)
{
- mp_limb quo, rem, x, dummy;
+ mp_limb_t quo, rem, x, dummy;
umul_ppmm (x, dummy, ti, base_multiplier);
quo = (x + ((ti - x) >> 1)) >> (brec->post_shift - 1);
else
while (ti != 0)
{
- mp_limb quo, rem, x, dummy;
+ mp_limb_t quo, rem, x, dummy;
umul_ppmm (x, dummy, ti, base_multiplier);
quo = x >> brec->post_shift;
#else
while (ti != 0)
{
- mp_limb quo, rem;
+ mp_limb_t quo, rem;
quo = ti / base;
rem = ti % base;
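In the _itoa.c hunks above, the UDIV_TIME > 2 * UMUL_TIME branches avoid one hardware division per digit: umul_ppmm multiplies the value by a precomputed reciprocal (base_multiplier), the high word is shifted right by post_shift to give the quotient, and the remainder falls out of one multiply-and-subtract. A self-contained sketch of the same trick for base 10 on 32-bit limbs; the constants below are the well-known magic values for dividing by 10, not values taken from _itoa.c's base table:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Quotient of ti / 10 computed the way the UDIV_TIME branches do it:
     high word of (ti * reciprocal), then a small right shift.  */
  static uint32_t
  div10_by_reciprocal (uint32_t ti)
  {
    const uint32_t base_multiplier = 0xCCCCCCCDu;	/* ceil (2^35 / 10) */
    const int post_shift = 3;
    uint32_t x = (uint32_t) (((uint64_t) ti * base_multiplier) >> 32);
    return x >> post_shift;
  }

  int
  main (void)
  {
    uint64_t ti;

    for (ti = 0; ti <= 0xFFFFFFFFu; ti += 97)	/* sample the whole range */
      {
        uint32_t quo = div10_by_reciprocal ((uint32_t) ti);
        uint32_t rem = (uint32_t) ti - quo * 10;
        assert (quo == ti / 10 && rem == ti % 10);
      }
    assert (div10_by_reciprocal (0xFFFFFFFFu) == 0xFFFFFFFFu / 10);

    puts ("reciprocal division agrees with '/' and '%'");
    return 0;
  }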
# include <stdio.h>
#endif
#include <alloca.h>
-#include <ansidecl.h>
#include <ctype.h>
#include <float.h>
#include <gmp-mparam.h>
#define outchar(ch) \
do \
{ \
- register CONST int outc = (ch); \
+ register const int outc = (ch); \
if (putc (outc, fp) == EOF) \
return -1; \
++done; \
An MP variable occupies a varying number of entries in its array. We keep
track of this number for efficiency reasons. Otherwise we would always
have to process the whole array. */
-#define MPN_VAR(name) mp_limb *name; mp_size_t name##size
+#define MPN_VAR(name) mp_limb_t *name; mp_size_t name##size
#define MPN_ASSIGN(dst,src) \
- memcpy (dst, src, (dst##size = src##size) * sizeof (mp_limb))
+ memcpy (dst, src, (dst##size = src##size) * sizeof (mp_limb_t))
#define MPN_GE(u,v) \
(u##size > v##size || (u##size == v##size && __mpn_cmp (u, v, u##size) >= 0))
union
{
double dbl;
- LONG_DOUBLE ldbl;
+ __long_double_t ldbl;
}
fpnum;
const char *grouping;
/* "NaN" or "Inf" for the special cases. */
- CONST char *special = NULL;
+ const char *special = NULL;
/* We need just a few limbs for the input before shifting to the right
position. */
- mp_limb fp_input[(LDBL_MANT_DIG + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB];
+ mp_limb_t fp_input[(LDBL_MANT_DIG + BITS_PER_MP_LIMB - 1) / BITS_PER_MP_LIMB];
/* We need to shift the contents of fp_input by this amount of bits. */
int to_shift;
int done = 0;
/* General helper (carry limb). */
- mp_limb cy;
+ mp_limb_t cy;
char hack_digit (void)
{
- mp_limb hi;
+ mp_limb_t hi;
if (expsign != 0 && type == 'f' && exponent-- > 0)
hi = 0;
would be really big it could lead to memory problems. */
{
mp_size_t bignum_size = ((ABS (exponent) + BITS_PER_MP_LIMB - 1)
- / BITS_PER_MP_LIMB + 4) * sizeof (mp_limb);
- frac = (mp_limb *) alloca (bignum_size);
- tmp = (mp_limb *) alloca (bignum_size);
- scale = (mp_limb *) alloca (bignum_size);
+ / BITS_PER_MP_LIMB + 4) * sizeof (mp_limb_t);
+ frac = (mp_limb_t *) alloca (bignum_size);
+ tmp = (mp_limb_t *) alloca (bignum_size);
+ scale = (mp_limb_t *) alloca (bignum_size);
}
/* We now have to distinguish between numbers with positive and negative
if (exponent >= tens->m_expo)
{
int i, incr, cnt_h, cnt_l;
- mp_limb topval[2];
+ mp_limb_t topval[2];
/* The __mpn_mul function expects the first argument to be
bigger than the second. */
{
topval[0] = 0;
topval[1]
- = ((mp_limb) 10) << (BITS_PER_MP_LIMB - 4 - cnt_h);
+ = ((mp_limb_t) 10) << (BITS_PER_MP_LIMB - 4 - cnt_h);
}
else
{
- topval[0] = ((mp_limb) 10) << (BITS_PER_MP_LIMB - 4);
+ topval[0] = ((mp_limb_t) 10) << (BITS_PER_MP_LIMB - 4);
topval[1] = 0;
(void) __mpn_lshift (topval, topval, 2,
BITS_PER_MP_LIMB - cnt_h);
strtof/strtod/strtold. */
struct mp_power
{
- const mp_limb *array; /* The array with the number representation. */
+ const mp_limb_t *array; /* The array with the number representation. */
mp_size_t arraysize; /* Size of the array. */
int p_expo; /* Exponent of the number 10^(2^i). */
int m_expo; /* Exponent of the number 10^-(2^i-1). */
# define MAX_DIG_PER_LIMB 19
# define MAX_FAC_PER_LIMB 10000000000000000000UL
#else
-# error "mp_limb size " BITS_PER_MP_LIMB "not accounted for"
+# error "mp_limb_t size " BITS_PER_MP_LIMB "not accounted for"
#endif
/* Local data structure. */
-static const mp_limb _tens_in_limb[MAX_DIG_PER_LIMB + 1] =
+static const mp_limb_t _tens_in_limb[MAX_DIG_PER_LIMB + 1] =
{ 0, 10, 100,
1000, 10000, 100000,
1000000, 10000000, 100000000,
#define MPNSIZE (howmany (MAX_EXP + 2 * MANT_DIG, BITS_PER_MP_LIMB) \
+ 2)
/* Declare an mpn integer variable that big. */
-#define MPN_VAR(name) mp_limb name[MPNSIZE]; mp_size_t name##size
+#define MPN_VAR(name) mp_limb_t name[MPNSIZE]; mp_size_t name##size
/* Copy an mpn integer value. */
#define MPN_ASSIGN(dst, src) \
- memcpy (dst, src, (dst##size = src##size) * sizeof (mp_limb))
+ memcpy (dst, src, (dst##size = src##size) * sizeof (mp_limb_t))
/* Return a floating point number of the needed type according to the given
multi-precision number after possible rounding. */
static inline FLOAT
-round_and_return (mp_limb *retval, int exponent, int negative,
- mp_limb round_limb, mp_size_t round_bit, int more_bits)
+round_and_return (mp_limb_t *retval, int exponent, int negative,
+ mp_limb_t round_limb, mp_size_t round_bit, int more_bits)
{
if (exponent < MIN_EXP - 1)
{
return 0.0;
}
- more_bits |= (round_limb & ((((mp_limb) 1) << round_bit) - 1)) != 0;
+ more_bits |= (round_limb & ((((mp_limb_t) 1) << round_bit) - 1)) != 0;
if (shift == MANT_DIG)
/* This is a special case to handle the very seldom case where
the mantissa will be empty after the shift. */
round_bit = (shift - 1) % BITS_PER_MP_LIMB;
for (i = 0; i < (shift - 1) / BITS_PER_MP_LIMB; ++i)
more_bits |= retval[i] != 0;
- more_bits |= (round_limb & ((((mp_limb) 1) << round_bit) - 1)) != 0;
+ more_bits |= ((round_limb & ((((mp_limb_t) 1) << round_bit) - 1))
+ != 0);
(void) __mpn_rshift (retval, &retval[shift / BITS_PER_MP_LIMB],
RETURN_LIMB_SIZE - (shift / BITS_PER_MP_LIMB),
exponent = MIN_EXP - 2;
}
- if ((round_limb & (((mp_limb) 1) << round_bit)) != 0
+ if ((round_limb & (((mp_limb_t) 1) << round_bit)) != 0
&& (more_bits || (retval[0] & 1) != 0
- || (round_limb & ((((mp_limb) 1) << round_bit) - 1)) != 0))
+ || (round_limb & ((((mp_limb_t) 1) << round_bit) - 1)) != 0))
{
- mp_limb cy = __mpn_add_1 (retval, retval, RETURN_LIMB_SIZE, 1);
+ mp_limb_t cy = __mpn_add_1 (retval, retval, RETURN_LIMB_SIZE, 1);
if (((MANT_DIG % BITS_PER_MP_LIMB) == 0 && cy) ||
((MANT_DIG % BITS_PER_MP_LIMB) != 0 &&
(retval[RETURN_LIMB_SIZE - 1]
- & (((mp_limb) 1) << (MANT_DIG % BITS_PER_MP_LIMB))) != 0))
+ & (((mp_limb_t) 1) << (MANT_DIG % BITS_PER_MP_LIMB))) != 0))
{
++exponent;
(void) __mpn_rshift (retval, retval, RETURN_LIMB_SIZE, 1);
retval[RETURN_LIMB_SIZE - 1]
- |= ((mp_limb) 1) << ((MANT_DIG - 1) % BITS_PER_MP_LIMB);
+ |= ((mp_limb_t) 1) << ((MANT_DIG - 1) % BITS_PER_MP_LIMB);
}
else if (exponent == MIN_EXP - 2
&& (retval[RETURN_LIMB_SIZE - 1]
- & (((mp_limb) 1) << ((MANT_DIG - 1) % BITS_PER_MP_LIMB)))
+ & (((mp_limb_t) 1) << ((MANT_DIG - 1) % BITS_PER_MP_LIMB)))
!= 0)
/* The number was denormalized but now normalized. */
exponent = MIN_EXP - 1;
value. If the EXPONENT is small enough to be taken as an additional
factor for the resulting number (see code) multiply by it. */
static inline const STRING_TYPE *
-str_to_mpn (const STRING_TYPE *str, int digcnt, mp_limb *n, mp_size_t *nsize,
+str_to_mpn (const STRING_TYPE *str, int digcnt, mp_limb_t *n, mp_size_t *nsize,
int *exponent)
{
/* Number of digits for actual limb. */
int cnt = 0;
- mp_limb low = 0;
- mp_limb base;
+ mp_limb_t low = 0;
+ mp_limb_t base;
*nsize = 0;
assert (digcnt > 0);
n[0] = low;
else
{
- mp_limb cy;
+ mp_limb_t cy;
cy = __mpn_mul_1 (n, n, *nsize, MAX_FAC_PER_LIMB);
cy += __mpn_add_1 (n, n, *nsize, low);
if (cy != 0)
}
else
{
- mp_limb cy;
+ mp_limb_t cy;
cy = __mpn_mul_1 (n, n, *nsize, base);
cy += __mpn_add_1 (n, n, *nsize, low);
if (cy != 0)
Tege doesn't like this function so I have to write it here myself. :)
--drepper */
static inline void
-__mpn_lshift_1 (mp_limb *ptr, mp_size_t size, unsigned int count, mp_limb limb)
+__mpn_lshift_1 (mp_limb_t *ptr, mp_size_t size, unsigned int count,
+ mp_limb_t limb)
{
if (count == BITS_PER_MP_LIMB)
{
MPN_VAR (den);
/* Representation for the return value. */
- mp_limb retval[RETURN_LIMB_SIZE];
+ mp_limb_t retval[RETURN_LIMB_SIZE];
/* Number of bits currently in result value. */
int bits;
if (exponent > 0)
{
/* We now multiply the gained number by the given power of ten. */
- mp_limb *psrc = num;
- mp_limb *pdest = den;
+ mp_limb_t *psrc = num;
+ mp_limb_t *pdest = den;
int expbit = 1;
const struct mp_power *ttab = &_fpioconst_pow10[0];
{
if ((exponent & expbit) != 0)
{
- mp_limb cy;
+ mp_limb_t cy;
exponent ^= expbit;
/* FIXME: not the whole multiplication has to be done.
while (exponent != 0);
if (psrc == den)
- memcpy (num, den, numsize * sizeof (mp_limb));
+ memcpy (num, den, numsize * sizeof (mp_limb_t));
}
/* Determine how many bits of the result we already have. */
if (least_bit == 0)
memcpy (retval, &num[least_idx],
- RETURN_LIMB_SIZE * sizeof (mp_limb));
+ RETURN_LIMB_SIZE * sizeof (mp_limb_t));
else
{
for (i = least_idx; i < numsize - 1; ++i)
if (target_bit == is_bit)
{
memcpy (&retval[RETURN_LIMB_SIZE - numsize], num,
- numsize * sizeof (mp_limb));
+ numsize * sizeof (mp_limb_t));
/* FIXME: the following loop can be avoided if we assume a
maximal MANT_DIG value. */
MPN_ZERO (retval, RETURN_LIMB_SIZE - numsize);
}
else
{
- mp_limb cy;
+ mp_limb_t cy;
assert (numsize < RETURN_LIMB_SIZE);
cy = __mpn_rshift (&retval[RETURN_LIMB_SIZE - numsize],
}
/* Store the bits we already have. */
- memcpy (retval, num, numsize * sizeof (mp_limb));
+ memcpy (retval, num, numsize * sizeof (mp_limb_t));
#if RETURN_LIMB_SIZE > 1
if (numsize < RETURN_LIMB_SIZE)
retval[numsize] = 0;
int cnt;
int neg_exp;
int more_bits;
- mp_limb cy;
- mp_limb *psrc = den;
- mp_limb *pdest = num;
+ mp_limb_t cy;
+ mp_limb_t *psrc = den;
+ mp_limb_t *pdest = num;
const struct mp_power *ttab = &_fpioconst_pow10[0];
assert (dig_no > int_no && exponent <= 0);
{
if ((neg_exp & expbit) != 0)
{
- mp_limb cy;
+ mp_limb_t cy;
neg_exp ^= expbit;
if (densize == 0)
{
densize = ttab->arraysize - _FPIO_CONST_OFFSET;
memcpy (psrc, &ttab->array[_FPIO_CONST_OFFSET],
- densize * sizeof (mp_limb));
+ densize * sizeof (mp_limb_t));
}
else
{
while (neg_exp != 0);
if (psrc == num)
- memcpy (den, num, densize * sizeof (mp_limb));
+ memcpy (den, num, densize * sizeof (mp_limb_t));
/* Read the fractional digits from the string. */
(void) str_to_mpn (startp, dig_no - int_no, num, &numsize, &exponent);
{
case 1:
{
- mp_limb d, n, quot;
+ mp_limb_t d, n, quot;
int used = 0;
n = num[0];
}
case 2:
{
- mp_limb d0, d1, n0, n1;
- mp_limb quot = 0;
+ mp_limb_t d0, d1, n0, n1;
+ mp_limb_t quot = 0;
int used = 0;
d0 = den[0];
while (bits <= MANT_DIG)
{
- mp_limb r;
+ mp_limb_t r;
if (n1 == d1)
{
/* QUOT should be either 111..111 or 111..110. We need
special treatment of this rare case as normal division
would give overflow. */
- quot = ~(mp_limb) 0;
+ quot = ~(mp_limb_t) 0;
r = n0 + d1;
if (r < d1) /* Carry in the addition? */
default:
{
int i;
- mp_limb cy, dX, d1, n0, n1;
- mp_limb quot = 0;
+ mp_limb_t cy, dX, d1, n0, n1;
+ mp_limb_t quot = 0;
int used = 0;
dX = den[densize - 1];
if (n0 == dX)
/* This might over-estimate QUOT, but it's probably not
worth the extra code here to find out. */
- quot = ~(mp_limb) 0;
+ quot = ~(mp_limb_t) 0;
else
{
- mp_limb r;
+ mp_limb_t r;
udiv_qrnnd (quot, r, n0, num[densize - 1], dX);
umul_ppmm (n1, n0, d1, quot);
+++ /dev/null
-/* mpn_add_1 --
-
-Copyright (C) 1993, 1994 Free Software Foundation, Inc.
-
-This file is part of the GNU MP Library.
-
-The GNU MP Library is free software; you can redistribute it and/or modify
-it under the terms of the GNU Library General Public License as published by
-the Free Software Foundation; either version 2 of the License, or (at your
-option) any later version.
-
-The GNU MP Library is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
-License for more details.
-
-You should have received a copy of the GNU Library General Public License
-along with the GNU MP Library; see the file COPYING.LIB. If not, write to
-the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
-
-#define __mpn_add_1 __noname
-#include "gmp.h"
-#undef __mpn_add_1
-
-#include "gmp-impl.h"
-
-mp_limb
-__mpn_add_1 (res_ptr, s1_ptr, s1_size, s2_limb)
- register mp_ptr res_ptr;
- register mp_srcptr s1_ptr;
- register mp_size_t s1_size;
- register mp_limb s2_limb;
-{
- register mp_limb x;
-
- x = *s1_ptr++;
- s2_limb = x + s2_limb;
- *res_ptr++ = s2_limb;
- if (s2_limb < x)
- {
- while (--s1_size != 0)
- {
- x = *s1_ptr++ + 1;
- *res_ptr++ = x;
- if (x != 0)
- goto fin;
- }
-
- return 1;
- }
-
- fin:
- if (res_ptr != s1_ptr)
- {
- mp_size_t i;
- for (i = 0; i < s1_size - 1; i++)
- res_ptr[i] = s1_ptr[i];
- }
-
- return 0;
-}
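With sysdeps/generic/add_1.c removed, callers such as round_and_return and str_to_mpn above still rely on the same single-limb add with carry propagation; per the "added inlines" Makefile entry, the routine is presumably now supplied inline by the gmp-2.0 sources rather than by this separate file. A small usage sketch of the contract the deleted file implemented, written against GMP's public mpn_add_1 (same semantics as the internal __mpn_add_1): add one limb to an n-limb number in place and return the carry out of the top limb.

  #include <assert.h>
  #include <gmp.h>		/* build with -lgmp */

  int
  main (void)
  {
    /* Two limbs of all ones: the largest 2-limb value.  */
    mp_limb_t n[2] = { ~(mp_limb_t) 0, ~(mp_limb_t) 0 };

    /* n += 1; every limb overflows, so the carry falls off the top.  */
    mp_limb_t cy = mpn_add_1 (n, n, 2, 1);

    assert (n[0] == 0 && n[1] == 0);
    assert (cy == 1);
    return 0;
  }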