Commit 57e30426 authored by Niels Möller's avatar Niels Möller
Browse files

Updated mini-gmp from the gmp repository, latest change from 2017-07-23.

parent 195edb37
2017-09-09 Niels Möller <nisse@lysator.liu.se>
* mini-gmp.c: Updated mini-gmp from the gmp repository, latest
change from 2017-07-23.
* mini-gmp.h: Likewise.
2017-09-06 Niels Möller <nisse@lysator.liu.se>
* hkdf.c (hkdf_expand): Eliminate a (signed) ssize_t variable, use
......
......@@ -2,7 +2,7 @@
Contributed to the GNU project by Niels Möller
Copyright 1991-1997, 1999-2014 Free Software Foundation, Inc.
Copyright 1991-1997, 1999-2017 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
......@@ -69,8 +69,16 @@ see https://www.gnu.org/licenses/. */
#define GMP_MIN(a, b) ((a) < (b) ? (a) : (b))
#define GMP_MAX(a, b) ((a) > (b) ? (a) : (b))
#define GMP_CMP(a,b) (((a) > (b)) - ((a) < (b)))
/* Return non-zero if xp,xsize and yp,ysize overlap.
If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
overlap. If both these are false, there's an overlap. */
#define GMP_MPN_OVERLAP_P(xp, xsize, yp, ysize) \
((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
#define gmp_assert_nocarry(x) do { \
mp_limb_t __cy = x; \
mp_limb_t __cy = (x); \
assert (__cy == 0); \
} while (0)
......@@ -264,12 +272,12 @@ gmp_default_alloc (size_t size)
/* Default reallocation function: plain realloc, aborting via gmp_die
   if memory is exhausted.  old_size is unused here but part of the
   mp_set_memory_functions interface. */
static void *
gmp_default_realloc (void *old, size_t old_size, size_t new_size)
{
  void *p;

  p = realloc (old, new_size);

  if (!p)
    gmp_die("gmp_default_realloc: Virtual memory exhausted.");

  return p;
}
......@@ -322,14 +330,14 @@ mp_set_memory_functions (void *(*alloc_func) (size_t),
/* Allocate storage for SIZE limbs.  gmp_xalloc never returns NULL
   (it dies on failure), so no check is needed here. */
static mp_ptr
gmp_xalloc_limbs (mp_size_t size)
{
  return (mp_ptr) gmp_xalloc (size * sizeof (mp_limb_t));
}
/* Resize OLD to hold SIZE limbs (SIZE > 0) through the installed
   reallocation function.  The old byte count passed on is 0; the
   built-in reallocation function ignores it. */
static mp_ptr
gmp_xrealloc_limbs (mp_ptr old, mp_size_t size)
{
  assert (size > 0);
  return (mp_ptr) (*gmp_reallocate_func) (old, 0, size * sizeof (mp_limb_t));
}
......@@ -346,7 +354,7 @@ mpn_copyi (mp_ptr d, mp_srcptr s, mp_size_t n)
/* Copy N limbs from S to D, starting from the high end, so the copy
   is safe when the regions overlap with D above S. */
void
mpn_copyd (mp_ptr d, mp_srcptr s, mp_size_t n)
{
  while (--n >= 0)
    d[n] = s[n];
}
......@@ -373,20 +381,22 @@ mpn_cmp4 (mp_srcptr ap, mp_size_t an, mp_srcptr bp, mp_size_t bn)
/* Return N with high zero limbs of {XP, N} stripped; 0 when the
   operand is zero (or N == 0). */
static mp_size_t
mpn_normalized_size (mp_srcptr xp, mp_size_t n)
{
  while (n > 0 && xp[n-1] == 0)
    --n;
  return n;
}
#define mpn_zero_p(xp, n) (mpn_normalized_size ((xp), (n)) == 0)
int
mpn_zero_p(mp_srcptr rp, mp_size_t n)
{
return mpn_normalized_size (rp, n) == 0;
}
/* Set the N limbs at RP to zero. */
void
mpn_zero (mp_ptr rp, mp_size_t n)
{
  while (--n >= 0)
    rp[n] = 0;
}
mp_limb_t
......@@ -452,7 +462,7 @@ mpn_sub_1 (mp_ptr rp, mp_srcptr ap, mp_size_t n, mp_limb_t b)
{
mp_limb_t a = ap[i];
/* Carry out */
mp_limb_t cy = a < b;;
mp_limb_t cy = a < b;
rp[i] = a - b;
b = cy;
}
......@@ -572,23 +582,24 @@ mpn_mul (mp_ptr rp, mp_srcptr up, mp_size_t un, mp_srcptr vp, mp_size_t vn)
{
assert (un >= vn);
assert (vn >= 1);
assert (!GMP_MPN_OVERLAP_P(rp, un + vn, up, un));
assert (!GMP_MPN_OVERLAP_P(rp, un + vn, vp, vn));
/* We first multiply by the low order limb. This result can be
stored, not added, to rp. We also avoid a loop for zeroing this
way. */
rp[un] = mpn_mul_1 (rp, up, un, vp[0]);
rp += 1, vp += 1, vn -= 1;
/* Now accumulate the product of up[] and the next higher limb from
vp[]. */
while (vn >= 1)
while (--vn >= 1)
{
rp += 1, vp += 1;
rp[un] = mpn_addmul_1 (rp, up, un, vp[0]);
rp += 1, vp += 1, vn -= 1;
}
return rp[un - 1];
return rp[un];
}
void
......@@ -608,7 +619,6 @@ mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
mp_limb_t high_limb, low_limb;
unsigned int tnc;
mp_size_t i;
mp_limb_t retval;
assert (n >= 1);
......@@ -623,7 +633,7 @@ mpn_lshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
retval = low_limb >> tnc;
high_limb = (low_limb << cnt);
for (i = n; --i != 0;)
while (--n != 0)
{
low_limb = *--up;
*--rp = high_limb | (low_limb >> tnc);
......@@ -639,7 +649,6 @@ mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
{
mp_limb_t high_limb, low_limb;
unsigned int tnc;
mp_size_t i;
mp_limb_t retval;
assert (n >= 1);
......@@ -651,7 +660,7 @@ mpn_rshift (mp_ptr rp, mp_srcptr up, mp_size_t n, unsigned int cnt)
retval = (high_limb << tnc);
low_limb = high_limb >> cnt;
for (i = n; --i != 0;)
while (--n != 0)
{
high_limb = *up++;
*rp++ = low_limb | (high_limb << tnc);
......@@ -702,24 +711,68 @@ mpn_scan0 (mp_srcptr ptr, mp_bitcnt_t bit)
i, ptr, i, GMP_LIMB_MAX);
}
/* Store the one's complement of {UP, N} at RP. */
void
mpn_com (mp_ptr rp, mp_srcptr up, mp_size_t n)
{
  mp_size_t i;

  for (i = 0; i < n; i++)
    rp[i] = ~up[i];
}
/* Store the two's complement negation of {UP, N} at RP.  Returns the
   borrow out of 0 - U: 0 when U is zero, otherwise 1. */
mp_limb_t
mpn_neg (mp_ptr rp, mp_srcptr up, mp_size_t n)
{
  mp_size_t i = 0;

  /* Low zero limbs negate to zero.  If all limbs are zero, the result
     is zero and there is no borrow. */
  while (up[i] == 0)
    {
      rp[i] = 0;
      if (++i == n)
	return 0;
    }

  /* Negate the lowest non-zero limb; all higher limbs are simply
     complemented, since the borrow propagates no further. */
  rp[i] = - up[i];
  mpn_com (rp + i + 1, up + i + 1, n - i - 1);
  return 1;
}
/* MPN division interface. */
/* The 3/2 inverse is defined as
m = floor( (B^3-1) / (B u1 + u0)) - B
*/
mp_limb_t
mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
{
mp_limb_t r, p, m;
unsigned ul, uh;
unsigned ql, qh;
mp_limb_t r, p, m, ql;
unsigned ul, uh, qh;
/* First, do a 2/1 inverse. */
/* The inverse m is defined as floor( (B^2 - 1 - u1)/u1 ), so that 0 <
* B^2 - (B + m) u1 <= u1 */
assert (u1 >= GMP_LIMB_HIGHBIT);
/* For notation, let b denote the half-limb base, so that B = b^2.
Split u1 = b uh + ul. */
ul = u1 & GMP_LLIMB_MASK;
uh = u1 >> (GMP_LIMB_BITS / 2);
/* Approximation of the high half of quotient. Differs from the 2/1
inverse of the half limb uh, since we have already subtracted
u0. */
qh = ~u1 / uh;
/* Adjust to get a half-limb 3/2 inverse, i.e., we want
qh' = floor( (b^3 - 1) / u) - b = floor ((b^3 - b u - 1) / u
= floor( (b (~u) + b-1) / u),
and the remainder
r = b (~u) + b-1 - qh (b uh + ul)
= b (~u - qh uh) + b-1 - qh ul
Subtraction of qh ul may underflow, which implies adjustments.
But by normalization, 2 u >= B > qh ul, so we need to adjust by
at most 2.
*/
r = ((~u1 - (mp_limb_t) qh * uh) << (GMP_LIMB_BITS / 2)) | GMP_LLIMB_MASK;
p = (mp_limb_t) qh * ul;
......@@ -737,11 +790,19 @@ mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
}
r -= p;
/* Do a 3/2 division (with half limb size) */
/* Low half of the quotient is
ql = floor ( (b r + b-1) / u1).
This is a 3/2 division (on half-limbs), for which qh is a
suitable inverse. */
p = (r >> (GMP_LIMB_BITS / 2)) * qh + r;
/* Unlike full-limb 3/2, we can add 1 without overflow. For this to
work, it is essential that ql is a full mp_limb_t. */
ql = (p >> (GMP_LIMB_BITS / 2)) + 1;
/* By the 3/2 method, we don't need the high half limb. */
/* By the 3/2 trick, we don't need the high half limb. */
r = (r << (GMP_LIMB_BITS / 2)) + GMP_LLIMB_MASK - ql * u1;
if (r >= (p << (GMP_LIMB_BITS / 2)))
......@@ -756,6 +817,8 @@ mpn_invert_3by2 (mp_limb_t u1, mp_limb_t u0)
r -= u1;
}
/* Now m is the 2/1 invers of u1. If u0 > 0, adjust it to become a
3/2 inverse. */
if (u0 > 0)
{
mp_limb_t th, tl;
......@@ -876,7 +939,7 @@ mpn_div_qr_1_preinv (mp_ptr qp, mp_srcptr np, mp_size_t nn,
d = inv->d1;
di = inv->di;
while (nn-- > 0)
while (--nn >= 0)
{
mp_limb_t q;
......@@ -1160,7 +1223,7 @@ mpn_get_str_bits (unsigned char *sp, unsigned bits, mp_srcptr up, mp_size_t un)
unsigned char mask;
size_t sn, j;
mp_size_t i;
int shift;
unsigned shift;
sn = ((un - 1) * GMP_LIMB_BITS + mpn_limb_size_in_base_2 (up[un-1])
+ bits - 1) / bits;
......@@ -1301,6 +1364,8 @@ mpn_set_str_bits (mp_ptr rp, const unsigned char *sp, size_t sn,
return rn;
}
/* Result is usually normalized, except for all-zero input, in which
case a single zero limb is written at *RP, and 1 is returned. */
static mp_size_t
mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
mp_limb_t b, const struct mpn_base_info *info)
......@@ -1310,16 +1375,18 @@ mpn_set_str_other (mp_ptr rp, const unsigned char *sp, size_t sn,
unsigned k;
size_t j;
assert (sn > 0);
k = 1 + (sn - 1) % info->exp;
j = 0;
w = sp[j++];
for (; --k > 0; )
while (--k != 0)
w = w * b + sp[j++];
rp[0] = w;
for (rn = (w > 0); j < sn;)
for (rn = 1; j < sn;)
{
mp_limb_t cy;
......@@ -1362,9 +1429,11 @@ mpn_set_str (mp_ptr rp, const unsigned char *sp, size_t sn, int base)
/* Initialize R to 0 without allocating: _mp_alloc == 0 marks the
   limb pointer as pointing at a shared read-only dummy limb, and the
   first write through MPZ_REALLOC performs the real allocation. */
void
mpz_init (mpz_t r)
{
  static const mp_limb_t dummy_limb = 0xc1a0;

  r->_mp_alloc = 0;
  r->_mp_size = 0;
  r->_mp_d = (mp_ptr) &dummy_limb;
}
/* The utility of this function is a bit limited, since many functions
......@@ -1385,15 +1454,19 @@ mpz_init2 (mpz_t r, mp_bitcnt_t bits)
/* Release R's limb storage.  When _mp_alloc is 0, _mp_d still points
   at the static dummy limb from mpz_init and must not be freed. */
void
mpz_clear (mpz_t r)
{
  if (r->_mp_alloc)
    gmp_free (r->_mp_d);
}
static void *
static mp_ptr
mpz_realloc (mpz_t r, mp_size_t size)
{
size = GMP_MAX (size, 1);
r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
if (r->_mp_alloc)
r->_mp_d = gmp_xrealloc_limbs (r->_mp_d, size);
else
r->_mp_d = gmp_xalloc_limbs (size);
r->_mp_alloc = size;
if (GMP_ABS (r->_mp_size) > size)
......@@ -1416,7 +1489,7 @@ mpz_set_si (mpz_t r, signed long int x)
else /* (x < 0) */
{
r->_mp_size = -1;
r->_mp_d[0] = GMP_NEG_CAST (unsigned long int, x);
MPZ_REALLOC (r, 1)[0] = GMP_NEG_CAST (unsigned long int, x);
}
}
......@@ -1426,7 +1499,7 @@ mpz_set_ui (mpz_t r, unsigned long int x)
if (x > 0)
{
r->_mp_size = 1;
r->_mp_d[0] = x;
MPZ_REALLOC (r, 1)[0] = x;
}
else
r->_mp_size = 0;
......@@ -1475,14 +1548,12 @@ mpz_fits_slong_p (const mpz_t u)
{
mp_size_t us = u->_mp_size;
if (us == 0)
return 1;
else if (us == 1)
if (us == 1)
return u->_mp_d[0] < GMP_LIMB_HIGHBIT;
else if (us == -1)
return u->_mp_d[0] <= GMP_LIMB_HIGHBIT;
else
return 0;
return (us == 0);
}
int
......@@ -1496,14 +1567,11 @@ mpz_fits_ulong_p (const mpz_t u)
/* Return U truncated to a signed long: the low limb's bits below the
   sign bit, negated when U is negative. */
long int
mpz_get_si (const mpz_t u)
{
  if (u->_mp_size < 0)
    /* This expression is necessary to properly handle 0x80000000 */
    return -1 - (long) ((u->_mp_d[0] - 1) & ~GMP_LIMB_HIGHBIT);
  else
    return (long) (mpz_get_ui (u) & ~GMP_LIMB_HIGHBIT);
}
unsigned long int
......@@ -1536,7 +1604,7 @@ mpz_realloc2 (mpz_t x, mp_bitcnt_t n)
/* Expose X's limb array for read-only access (mpz/mpn interface). */
mp_srcptr
mpz_limbs_read (mpz_srcptr x)
{
  return x->_mp_d;
}
mp_ptr
......@@ -1716,9 +1784,7 @@ mpz_cmp_d (const mpz_t x, double d)
/* Return the sign of U: -1, 0 or 1, taken from _mp_size. */
int
mpz_sgn (const mpz_t u)
{
  return GMP_CMP (u->_mp_size, 0);
}
int
......@@ -1733,13 +1799,7 @@ mpz_cmp_si (const mpz_t u, long v)
else if (usize >= 0)
return 1;
else /* usize == -1 */
{
mp_limb_t ul = u->_mp_d[0];
if ((mp_limb_t)GMP_NEG_CAST (unsigned long int, v) < ul)
return -1;
else
return (mp_limb_t)GMP_NEG_CAST (unsigned long int, v) > ul;
}
return GMP_CMP (GMP_NEG_CAST (mp_limb_t, v), u->_mp_d[0]);
}
int
......@@ -1752,10 +1812,7 @@ mpz_cmp_ui (const mpz_t u, unsigned long v)
else if (usize < 0)
return -1;
else
{
mp_limb_t ul = (usize > 0) ? u->_mp_d[0] : 0;
return (ul > v) - (ul < v);
}
return GMP_CMP (mpz_get_ui (u), v);
}
int
......@@ -1775,15 +1832,10 @@ mpz_cmp (const mpz_t a, const mpz_t b)
/* Compare |U| with V; returns the sign of |U| - V. */
int
mpz_cmpabs_ui (const mpz_t u, unsigned long v)
{
  /* More than one limb means |U| exceeds any single-limb value. */
  if (GMP_ABS (u->_mp_size) > 1)
    return 1;
  else
    return GMP_CMP (mpz_get_ui (u), v);
}
int
......@@ -1796,18 +1848,14 @@ mpz_cmpabs (const mpz_t u, const mpz_t v)
/* Set R to |U|.  mpz_set handles the R == U case itself, so no alias
   check is needed here. */
void
mpz_abs (mpz_t r, const mpz_t u)
{
  mpz_set (r, u);
  r->_mp_size = GMP_ABS (r->_mp_size);
}
/* Set R to -U.  mpz_set handles the R == U case itself. */
void
mpz_neg (mpz_t r, const mpz_t u)
{
  mpz_set (r, u);
  r->_mp_size = -r->_mp_size;
}
......@@ -1833,7 +1881,7 @@ mpz_abs_add_ui (mpz_t r, const mpz_t a, unsigned long b)
an = GMP_ABS (a->_mp_size);
if (an == 0)
{
r->_mp_d[0] = b;
MPZ_REALLOC (r, 1)[0] = b;
return b > 0;
}
......@@ -1852,14 +1900,15 @@ static mp_size_t
mpz_abs_sub_ui (mpz_t r, const mpz_t a, unsigned long b)
{
mp_size_t an = GMP_ABS (a->_mp_size);
mp_ptr rp = MPZ_REALLOC (r, an);
mp_ptr rp;
if (an == 0)
{
rp[0] = b;
MPZ_REALLOC (r, 1)[0] = b;
return -(b > 0);
}
else if (an == 1 && a->_mp_d[0] < b)
rp = MPZ_REALLOC (r, an);
if (an == 1 && a->_mp_d[0] < b)
{
rp[0] = b - a->_mp_d[0];
return -1;
......@@ -2077,8 +2126,7 @@ mpz_mul_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bits)
else
mpn_copyd (rp + limbs, u->_mp_d, un);
while (limbs > 0)
rp[--limbs] = 0;
mpn_zero (rp, limbs);
r->_mp_size = (u->_mp_size < 0) ? - rn : rn;
}
......@@ -2331,7 +2379,6 @@ mpz_div_q_2exp (mpz_t q, const mpz_t u, mp_bitcnt_t bit_index,
if (qn <= 0)
qn = 0;
else
{
qp = MPZ_REALLOC (q, qn);
......@@ -2385,16 +2432,9 @@ mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
{
/* Have to negate and sign extend. */
mp_size_t i;
mp_limb_t cy;
for (cy = 1, i = 0; i < un; i++)
{
mp_limb_t s = ~u->_mp_d[i] + cy;
cy = s < cy;
rp[i] = s;
}
assert (cy == 0);
for (; i < rn - 1; i++)
gmp_assert_nocarry (! mpn_neg (rp, u->_mp_d, un));
for (i = un; i < rn - 1; i++)
rp[i] = GMP_LIMB_MAX;
rp[rn-1] = mask;
......@@ -2419,23 +2459,13 @@ mpz_div_r_2exp (mpz_t r, const mpz_t u, mp_bitcnt_t bit_index,
if (mode == ((us > 0) ? GMP_DIV_CEIL : GMP_DIV_FLOOR)) /* us != 0 here. */
{
/* If r != 0, compute 2^{bit_count} - r. */
mp_size_t i;
mpn_neg (rp, rp, rn);
for (i = 0; i < rn && rp[i] == 0; i++)
;
if (i < rn)
{
/* r > 0, need to flip sign. */
rp[i] = ~rp[i] + 1;
while (++i < rn)
rp[i] = ~rp[i];
rp[rn-1] &= mask;
rp[rn-1] &= mask;
/* us is not used for anything else, so we can modify it
here to indicate flipped sign. */
us = -us;
}
/* us is not used for anything else, so we can modify it
here to indicate flipped sign. */
us = -us;
}
}
rn = mpn_normalized_size (rp, rn);
......@@ -2550,7 +2580,7 @@ mpz_div_qr_ui (mpz_t q, mpz_t r,
if (r)
{
r->_mp_d[0] = rl;
MPZ_REALLOC (r, 1)[0] = rl;
r->_mp_size = rs;
}
if (q)
......@@ -3074,9 +3104,7 @@ void
mpz_ui_pow_ui (mpz_t r, unsigned long blimb, unsigned long e)
{
mpz_t b;
mpz_init_set_ui (b, blimb);
mpz_pow_ui (r, b, e);
mpz_clear (b);
mpz_pow_ui (r, mpz_roinit_n (b, &blimb, 1), e);
}
void
......@@ -3148,7 +3176,7 @@ mpz_powm (mpz_t r, const mpz_t b, const mpz_t e, const mpz_t m)
}
mpz_init_set_ui (tr, 1);
while (en-- > 0)
while (--en >= 0)
{
mp_limb_t w = e->_mp_d[en];
mp_limb_t bit;
......@@ -3188,9 +3216,7 @@ void
mpz_powm_ui (mpz_t r, const mpz_t b, unsigned long elimb, const mpz_t m)
{
mpz_t e;
mpz_init_set_ui (e, elimb);
mpz_powm (r, b, e, m);
mpz_clear (e);
mpz_powm (r, b, mpz_roinit_n (e, &elimb, 1), m);
}
/* x=trunc(y^(1/z)), r=y-x^z */
......@@ -3215,12 +3241,8 @@ mpz_rootrem (mpz_t x, mpz_t r, const mpz_t y, unsigned long z)
}
mpz_init (u);
{
mp_bitcnt_t tb;
tb = mpz_sizeinbase (y, 2) / z + 1;
mpz_init2 (t, tb);
mpz_setbit (t, tb);
}
mpz_init (t);
mpz_setbit (t, mpz_sizeinbase (y, 2) / z + 1);
if (z == 2) /* simplify sqrt loop: z-1 == 1 */
do {
......@@ -3333,7 +3355,7 @@ void
mpz_fac_ui (mpz_t x, unsigned long n)