diff --git a/ChangeLog b/ChangeLog
index 983078a6b115f7e3fd69c2208169782d2bde5a15..57d121be6227195e764141f7512502e4e48c227b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2020-10-28  Niels Möller  <nisse@lysator.liu.se>
+
+	* gmp-glue.h (cnd_add_n, cnd_sub_n, cnd_swap): Deleted, use
+	corresponding functions mpn_cnd_add_n, mpn_cnd_sub_n,
+	mpn_cnd_swap, available from GMP version 6.1.0. Update all
+	callers; in particular, mpn_cnd_add_n and mpn_cnd_sub_n take one
+	more argument than the old functions.
+
+	* gmp-glue.c (mpn_cnd_add_n, mpn_cnd_sub_n, mpn_cnd_swap)
+	[NETTLE_USE_MINI_GMP]: Fallback definitions for mini-gmp builds.
+
 2020-10-14  Niels Möller  <nisse@lysator.liu.se>
 
 	* ecc-mod-arith.c (ecc_mod_pow_2k, ecc_mod_pow_2k_mul): Moved
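The call-site change described in the ChangeLog entry above is mechanical: GMP's mpn_cnd_add_n and mpn_cnd_sub_n take an explicit destination in addition to two source operands, so the previously in-place callers now pass the result pointer twice. A minimal sketch of the new in-place usage, assuming a real GMP build (>= 6.1.0, as the entry states); the operand values are made up for illustration:

/* Sketch only, not part of the patch.  Shows that passing rp as both
   destination and first source gives the old cnd_add_n in-place behaviour.
   Build against real GMP, e.g.: cc sketch.c -lgmp */
#include <assert.h>
#include <gmp.h>

int
main (void)
{
  mp_limb_t rp[2] = { 5, 7 };
  const mp_limb_t bp[2] = { 1, 2 };
  mp_limb_t cy;

  cy = mpn_cnd_add_n (0, rp, rp, bp, 2);  /* cnd == 0: rp is left unchanged */
  assert (cy == 0 && rp[0] == 5 && rp[1] == 7);

  cy = mpn_cnd_add_n (1, rp, rp, bp, 2);  /* cnd != 0: rp += bp */
  assert (cy == 0 && rp[0] == 6 && rp[1] == 9);

  return 0;
}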
diff --git a/ecc-curve25519.c b/ecc-curve25519.c
index 60ef05400f6da4a1a72c89994ef9245a79aed8ce..05e772bce40910459192a1cea50761d82fc705d7 100644
--- a/ecc-curve25519.c
+++ b/ecc-curve25519.c
@@ -91,13 +91,13 @@ ecc_curve25519_modq (const struct ecc_modulo *q, mp_limb_t *rp)
 			 rp[n + ECC_LIMB_SIZE]);
       /* Top limb of mBmodq_shifted is zero, so we get cy == 0 or 1 */
       assert (cy < 2);
-      cnd_add_n (cy, rp+n, q->m, ECC_LIMB_SIZE);
+      mpn_cnd_add_n (cy, rp+n, rp+n, q->m, ECC_LIMB_SIZE);
     }
 
   cy = mpn_submul_1 (rp, q->m, ECC_LIMB_SIZE,
 		     rp[ECC_LIMB_SIZE-1] >> (GMP_NUMB_BITS - QHIGH_BITS));
   assert (cy < 2);
-  cnd_add_n (cy, rp, q->m, ECC_LIMB_SIZE);
+  mpn_cnd_add_n (cy, rp, rp, q->m, ECC_LIMB_SIZE);
 }
 
 /* Computes a^{(p-5)/8} = a^{2^{252}-3} mod m. Needs 5 * n scratch
@@ -187,7 +187,7 @@ ecc_curve25519_zero_p (const struct ecc_modulo *p, mp_limb_t *xp)
     + sec_add_1 (xp, xp, ECC_LIMB_SIZE - 1, 19 * (hi >> (GMP_NUMB_BITS - PHIGH_BITS)));
 #endif
   cy = mpn_sub_n (xp, xp, p->m, ECC_LIMB_SIZE);
-  cnd_add_n (cy, xp, p->m, ECC_LIMB_SIZE);
+  mpn_cnd_add_n (cy, xp, xp, p->m, ECC_LIMB_SIZE);
 
   for (i = 0, w = 0; i < ECC_LIMB_SIZE; i++)
     w |= xp[i];
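The mpn_sub_n followed by mpn_cnd_add_n sequence above (repeated in ecc-curve448.c and rsa-sec-compute-root.c below) is the usual branch-free "reduce once if >= m" step: subtract the modulus unconditionally, then add it back exactly when the subtraction borrowed. A standalone sketch of that pattern, assuming GMP >= 6.1.0 and a made-up two-limb modulus:

/* Sketch only, not part of the patch.  The modulus below is hypothetical. */
#include <assert.h>
#include <gmp.h>

static void
reduce_once (mp_limb_t *xp, const mp_limb_t *mp, mp_size_t n)
{
  /* Subtract m unconditionally; if that borrowed, x was < m, so the
     conditional add restores it.  No data-dependent branches. */
  mp_limb_t cy = mpn_sub_n (xp, xp, mp, n);
  mpn_cnd_add_n (cy, xp, xp, mp, n);
}

int
main (void)
{
  const mp_limb_t m[2] = { 13, 1 };   /* hypothetical modulus: 1*B + 13 */
  mp_limb_t a[2] = { 20, 1 };         /* a >= m: reduced to a - m = 7 */
  mp_limb_t b[2] = { 5, 0 };          /* b < m: restored unchanged */

  reduce_once (a, m, 2);
  assert (a[0] == 7 && a[1] == 0);

  reduce_once (b, m, 2);
  assert (b[0] == 5 && b[1] == 0);
  return 0;
}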
diff --git a/ecc-curve448.c b/ecc-curve448.c
index 729ce985fe4702897e189e8dd1cee85be365340c..c00faa30b51eaf25f93e78c61b8314b92b31ce7b 100644
--- a/ecc-curve448.c
+++ b/ecc-curve448.c
@@ -91,7 +91,7 @@ ecc_curve448_modp(const struct ecc_modulo *m, mp_limb_t *rp)
   tp[4] = c4 + (c7 >> 32) + (tp[3] < c3);
   tp[5] = tp[6] = 0;
   c7 = mpn_add_n (rp, rp, tp, 7);
-  c7 = cnd_add_n (c7, rp, m->B, 7);
+  c7 = mpn_cnd_add_n (c7, rp, rp, m->B, 7);
   assert (c7 == 0);
 }
 #else
@@ -165,7 +165,7 @@ ecc_curve448_zero_p (const struct ecc_modulo *p, mp_limb_t *xp)
   mp_limb_t w;
   mp_size_t i;
   cy = mpn_sub_n (xp, xp, p->m, ECC_LIMB_SIZE);
-  cnd_add_n (cy, xp, p->m, ECC_LIMB_SIZE);
+  mpn_cnd_add_n (cy, xp, xp, p->m, ECC_LIMB_SIZE);
 
   for (i = 0, w = 0; i < ECC_LIMB_SIZE; i++)
     w |= xp[i];
diff --git a/ecc-mod-arith.c b/ecc-mod-arith.c
index 0b315552c40ee4db0d6402743354b471af64f891..34a2854458e72b072e2720e4ba42a42b2f2aae76 100644
--- a/ecc-mod-arith.c
+++ b/ecc-mod-arith.c
@@ -48,8 +48,8 @@ ecc_mod_add (const struct ecc_modulo *m, mp_limb_t *rp,
 {
   mp_limb_t cy;
   cy = mpn_add_n (rp, ap, bp, m->size);
-  cy = cnd_add_n (cy, rp, m->B, m->size);
-  cy = cnd_add_n (cy, rp, m->B, m->size);
+  cy = mpn_cnd_add_n (cy, rp, rp, m->B, m->size);
+  cy = mpn_cnd_add_n (cy, rp, rp, m->B, m->size);
   assert (cy == 0);  
 }
 
@@ -59,8 +59,8 @@ ecc_mod_sub (const struct ecc_modulo *m, mp_limb_t *rp,
 {
   mp_limb_t cy;
   cy = mpn_sub_n (rp, ap, bp, m->size);
-  cy = cnd_sub_n (cy, rp, m->B, m->size);
-  cy = cnd_sub_n (cy, rp, m->B, m->size);
+  cy = mpn_cnd_sub_n (cy, rp, rp, m->B, m->size);
+  cy = mpn_cnd_sub_n (cy, rp, rp, m->B, m->size);
   assert (cy == 0);  
 }
 
@@ -74,7 +74,7 @@ ecc_mod_mul_1 (const struct ecc_modulo *m, mp_limb_t *rp,
   hi = mpn_mul_1 (rp, ap, m->size, b);
   hi = mpn_addmul_1 (rp, m->B, m->size, hi);
   assert (hi <= 1);
-  hi = cnd_add_n (hi, rp, m->B, m->size);
+  hi = mpn_cnd_add_n (hi, rp, rp, m->B, m->size);
   /* Sufficient if b < B^size / p */
   assert (hi == 0);
 }
@@ -89,7 +89,7 @@ ecc_mod_addmul_1 (const struct ecc_modulo *m, mp_limb_t *rp,
   hi = mpn_addmul_1 (rp, ap, m->size, b);
   hi = mpn_addmul_1 (rp, m->B, m->size, hi);
   assert (hi <= 1);
-  hi = cnd_add_n (hi, rp, m->B, m->size);
+  hi = mpn_cnd_add_n (hi, rp, rp, m->B, m->size);
   /* Sufficient roughly if b < B^size / p */
   assert (hi == 0);
 }
@@ -104,7 +104,7 @@ ecc_mod_submul_1 (const struct ecc_modulo *m, mp_limb_t *rp,
   hi = mpn_submul_1 (rp, ap, m->size, b);
   hi = mpn_submul_1 (rp, m->B, m->size, hi);
   assert (hi <= 1);
-  hi = cnd_sub_n (hi, rp, m->B, m->size);
+  hi = mpn_cnd_sub_n (hi, rp, rp, m->B, m->size);
   /* Sufficient roughly if b < B^size / p */
   assert (hi == 0);
 }
diff --git a/ecc-mod-inv.c b/ecc-mod-inv.c
index f306d7de9ae151b0c9fc33250a96ebc5efb23ffb..e45c230ab40f2b29f3dba52d604a3da41f379cfb 100644
--- a/ecc-mod-inv.c
+++ b/ecc-mod-inv.c
@@ -134,19 +134,19 @@ ecc_mod_inv_destructive (const struct ecc_modulo *m,
       assert (bp[0] & 1);
       odd = ap[0] & 1;
 
-      swap = cnd_sub_n (odd, ap, bp, n);
-      cnd_add_n (swap, bp, ap, n);
+      swap = mpn_cnd_sub_n (odd, ap, ap, bp, n);
+      mpn_cnd_add_n (swap, bp, bp, ap, n);
       cnd_neg (swap, ap, ap, n);
 
-      cnd_swap (swap, up, vp, n);
-      cy = cnd_sub_n (odd, up, vp, n);
-      cy -= cnd_add_n (cy, up, m->m, n);
+      mpn_cnd_swap (swap, up, vp, n);
+      cy = mpn_cnd_sub_n (odd, up, up, vp, n);
+      cy -= mpn_cnd_add_n (cy, up, up, m->m, n);
       assert (cy == 0);
 
       cy = mpn_rshift (ap, ap, n, 1);
       assert (cy == 0);
       cy = mpn_rshift (up, up, n, 1);
-      cy = cnd_add_n (cy, up, m->mp1h, n);
+      cy = mpn_cnd_add_n (cy, up, up, m->mp1h, n);
       assert (cy == 0);
     }
   assert ( (ap[0] | ap[n-1]) == 0);
diff --git a/ecc-mod.c b/ecc-mod.c
index 4e77f0c0e8a88244419704143c73ae2d9148e759..fd3b315d9217706bb33d532f557dd729b7014e70 100644
--- a/ecc-mod.c
+++ b/ecc-mod.c
@@ -86,7 +86,7 @@ ecc_mod (const struct ecc_modulo *m, mp_limb_t *rp)
 	    rp[rn+i] = mpn_addmul_1 (rp + rn - mn + i, m->B, bn, rp[rn+i]);
 				     
 	  hi = mpn_add_n (rp + rn - sn, rp + rn - sn, rp + rn, sn);
-	  hi = cnd_add_n (hi, rp + rn - mn, m->B, mn);
+	  hi = mpn_cnd_add_n (hi, rp + rn - mn, rp + rn - mn, m->B, mn);
 	  assert (hi == 0);
 	}
     }
@@ -113,7 +113,7 @@ ecc_mod (const struct ecc_modulo *m, mp_limb_t *rp)
     }
   else
     {
-      hi = cnd_add_n (hi, rp, m->B_shifted, mn);
+      hi = mpn_cnd_add_n (hi, rp, rp, m->B_shifted, mn);
       assert (hi == 0);
     }
 }
diff --git a/ecc-mul-m.c b/ecc-mul-m.c
index 68bdd16e8e948145cd3ef71cfa4a74bbcc67a769..ce612360674c63fb0a24c4c34c6126f141a155de 100644
--- a/ecc-mul-m.c
+++ b/ecc-mul-m.c
@@ -87,7 +87,7 @@ ecc_mul_m (const struct ecc_modulo *m,
     {
       int bit = (n[i/8] >> (i & 7)) & 1;
 
-      cnd_swap (bit, x2, x3, 2*m->size);
+      mpn_cnd_swap (bit, x2, x3, 2*m->size);
 
       /* Formulas from RFC 7748. We compute new coordinates in
 	 memory-address order, since mul and sqr clobbers higher
@@ -112,8 +112,8 @@ ecc_mul_m (const struct ecc_modulo *m,
       ecc_mod_sqr (m, DA, C);
       ecc_mod_mul (m, z3, DA, px);
 
-      /* FIXME: Could be combined with the loop's initial cnd_swap. */
-      cnd_swap (bit, x2, x3, 2*m->size);
+      /* FIXME: Could be combined with the loop's initial mpn_cnd_swap. */
+      mpn_cnd_swap (bit, x2, x3, 2*m->size);
     }
   /* Do the low zero bits, just duplicating x2 */
   for (i = 0; i < bit_low; i++)
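In the Montgomery ladder above, mpn_cnd_swap selects between the two working points without a data-dependent branch: the limbs are exchanged when the key bit is set and rewritten unchanged otherwise, with the same memory accesses either way. A minimal sketch of its semantics, again assuming GMP >= 6.1.0 and arbitrary example limbs:

/* Sketch only, not part of the patch. */
#include <assert.h>
#include <gmp.h>

int
main (void)
{
  mp_limb_t x2[2] = { 1, 2 };
  mp_limb_t x3[2] = { 3, 4 };

  mpn_cnd_swap (0, x2, x3, 2);          /* bit == 0: operands unchanged */
  assert (x2[0] == 1 && x3[0] == 3);

  mpn_cnd_swap (1, x2, x3, 2);          /* bit == 1: operands exchanged */
  assert (x2[0] == 3 && x2[1] == 4);
  assert (x3[0] == 1 && x3[1] == 2);
  return 0;
}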
diff --git a/ecc-pm1-redc.c b/ecc-pm1-redc.c
index 2ed50ca53e0058ca1f568691a455d355d7ba8de4..1b07b7930b00800a42d5b5be62a8791ed9f805cf 100644
--- a/ecc-pm1-redc.c
+++ b/ecc-pm1-redc.c
@@ -53,7 +53,7 @@ ecc_pm1_redc (const struct ecc_modulo *m, mp_limb_t *rp)
     rp[i] = mpn_submul_1 (rp + i + k,
 			  m->redc_mpm1, m->size - k, rp[i]);
   hi = mpn_sub_n (rp, rp + m->size, rp, m->size);
-  cy = cnd_add_n (hi, rp, m->m, m->size);
+  cy = mpn_cnd_add_n (hi, rp, rp, m->m, m->size);
   assert (cy == hi);
 
   if (shift > 0)
diff --git a/ecc-pp1-redc.c b/ecc-pp1-redc.c
index ae5b966949760d03438d10030606242bf558ec4f..9f643d97a82a0e565ff67e87a322f3a2b2c4bbf7 100644
--- a/ecc-pp1-redc.c
+++ b/ecc-pp1-redc.c
@@ -63,7 +63,7 @@ ecc_pp1_redc (const struct ecc_modulo *m, mp_limb_t *rp)
     }
   else
     {
-      cy = cnd_sub_n (hi, rp, m->m, m->size);
+      cy = mpn_cnd_sub_n (hi, rp, rp, m->m, m->size);
       assert (cy == hi);      
     }
 }
diff --git a/ecc-secp192r1.c b/ecc-secp192r1.c
index 046026f3f697575f63121f4e513fcdaa22b07016..05c264080a416418ef5e6eb1886250859b609ce7 100644
--- a/ecc-secp192r1.c
+++ b/ecc-secp192r1.c
@@ -78,7 +78,7 @@ ecc_secp192r1_modp (const struct ecc_modulo *m UNUSED, mp_limb_t *rp)
   cy = sec_add_1 (rp + 5, rp + 5, 1, cy);
   
   assert (cy <= 1);
-  cy = cnd_add_n (cy, rp, ecc_Bmodp, 6);
+  cy = mpn_cnd_add_n (cy, rp, rp, ecc_Bmodp, 6);
   assert (cy == 0);  
 }
 #elif GMP_NUMB_BITS == 64
@@ -102,7 +102,7 @@ ecc_secp192r1_modp (const struct ecc_modulo *m UNUSED, mp_limb_t *rp)
   cy += mpn_add_n (rp + 1, rp + 1, rp + 3, 2);
 
   assert (cy <= 1);
-  cy = cnd_add_n (cy, rp, ecc_Bmodp, 3);
+  cy = mpn_cnd_add_n (cy, rp, rp, ecc_Bmodp, 3);
   assert (cy == 0);  
 }
   
diff --git a/ecc-secp256r1.c b/ecc-secp256r1.c
index adab8d90d82cba6b9bf918b1c27d143046e66233..4b153327bf8ac8266572eb4229dd82295e2dda0d 100644
--- a/ecc-secp256r1.c
+++ b/ecc-secp256r1.c
@@ -127,7 +127,7 @@ ecc_secp256r1_modp (const struct ecc_modulo *p, mp_limb_t *rp)
 	 shifts rather than mul.
       */
       t = mpn_submul_1 (rp + n - 4, p->m, 2, q1);
-      t += cnd_sub_n (q2, rp + n - 3, p->m, 1);
+      t += mpn_cnd_sub_n (q2, rp + n - 3, rp + n - 3, p->m, 1);
       t += (-q2) & 0xffffffff;
 
       u0 = rp[n-2];
@@ -136,7 +136,7 @@ ecc_secp256r1_modp (const struct ecc_modulo *p, mp_limb_t *rp)
       t = (u1 < cy);
       u1 -= cy;
 
-      cy = cnd_add_n (t, rp + n - 4, p->m, 2);
+      cy = mpn_cnd_add_n (t, rp + n - 4, rp + n - 4, p->m, 2);
       u0 += cy;
       u1 += (u0 < cy);
       u1 -= (-t) & 0xffffffff;
@@ -210,7 +210,7 @@ ecc_secp256r1_modq (const struct ecc_modulo *q, mp_limb_t *rp)
 
       assert (q2 < 2);
 
-      c0 = cnd_sub_n (q2, rp + n - 3, q->m, 1);
+      c0 = mpn_cnd_sub_n (q2, rp + n - 3, rp + n - 3, q->m, 1);
       c0 += (-q2) & q->m[1];
       t = mpn_submul_1 (rp + n - 4, q->m, 2, q1);
       c0 += t;
@@ -227,7 +227,7 @@ ecc_secp256r1_modq (const struct ecc_modulo *q, mp_limb_t *rp)
       u1 += t;
       u2 += (t<<32) + (u1 < t);
 
-      t = cnd_add_n (t, rp + n - 4, q->m, 2);
+      t = mpn_cnd_add_n (t, rp + n - 4, rp + n - 4, q->m, 2);
       u1 += t;
       u2 += (u1 < t);
     }
diff --git a/ecc-secp384r1.c b/ecc-secp384r1.c
index 54bcd1128d3908bae35f0b99d5a564d0b4d83c02..317899e468f4ba002afa8f6967dfc4df867e7563 100644
--- a/ecc-secp384r1.c
+++ b/ecc-secp384r1.c
@@ -99,7 +99,7 @@ ecc_secp384r1_modp (const struct ecc_modulo *p, mp_limb_t *rp)
   assert (cy >= bw);
   cy -= bw;
   assert (cy <= 1);
-  cy = cnd_add_n (cy, rp, p->B, ECC_LIMB_SIZE);
+  cy = mpn_cnd_add_n (cy, rp, rp, p->B, ECC_LIMB_SIZE);
   assert (cy == 0);
 }
 #elif GMP_NUMB_BITS == 64
@@ -140,7 +140,7 @@ ecc_secp384r1_modp (const struct ecc_modulo *p, mp_limb_t *rp)
   cy = sec_add_1 (rp + 5, rp + 5, 1, cy);
   assert (cy <= 1);
 
-  cy = cnd_add_n (cy, rp, p->B, ECC_LIMB_SIZE);
+  cy = mpn_cnd_add_n (cy, rp, rp, p->B, ECC_LIMB_SIZE);
   assert (cy == 0);  
 }
 #else
diff --git a/eddsa-hash.c b/eddsa-hash.c
index e05f6ac1a5ed648ac48a6f6af1f8e395e2e33964..3f21dac40e0b45d0b2ec6cc2d9d0f6d6785a848e 100644
--- a/eddsa-hash.c
+++ b/eddsa-hash.c
@@ -71,7 +71,7 @@ _eddsa_hash (const struct ecc_modulo *m,
 
       hi = mpn_addmul_1 (rp + m->size, m->B, m->size, hi);
       assert (hi <= 1);
-      hi = cnd_add_n (hi, rp + m->size, m->B, m->size);
+      hi = mpn_cnd_add_n (hi, rp + m->size, rp + m->size, m->B, m->size);
       assert (hi == 0);
     }
   m->mod (m, rp);
diff --git a/eddsa-sign.c b/eddsa-sign.c
index acb8299b4191aca906cd42d15fcf6a416ef2686d..f8bdf25511df6628fd6416b773546b5350058b35 100644
--- a/eddsa-sign.c
+++ b/eddsa-sign.c
@@ -117,7 +117,7 @@ _eddsa_sign (const struct ecc_curve *ecc,
 
   cy = mpn_submul_1 (sp, ecc->q.m, ecc->p.size, q);
   assert (cy < 2);
-  cy -= cnd_add_n (cy, sp, ecc->q.m, ecc->p.size);
+  cy -= mpn_cnd_add_n (cy, sp, sp, ecc->q.m, ecc->p.size);
   assert (cy == 0);
 
   mpn_get_base256_le (signature + nbytes, nbytes, sp, ecc->q.size);
diff --git a/gmp-glue.c b/gmp-glue.c
index 8819601f910ad5722c66764dceb50dd35c6b442c..3bfc6175a1e549c12a31fe6cd858342933d5d057 100644
--- a/gmp-glue.c
+++ b/gmp-glue.c
@@ -39,10 +39,54 @@
 
 #include "gmp-glue.h"
 
+#if NETTLE_USE_MINI_GMP
+mp_limb_t
+mpn_cnd_add_n (mp_limb_t cnd, mp_limb_t *rp,
+	       const mp_limb_t *ap, const mp_limb_t *bp, mp_size_t n)
+{
+  mp_limb_t cy, mask;
+  mp_size_t  i;
+
+  mask = -(mp_limb_t) (cnd != 0);
+
+  for (i = 0, cy = 0; i < n; i++)
+    {
+      mp_limb_t rl = ap[i] + cy;
+      mp_limb_t bl = bp[i] & mask;
+      cy = (rl < cy);
+      rl += bl;
+      cy += (rl < bl);
+      rp[i] = rl;
+    }
+  return cy;
+}
+
+mp_limb_t
+mpn_cnd_sub_n (mp_limb_t cnd, mp_limb_t *rp,
+	       const mp_limb_t *ap, const mp_limb_t *bp, mp_size_t n)
+{
+  mp_limb_t cy, mask;
+  mp_size_t  i;
+
+  mask = -(mp_limb_t) (cnd != 0);
+
+  for (i = 0, cy = 0; i < n; i++)
+    {
+      mp_limb_t al = ap[i];
+      mp_limb_t bl = bp[i] & mask;
+      mp_limb_t sl;
+      sl = al - cy;
+      cy = (al < cy) + (sl < bl);
+      sl -= bl;
+      rp[i] = sl;
+    }
+  return cy;
+}
+
 void
-cnd_swap (mp_limb_t cnd, mp_limb_t *ap, mp_limb_t *bp, mp_size_t n)
+mpn_cnd_swap (mp_limb_t cnd, volatile mp_limb_t *ap, volatile mp_limb_t *bp, mp_size_t n)
 {
-  mp_limb_t mask = - (mp_limb_t) (cnd != 0);
+  volatile mp_limb_t mask = - (mp_limb_t) (cnd != 0);
   mp_size_t i;
   for (i = 0; i < n; i++)
     {
@@ -55,6 +99,8 @@ cnd_swap (mp_limb_t cnd, mp_limb_t *ap, mp_limb_t *bp, mp_size_t n)
     }
 }
 
+#endif /* NETTLE_USE_MINI_GMP */
+
 /* Additional convenience functions. */
 
 int
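The mini-gmp fallbacks above all rely on the same constant-time idiom: mask = -(mp_limb_t) (cnd != 0) is an all-ones or all-zeros limb, so ANDing each source limb with it applies or suppresses the operation without branching on cnd. A tiny self-contained illustration of just that masking step (plain C, no GMP, reduced to a single 64-bit "limb"; the helper name is made up):

/* Sketch only, not part of the patch. */
#include <assert.h>
#include <stdint.h>

static uint64_t
cnd_add_1 (uint64_t cnd, uint64_t a, uint64_t b)
{
  uint64_t mask = -(uint64_t) (cnd != 0);  /* 0 or ~0, independent of b */
  return a + (b & mask);                   /* adds b only when cnd != 0 */
}

int
main (void)
{
  assert (cnd_add_1 (0, 10, 3) == 10);
  assert (cnd_add_1 (7, 10, 3) == 13);
  return 0;
}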
diff --git a/gmp-glue.h b/gmp-glue.h
index 4dfcb3848e2c3a2c479d3df5522b5e8e2d6ddae3..7ebfd782b9570fd18d2a1f41a3c7a9e781b7ff16 100644
--- a/gmp-glue.h
+++ b/gmp-glue.h
@@ -35,7 +35,6 @@
 
 #include "bignum.h"
 
-#define cnd_swap _nettle_cnd_swap
 #define mpz_limbs_cmp _nettle_mpz_limbs_cmp
 #define mpz_limbs_read_n _nettle_mpz_limbs_read_n
 #define mpz_limbs_copy _nettle_mpz_limbs_copy
@@ -57,22 +56,22 @@
   } while (0)
 #define TMP_GMP_FREE(name) (gmp_free(name, tmp_##name##_size))
 
+#if NETTLE_USE_MINI_GMP
+mp_limb_t
+mpn_cnd_add_n (mp_limb_t cnd, mp_limb_t *rp,
+	       const mp_limb_t *ap, const mp_limb_t *bp, mp_size_t n);
 
-/* Use only in-place operations, so we can fall back to addmul_1/submul_1 */
-#ifdef mpn_cnd_add_n
-# define cnd_add_n(cnd, rp, ap, n) mpn_cnd_add_n ((cnd), (rp), (rp), (ap), (n))
-# define cnd_sub_n(cnd, rp, ap, n) mpn_cnd_sub_n ((cnd), (rp), (rp), (ap), (n))
-#else
-# define cnd_add_n(cnd, rp, ap, n) mpn_addmul_1 ((rp), (ap), (n), (cnd) != 0)
-# define cnd_sub_n(cnd, rp, ap, n) mpn_submul_1 ((rp), (ap), (n), (cnd) != 0)
+mp_limb_t
+mpn_cnd_sub_n (mp_limb_t cnd, mp_limb_t *rp,
+	       const mp_limb_t *ap, const mp_limb_t *bp, mp_size_t n);
+
+void
+mpn_cnd_swap (mp_limb_t cnd, volatile mp_limb_t *ap, volatile mp_limb_t *bp, mp_size_t n);
 #endif
 
 #define NETTLE_OCTET_SIZE_TO_LIMB_SIZE(n) \
   (((n) * 8 + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS)
 
-void
-cnd_swap (mp_limb_t cnd, mp_limb_t *ap, mp_limb_t *bp, mp_size_t n);
-
 /* Convenience functions */
 int
 mpz_limbs_cmp (mpz_srcptr a, const mp_limb_t *bp, mp_size_t bn);
diff --git a/rsa-sec-compute-root.c b/rsa-sec-compute-root.c
index 98b6c2a50618b1c4ec7b3dfe7767f69b24f56533..8e9676b685cf60c982420fdcaff2de6790187f5b 100644
--- a/rsa-sec-compute-root.c
+++ b/rsa-sec-compute-root.c
@@ -184,7 +184,7 @@ _rsa_sec_compute_root (const struct rsa_private_key *key,
   sec_mod_mul (scratch_out, r_mod_q, qn, mpz_limbs_read (key->c), cn, pp, pn,
 	       scratch_out + cn + qn);
   cy = mpn_sub_n (r_mod_p, r_mod_p, scratch_out, pn);
-  cnd_add_n (cy, r_mod_p, pp, pn);
+  mpn_cnd_add_n (cy, r_mod_p, r_mod_p, pp, pn);
 
   /* Finally, compute x = r_mod_q + q r_mod_p' */
   sec_mul (scratch_out, qp, qn, r_mod_p, pn, scratch_out + pn + qn);