Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
N
nettle
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Deploy
Releases
Container registry
Model registry
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Dmitry Baryshkov
nettle
Commits
6487ef7e
Commit
6487ef7e
authored
Nov 27, 2018
by
Niels Möller
Browse files
Options
Downloads
Patches
Plain Diff
Rewrote _rsa_sec_compute_root, for clarity.
Use new local helper functions, with their own itch functions.
parent
07a31f84
Branches
Branches containing commit
Tags
Tags containing commit
No related merge requests found
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
ChangeLog
+7
-0
7 additions, 0 deletions
ChangeLog
rsa-sec-compute-root.c
+110
-55
110 additions, 55 deletions
rsa-sec-compute-root.c
with
117 additions
and
55 deletions
ChangeLog
+
7
−
0
View file @
6487ef7e
2018-11-27 Niels Möller <nisse@lysator.liu.se>
* rsa-sec-compute-root.c (sec_mul, sec_mod_mul, sec_powm): New
local helper functions, with their own itch functions.
(_rsa_sec_compute_root_itch, _rsa_sec_compute_root): Rewrote to
use helpers, for clarity.
2018-11-26 Niels Möller <nisse@lysator.liu.se>
* testsuite/rsa-compute-root-test.c (generate_keypair): Simplify
...
...
This diff is collapsed.
Click to expand it.
rsa-sec-compute-root.c
+
110
−
55
View file @
6487ef7e
...
...
@@ -45,39 +45,101 @@
#if !NETTLE_USE_MINI_GMP
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Like mpn_sec_mul_itch, monotonously increasing in operand sizes. */
static mp_size_t
sec_mul_itch (mp_size_t an, mp_size_t bn)
{
  /* mpn_sec_mul_itch wants the larger operand size first; order the
     arguments so either call order yields the same result. */
  if (an < bn)
    return mpn_sec_mul_itch (bn, an);
  return mpn_sec_mul_itch (an, bn);
}
/* Writes an + bn limbs to the rp area */
static void
sec_mul (mp_limb_t *rp,
	 const mp_limb_t *ap, mp_size_t an,
	 const mp_limb_t *bp, mp_size_t bn,
	 mp_limb_t *scratch)
{
  /* mpn_sec_mul requires the first operand to be at least as large as
     the second; swap the operands when needed. */
  if (an < bn)
    mpn_sec_mul (rp, bp, bn, ap, an, scratch);
  else
    mpn_sec_mul (rp, ap, an, bp, bn, scratch);
}
/* Scratch needed by sec_mod_mul: enough for both the an x bn
   multiplication and the reduction of the an + bn limb product. */
static mp_size_t
sec_mod_mul_itch (mp_size_t an, mp_size_t bn, mp_size_t mn)
{
  mp_size_t for_mul = sec_mul_itch (an, bn);
  mp_size_t for_mod = mpn_sec_div_r_itch (an + bn, mn);
  return MAX (for_mul, for_mod);
}
/* Sets r <-- a b % m. Needs space for an + bn limbs at rp. It is
   required that an + bn >= mn. */
static void
sec_mod_mul (mp_limb_t *rp,
	     const mp_limb_t *ap, mp_size_t an,
	     const mp_limb_t *bp, mp_size_t bn,
	     const mp_limb_t *mp, mp_size_t mn,
	     mp_limb_t *scratch)
{
  assert (an + bn >= mn);
  /* Form the full an + bn limb product, then reduce it in place. */
  sec_mul (rp, ap, an, bp, bn, scratch);
  mpn_sec_div_r (rp, an + bn, mp, mn, scratch);
}
/* Scratch needed by sec_powm: a bn-limb base copy while reducing it
   mod m, then an mn-limb reduced base while exponentiating. */
static mp_size_t
sec_powm_itch (mp_size_t bn, mp_size_t en, mp_size_t mn)
{
  mp_size_t for_mod = bn + mpn_sec_div_r_itch (bn, mn);
  mp_size_t for_pow = mn + mpn_sec_powm_itch (mn, en * GMP_NUMB_BITS, mn);
  return MAX (for_mod, for_pow);
}
/* Sets r <-- b ^ e % m. Performs an initial reduction b mod m, and
   requires bn >= mn. */
static void
sec_powm (mp_limb_t *rp,
	  const mp_limb_t *bp, mp_size_t bn,
	  const mp_limb_t *ep, mp_size_t en,
	  const mp_limb_t *mp, mp_size_t mn,
	  mp_limb_t *scratch)
{
  assert (bn >= mn);
  assert (en <= mn);
  /* Work on a copy of the base, placed at the start of the scratch
     area, and reduce it mod m first. */
  mpn_copyi (scratch, bp, bn);
  mpn_sec_div_r (scratch, bn, mp, mn, scratch + bn);
  /* The exponent bit count is en * GMP_NUMB_BITS; any leading zero
     bits only cost time, not correctness. */
  mpn_sec_powm (rp, scratch, mn, ep, en * GMP_NUMB_BITS,
		mp, mn, scratch + mn);
}
/* Returns the scratch limb count _rsa_sec_compute_root needs for the
   given key.  Accounts for the r_mod_p / r_mod_q temporaries (pn + qn
   limbs) plus the largest scratch demand of any single step.

   NOTE(review): this span of the diff interleaved the deleted
   pre-rewrite body with the added one; this is the coherent
   post-rewrite definition built on the sec_* helper itch functions. */
mp_size_t
_rsa_sec_compute_root_itch (const struct rsa_private_key *key)
{
  mp_size_t nn = NETTLE_OCTET_SIZE_TO_LIMB_SIZE (key->size);
  mp_size_t pn = mpz_size (key->p);
  mp_size_t qn = mpz_size (key->q);
  mp_size_t an = mpz_size (key->a);
  mp_size_t bn = mpz_size (key->b);
  mp_size_t cn = mpz_size (key->c);

  /* Scratch for the two exponentiations, each on the full nn-limb
     input reduced mod p or q. */
  mp_size_t powm_p_itch = sec_powm_itch (nn, an, pn);
  mp_size_t powm_q_itch = sec_powm_itch (nn, bn, qn);
  /* sec_mod_mul output area (up to MAX(pn,qn) + cn limbs) precedes
     its own scratch. */
  mp_size_t mod_mul_itch = cn + MAX (pn, qn)
    + sec_mod_mul_itch (MAX (pn, qn), cn, pn);
  mp_size_t mul_itch = sec_mul_itch (qn, pn);
  mp_size_t add_1_itch = mpn_sec_add_1_itch (nn - qn);

  /* pn + qn for the product q * r_mod_p' */
  mp_size_t itch = pn + qn + MAX (mul_itch, add_1_itch);

  itch = MAX (itch, powm_p_itch);
  itch = MAX (itch, powm_q_itch);
  itch = MAX (itch, mod_mul_itch);

  /* pn + qn for the r_mod_p and r_mod_q temporaries. */
  return pn + qn + itch;
}
void
...
...
@@ -92,49 +154,42 @@ _rsa_sec_compute_root (const struct rsa_private_key *key,
const
mp_limb_t
*
pp
=
mpz_limbs_read
(
key
->
p
);
const
mp_limb_t
*
qp
=
mpz_limbs_read
(
key
->
q
);
mp_size_t
cn
=
mpz_size
(
key
->
c
);
mp_size_t
pn
=
mpz_size
(
key
->
p
);
mp_size_t
qn
=
mpz_size
(
key
->
q
);
mp_size_t
tn
=
nn
+
1
;
mp_size_t
an
=
mpz_size
(
key
->
a
);
mp_size_t
bn
=
mpz_size
(
key
->
b
);
mp_size_t
cn
=
mpz_size
(
key
->
c
);
mp_limb_t
*
r_mod_p
=
scratch
+
MAX
(
tn
,
MAX
(
pn
+
cn
,
qn
+
cn
))
;
mp_limb_t
*
r_mod_q
=
r_mod_p
+
pn
;
mp_limb_t
*
s
p
=
r_mod_q
+
qn
;
mp_limb_t
*
r_mod_p
=
scratch
;
mp_limb_t
*
r_mod_q
=
scratch
+
pn
;
mp_limb_t
*
s
cratch_out
=
r_mod_q
+
qn
;
mp_limb_t
cy
;
assert
(
pn
+
qn
<=
tn
);
assert
(
pn
<=
nn
);
assert
(
qn
<=
nn
);
assert
(
an
<=
pn
);
assert
(
bn
<=
qn
);
assert
(
cn
<=
pn
);
/* Compute r_mod_q = m^d % q = (m%q)^b % q */
mpn_copyi
(
scratch
,
mp
,
nn
);
mpn_sec_div_r
(
scratch
,
nn
,
qp
,
qn
,
sp
);
mpn_sec_powm
(
r_mod_q
,
scratch
,
qn
,
mpz_limbs_read
(
key
->
b
),
mpz_size
(
key
->
b
)
*
GMP_NUMB_BITS
,
qp
,
qn
,
sp
);
/* Compute r_mod_p = m^d % p = (m%p)^a % p */
mpn_copyi
(
scratch
,
mp
,
nn
);
mpn_sec_div_r
(
scratch
,
nn
,
pp
,
pn
,
sp
);
mpn_sec_powm
(
r_mod_p
,
scratch
,
pn
,
mpz_limbs_read
(
key
->
a
),
mpz_size
(
key
->
a
)
*
GMP_NUMB_BITS
,
pp
,
pn
,
sp
);
sec_powm
(
r_mod_p
,
mp
,
nn
,
mpz_limbs_read
(
key
->
a
),
an
,
pp
,
pn
,
scratch_out
);
/* Compute r_mod_q = m^d % q = (m%q)^b % q */
sec_powm
(
r_mod_q
,
mp
,
nn
,
mpz_limbs_read
(
key
->
b
),
bn
,
qp
,
qn
,
scratch_out
);
/* Set r_mod_p' = r_mod_p * c % p - r_mod_q * c % p . */
mpn_sec_mul
(
scratch
,
r_mod_p
,
pn
,
mpz_limbs_read
(
key
->
c
),
cn
,
sp
);
mpn_sec_div_r
(
scratch
,
pn
+
cn
,
pp
,
pn
,
sp
);
mpn_copyi
(
r_mod_p
,
scratch
,
pn
);
mpn_sec_mul
(
scratch
,
r_mod_q
,
qn
,
mpz_limbs_read
(
key
->
c
),
cn
,
sp
);
mpn_sec_div_r
(
scratch
,
qn
+
cn
,
pp
,
pn
,
sp
);
cy
=
mpn_sub_n
(
r_mod_p
,
r_mod_p
,
scratch
,
pn
);
sec_mod_mul
(
scratch_out
,
r_mod_p
,
pn
,
mpz_limbs_read
(
key
->
c
),
cn
,
pp
,
pn
,
scratch_out
+
cn
+
pn
);
mpn_copyi
(
r_mod_p
,
scratch_out
,
pn
);
sec_mod_mul
(
scratch_out
,
r_mod_q
,
qn
,
mpz_limbs_read
(
key
->
c
),
cn
,
pp
,
pn
,
scratch_out
+
cn
+
qn
);
cy
=
mpn_sub_n
(
r_mod_p
,
r_mod_p
,
scratch_out
,
pn
);
cnd_add_n
(
cy
,
r_mod_p
,
pp
,
pn
);
/* Finally, compute x = r_mod_q + q r_mod_p' */
if
(
qn
>
pn
)
mpn_sec_mul
(
scratch
,
qp
,
qn
,
r_mod_p
,
pn
,
sp
);
else
mpn_sec_mul
(
scratch
,
r_mod_p
,
pn
,
qp
,
qn
,
sp
);
sec_mul
(
scratch_out
,
qp
,
qn
,
r_mod_p
,
pn
,
scratch_out
+
pn
+
qn
);
cy
=
mpn_add_n
(
rp
,
scratch
,
r_mod_q
,
qn
);
mpn_sec_add_1
(
rp
+
qn
,
scratch
+
qn
,
nn
-
qn
,
cy
,
s
p
);
cy
=
mpn_add_n
(
rp
,
scratch
_out
,
r_mod_q
,
qn
);
mpn_sec_add_1
(
rp
+
qn
,
scratch
_out
+
qn
,
nn
-
qn
,
cy
,
s
cratch_out
+
pn
+
qn
);
}
#endif
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment