Commit 0da001a8, authored Sep 13, 2008 by Niels Möller (nettle)
Fixes to get through assembler.

Rev: nettle/x86_64/aes-encrypt-internal.asm:1.4
Rev: nettle/x86_64/aes.m4:1.3
Parent: 6b8cfa50
Showing 2 changed files with 37 additions and 39 deletions:

  x86_64/aes-encrypt-internal.asm   +17 −19
  x86_64/aes.m4                     +20 −20
x86_64/aes-encrypt-internal.asm (+17 −19)
@@ -18,9 +18,7 @@ C along with the nettle library; see the file COPYING.LIB. If not, write to
 C the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 C MA 02111-1307, USA.
 
-C Use same macros as for plain x86. FIXME: AES_SUBST_BYTE uses
-C hardcoded registers.
-include_src(<x86/aes.m4>)
+include_src(<x86_64/aes.m4>)
 
 
 C Register usage:
@@ -47,9 +45,9 @@ define(<COUNT>, <%r15d>)
 C Put the outer loop counter on the stack, and reuse the LENGTH
 C register as a temporary.
-define(<FRAME_COUNT>, <(%esp)>)
-define(<TMP>, <%edx>)
-define(<TMPPTR>, <%rdx>)
+
+define(<FRAME_COUNT>, <(%rsp)>)
+define(<TMP>, <%rdx>)
 
 	.file "aes-encrypt-internal.asm"
 
 C _aes_encrypt(struct aes_context *ctx,
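These define changes are the heart of the fix in this file: FRAME_COUNT and TMP were still the 32-bit forms inherited from the x86 version, and TMP is also used as a table index inside the AES_ROUND macro, where x86-64 addressing wants a 64-bit register. A minimal sketch of the two failure modes, written in the file's asm dialect (C marks a comment; the %rbx table pointer and %eax value register are purely illustrative, not taken from the commit):

	movl	%eax, (%esp)		C 32-bit base register: rejected by the
					C assembler in 64-bit mode (or needs an
					C address-size override)
	movl	(%rbx, %edx, 4), %eax	C 32-bit index with a 64-bit base: rejected

	movl	%eax, (%rsp)		C the 64-bit forms this commit switches to
	movl	(%rbx, %rdx, 4), %eax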
@@ -76,41 +74,41 @@ PROLOGUE(_nettle_aes_encrypt)
 	shrl	$4, LENGTH
 	movl	LENGTH, FRAME_COUNT
 
 .Lblock_loop:
-	movl	CTX, KEY
+	mov	CTX, KEY
 	AES_LOAD(SA, SB, SC, SD, SRC, KEY)
-	addl	$16, SRC	C Increment src pointer
+	add	$16, SRC	C Increment src pointer
 
 	C get number of rounds to do from ctx struct
 	movl	AES_NROUNDS (CTX), COUNT
 	shrl	$1, COUNT
 	subl	$1, COUNT
 
-	addl	$16, KEY	C point to next key
+	add	$16, KEY	C point to next key
 	ALIGN(4)
 .Lround_loop:
-	AES_ROUND(TABLE, SA,SB,SC,SD, TA, TMPPTR)
+	AES_ROUND(TABLE, SA,SB,SC,SD, TA, TMP)
 	xorl	(KEY), TA
 
-	AES_ROUND(TABLE, SB,SC,SD,SA, TB, TMPPTR)
+	AES_ROUND(TABLE, SB,SC,SD,SA, TB, TMP)
 	xorl	4(KEY), TB
 
-	AES_ROUND(TABLE, SC,SD,SA,SB, TC, TMPPTR)
+	AES_ROUND(TABLE, SC,SD,SA,SB, TC, TMP)
 	xorl	8(KEY), TC
 
-	AES_ROUND(TABLE, SD,SA,SB,SC, TD, TMPPTR)
+	AES_ROUND(TABLE, SD,SA,SB,SC, TD, TMP)
 	xorl	12(KEY), TD
 
-	AES_ROUND(TABLE, TA,TB,TC,TD, SA, TMPPTR)
+	AES_ROUND(TABLE, TA,TB,TC,TD, SA, TMP)
 	xorl	16(KEY), SA
 
-	AES_ROUND(TABLE, TB,TC,TD,TA, SB, TMPPTR)
+	AES_ROUND(TABLE, TB,TC,TD,TA, SB, TMP)
 	xorl	20(KEY), SB
 
-	AES_ROUND(TABLE, TC,TD,TA,TB, SC, TMPPTR)
+	AES_ROUND(TABLE, TC,TD,TA,TB, SC, TMP)
 	xorl	24(KEY), SC
 
-	AES_ROUND(TABLE, TD,TA,TB,TC, SD, TMPPTR)
+	AES_ROUND(TABLE, TD,TA,TB,TC, SD, TMP)
 	xorl	28(KEY), SD
 
 	addl	$32, KEY	C point to next key
@@ -127,7 +125,7 @@ PROLOGUE(_nettle_aes_encrypt)
 	C S-box substitution
 	mov	$3, COUNT
 .Lsubst:
-	AES_SUBST_BYTE(TA,TB,TC,TD, TABLE, TMPPTR)
+	AES_SUBST_BYTE(TA,TB,TC,TD, TABLE, TMP)
 
 	decl	COUNT
 	jnz	.Lsubst
@@ -135,7 +133,7 @@ PROLOGUE(_nettle_aes_encrypt)
 
 	C Add last subkey, and store encrypted data
 	AES_STORE(TA,TB,TC,TD, KEY, DST)
-	addl	$16, DST
+	add	$16, DST
 
 	decl	FRAME_COUNT
 	jnz	.Lblock_loop
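The remaining changes in this file all follow the same pattern: CTX, KEY, SRC and DST are 64-bit pointer registers, so the explicit 32-bit `l' suffix has to go and the operand size is taken from the registers instead. A short sketch of what the assembler objects to, assuming for illustration that CTX is %rdi and KEY is %r14 (their real definitions sit in an unchanged part of the file):

	movl	%rdi, %r14	C rejected: `l' suffix with 64-bit registers
	addl	$16, %r14	C rejected for the same reason

	mov	%rdi, %r14	C accepted: size inferred from the registers
	add	$16, %r14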
x86_64/aes.m4 (+20 −20)
@@ -70,34 +70,34 @@ define(<AES_STORE>, <
 dnl AES_ROUND(table,a,b,c,d,out,ptr)
 dnl Computes one word of the AES round. Leaves result in $6.
 define(<AES_ROUND>, <
-	movzbl	LREG($2), $7
+	movzb	LREG($2), $7
 	movl	AES_TABLE0 ($1, $7, 4),$6
 	movl	$3, XREG($7)
-	shrl	<$>8,$7
-	andl	<$>0xff,$7
+	shr	<$>8,$7
+	and	<$>0xff,$7
 	xorl	AES_TABLE1 ($1, $7, 4),$6
 	movl	$4,XREG($7)
-	shrl	<$>16,$7
-	andl	<$>0xff,$7
+	shr	<$>16,$7
+	and	<$>0xff,$7
 	xorl	AES_TABLE2 ($1, $7, 4),$6
 	movl	$5,XREG($7)
 	xorl	AES_TABLE3 ($1, $7, 4),$6>)dnl
 
-dnl AES_FINAL_ROUND(a, b, c, d, table out, tmp)
+dnl AES_FINAL_ROUND(a, b, c, d, table, out, tmp)
 dnl Computes one word of the final round. Leaves result in %edi.
 dnl Note that we have to quote $ in constants.
 define(<AES_FINAL_ROUND>, <
-	movzb	LREG($1),$6
-	movzbl	($5, $6), $6
-	movl	$2,$7
-	andl	<$>0x0000ff00,$7
-	orl	$7, $6
-	movl	$3,$7
-	andl	<$>0x00ff0000,$7
-	orl	$7, $6
-	movl	$4,$7
-	andl	<$>0xff000000,$7
-	orl	$7, $6
+	movzb	LREG($1),$7
+	movzbl	($5, $7), $6
+	movl	$2,XREG($7)
+	andl	<$>0x0000ff00,XREG($7)
+	orl	XREG($7), $6
+	movl	$3,XREG($7)
+	andl	<$>0x00ff0000,XREG($7)
+	orl	XREG($7), $6
+	movl	$4,XREG($7)
+	andl	<$>0xff000000,XREG($7)
+	orl	XREG($7), $6
 	roll	<$>8, $6>)dnl
 
 dnl AES_SUBST_BYTE(A, B, C, D, table, tmp)
@@ -110,14 +110,14 @@ define(<AES_SUBST_BYTE>, <
 	movb	($5, $6),LREG($1)
 	roll	<$>8,$1
 
-	movzbl	LREG($2),$6
+	movzb	LREG($2),$6
 	movb	($5, $6),LREG($2)
 	roll	<$>8,$2
 
-	movzbl	LREG($3),$6
+	movzb	LREG($3),$6
 	movb	($5, $6),LREG($3)
 	roll	<$>8,$3
 
-	movzbl	LREG($4),$6
+	movzb	LREG($4),$6
 	movb	($5, $6),LREG($4)
 	roll	<$>8,$4>)dnl
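The macro changes apply the same idea to the shared temporary: the caller now passes a single 64-bit register (TMP, %rdx after this commit), which can be used directly as a table index, while movzb without a size suffix lets the assembler pick the destination width and XREG()/LREG() provide explicit 32-bit and 8-bit views where needed. A rough expansion of the first few AES_ROUND lines, using illustrative registers that are not taken from the commit ($1 = %rsi, $2 = %eax, $3 = %ecx, $6 = %ebx, $7 = %rdx; the AES_TABLEn offsets are defined elsewhere in the sources):

	movzb	%al, %rdx				C movzbl here would be rejected (64-bit destination)
	movl	AES_TABLE0 (%rsi, %rdx, 4), %ebx	C %rdx works directly as the table index
	movl	%ecx, %edx				C XREG($7): explicit 32-bit move into the temp
	shr	$8, %rdx				C no suffix; size taken from %rdx
	and	$0xff, %rdx
	xorl	AES_TABLE1 (%rsi, %rdx, 4), %ebx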