Compare revisions

Showing with 4064 additions and 0 deletions
C arm/memxor3.asm
ifelse(`
Copyright (C) 2013, 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Possible speedups:
C
C The ldm instruction can load two registers per cycle,
C if the address is two-word aligned. Or three registers in two
C cycles, regardless of alignment.
C Register usage:
define(`DST', `r0')
define(`AP', `r1')
define(`BP', `r2')
define(`N', `r3')
C Temporaries r4-r7
define(`ACNT', `r8')
define(`ATNC', `r10')
define(`BCNT', `r11')
define(`BTNC', `r12')
C little-endian and big-endian need to shift in different directions for
C alignment correction
define(`S0ADJ', IF_LE(`lsr', `lsl'))
define(`S1ADJ', IF_LE(`lsl', `lsr'))
.syntax unified
.file "memxor3.asm"
.text
.arm
C memxor3(void *dst, const void *a, const void *b, size_t n)
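C Editorial note, not from the original file: memxor3 computes dst = a ^ b
C over n bytes. A byte-wise C reference, working downwards like the code
C below so that dst may overlap the upper parts of a and b:
C
C   #include <stdint.h>
C   #include <stddef.h>
C   static void
C   memxor3_ref(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n)
C   {
C     while (n-- > 0)
C       dst[n] = a[n] ^ b[n];
C   }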
.align 2
PROLOGUE(nettle_memxor3)
cmp N, #0
beq .Lmemxor3_ret
push {r4,r5,r6,r7,r8,r10,r11}
cmp N, #7
add AP, N
add BP, N
add DST, N
bcs .Lmemxor3_large
C Simple byte loop
.Lmemxor3_bytes:
ldrb r4, [AP, #-1]!
ldrb r5, [BP, #-1]!
eor r4, r5
strb r4, [DST, #-1]!
subs N, #1
bne .Lmemxor3_bytes
.Lmemxor3_done:
pop {r4,r5,r6,r7,r8,r10,r11}
.Lmemxor3_ret:
bx lr
.Lmemxor3_align_loop:
ldrb r4, [AP, #-1]!
ldrb r5, [BP, #-1]!
eor r5, r4
strb r5, [DST, #-1]!
sub N, #1
.Lmemxor3_large:
tst DST, #3
bne .Lmemxor3_align_loop
C We have at least 4 bytes left to do here.
sub N, #4
ands ACNT, AP, #3
lsl ACNT, #3
beq .Lmemxor3_a_aligned
ands BCNT, BP, #3
lsl BCNT, #3
bne .Lmemxor3_uu
C Swap
mov r4, AP
mov AP, BP
mov BP, r4
.Lmemxor3_au:
C NOTE: We have the relevant shift count in ACNT, not BCNT
C AP is aligned, BP is not
C v original SRC
C +-------+------+
C |SRC-4 |SRC |
C +---+---+------+
C |DST-4 |
C +-------+
C
C With little-endian, we need to do
C DST[i-1] ^= (SRC[i-1] >> CNT) ^ (SRC[i] << TNC)
C With big-endian, we need to do
C DST[i-1] ^= (SRC[i-1] << CNT) ^ (SRC[i] >> TNC)
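C Editorial note, not from the original file: a little-endian C sketch of
C one output word in this case, with a the aligned word from AP, b_lo/b_hi
C the two aligned BP words straddling the wanted unaligned word, and
C cnt = ACNT in bits (one of 8, 16, 24):
C
C   static uint32_t
C   xor3_word_le(uint32_t a, uint32_t b_lo, uint32_t b_hi, unsigned cnt)
C   {
C     return a ^ (b_lo >> cnt) ^ (b_hi << (32 - cnt));
C   }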
rsb ATNC, ACNT, #32
bic BP, #3
ldr r4, [BP]
tst N, #4
itet eq
moveq r5, r4
subne N, #4
beq .Lmemxor3_au_odd
.Lmemxor3_au_loop:
ldr r5, [BP, #-4]!
ldr r6, [AP, #-4]!
eor r6, r6, r4, S1ADJ ATNC
eor r6, r6, r5, S0ADJ ACNT
str r6, [DST, #-4]!
.Lmemxor3_au_odd:
ldr r4, [BP, #-4]!
ldr r6, [AP, #-4]!
eor r6, r6, r5, S1ADJ ATNC
eor r6, r6, r4, S0ADJ ACNT
str r6, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_au_loop
adds N, #8
beq .Lmemxor3_done
C Leftover bytes in r4, low end on LE and high end on BE before
C preparatory alignment correction
ldr r5, [AP, #-4]
eor r4, r5, r4, S1ADJ ATNC
C now byte-aligned at the high end on LE and the low end on BE, since we
C work downwards, storing the very first bytes of the buffer
.Lmemxor3_au_leftover:
C Store a byte at a time
C bring uppermost byte down for saving while preserving lower ones
IF_LE(` ror r4, #24')
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
C bring down next byte, no need to preserve
IF_BE(` lsr r4, #8')
sub AP, #1
bne .Lmemxor3_au_leftover
b .Lmemxor3_bytes
.Lmemxor3_a_aligned:
ands ACNT, BP, #3
lsl ACNT, #3
bne .Lmemxor3_au ;
C a, b and dst all have the same alignment.
subs N, #8
bcc .Lmemxor3_aligned_word_end
C This loop runs at 8 cycles per iteration. It has been
C observed running at only 7 cycles; at that speed, the loop
C started at offset 0x2ac in the object file.
C FIXME: consider software pipelining, similarly to the memxor
C loop.
.Lmemxor3_aligned_word_loop:
ldmdb AP!, {r4,r5,r6}
ldmdb BP!, {r7,r8,r10}
subs N, #12
eor r4, r7
eor r5, r8
eor r6, r10
stmdb DST!, {r4, r5,r6}
bcs .Lmemxor3_aligned_word_loop
.Lmemxor3_aligned_word_end:
C We have 0-11 bytes left to do, and N holds the number of bytes remaining minus 12.
adds N, #4
bcc .Lmemxor3_aligned_lt_8
C Do 8 bytes more, leftover is in N
ldmdb AP!, {r4, r5}
ldmdb BP!, {r6, r7}
eor r4, r6
eor r5, r7
stmdb DST!, {r4,r5}
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_aligned_lt_8:
adds N, #4
bcc .Lmemxor3_aligned_lt_4
ldr r4, [AP,#-4]!
ldr r5, [BP,#-4]!
eor r4, r5
str r4, [DST,#-4]!
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_aligned_lt_4:
adds N, #4
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_uu:
cmp ACNT, BCNT
bic AP, #3
bic BP, #3
rsb ATNC, ACNT, #32
bne .Lmemxor3_uud
C AP and BP are unaligned in the same way
ldr r4, [AP]
ldr r6, [BP]
eor r4, r6
tst N, #4
itet eq
moveq r5, r4
subne N, #4
beq .Lmemxor3_uu_odd
.Lmemxor3_uu_loop:
ldr r5, [AP, #-4]!
ldr r6, [BP, #-4]!
eor r5, r6
S1ADJ r4, ATNC
eor r4, r4, r5, S0ADJ ACNT
str r4, [DST, #-4]!
.Lmemxor3_uu_odd:
ldr r4, [AP, #-4]!
ldr r6, [BP, #-4]!
eor r4, r6
S1ADJ r5, ATNC
eor r5, r5, r4, S0ADJ ACNT
str r5, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_uu_loop
adds N, #8
beq .Lmemxor3_done
C Leftover bytes in r4, low end on LE and high end on BE before
C preparatory alignment correction
IF_LE(` ror r4, ACNT')
IF_BE(` ror r4, ATNC')
C now byte-aligned at the high end on LE and the low end on BE, since we
C work downwards, storing the very first bytes of the buffer
.Lmemxor3_uu_leftover:
C bring uppermost byte down for saving while preserving lower ones
IF_LE(` ror r4, #24')
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
C bring down next byte, no need to preserve
IF_BE(` lsr r4, #8')
bne .Lmemxor3_uu_leftover
b .Lmemxor3_bytes
.Lmemxor3_uud:
C Both AP and BP unaligned, and in different ways
rsb BTNC, BCNT, #32
ldr r4, [AP]
ldr r6, [BP]
tst N, #4
ittet eq
moveq r5, r4
moveq r7, r6
subne N, #4
beq .Lmemxor3_uud_odd
.Lmemxor3_uud_loop:
ldr r5, [AP, #-4]!
ldr r7, [BP, #-4]!
S1ADJ r4, ATNC
eor r4, r4, r6, S1ADJ BTNC
eor r4, r4, r5, S0ADJ ACNT
eor r4, r4, r7, S0ADJ BCNT
str r4, [DST, #-4]!
.Lmemxor3_uud_odd:
ldr r4, [AP, #-4]!
ldr r6, [BP, #-4]!
S1ADJ r5, ATNC
eor r5, r5, r7, S1ADJ BTNC
eor r5, r5, r4, S0ADJ ACNT
eor r5, r5, r6, S0ADJ BCNT
str r5, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_uud_loop
adds N, #8
beq .Lmemxor3_done
C FIXME: More clever left-over handling? For now, just adjust pointers.
add AP, AP, ACNT, lsr #3
add BP, BP, BCNT, lsr #3
b .Lmemxor3_bytes
EPILOGUE(nettle_memxor3)
C arm/neon/chacha-3core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "chacha-3core.asm"
.fpu neon
define(`DST', `r0')
define(`SRC', `r1')
define(`ROUNDS', `r2')
define(`SRCp32', `r3')
C State, X, Y and Z representing consecutive blocks
define(`X0', `q0')
define(`X1', `q1')
define(`X2', `q2')
define(`X3', `q3')
define(`Y0', `q8')
define(`Y1', `q9')
define(`Y2', `q10')
define(`Y3', `q11')
define(`Z0', `q12')
define(`Z1', `q13')
define(`Z2', `q14')
define(`Z3', `q15')
define(`T0', `q4')
define(`T1', `q5')
define(`T2', `q6')
define(`T3', `q7')
.text
.align 4
.Lcount1:
.int 1,0,0,0
C _chacha_3core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_chacha_3core)
C loads using vld1.32 to be endianness-neutral wrt consecutive 32-bit words
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
vpush {q4,q5,q6,q7}
adr r12, .Lcount1
vld1.32 {Z3}, [r12]
vadd.i64 Y3, X3, Z3 C Increment 64-bit counter
vadd.i64 Z3, Y3, Z3
.Lshared_entry:
vmov Y0, X0
vmov Z0, X0
vmov Y1, X1
vmov Z1, X1
vmov Y2, X2
vmov Z2, X2
C Save initial values for the words including the counters.
vmov T2, Y3
vmov T3, Z3
.Loop:
C Interleave three blocks. Note that with this scheduling,
C only two temporaries, T0 and T1, are needed.
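C Editorial note, not from the original file: vrev32.16 is the rotate by
C 16, and each vshl.i32/vsri.u32 pair is another 32-bit left rotate. The
C quarter round applied to each column (and, after the vext rotations, to
C each diagonal) is, in scalar C:
C
C   #include <stdint.h>
C   #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
C   static void
C   chacha_qround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
C   {
C     *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
C     *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
C     *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
C     *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
C   }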
vadd.i32 X0, X0, X1
veor X3, X3, X0
vadd.i32 Y0, Y0, Y1
vrev32.16 X3, X3 C lrot 16
veor Y3, Y3, Y0
vadd.i32 Z0, Z0, Z1
vadd.i32 X2, X2, X3
vrev32.16 Y3, Y3 C lrot 16
veor Z3, Z3, Z0
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vrev32.16 Z3, Z3 C lrot 16
vshl.i32 X1, T0, #12
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #20
vshl.i32 Y1, T1, #12
veor T0, Z1, Z2
vadd.i32 X0, X0, X1
vsri.u32 Y1, T1, #20
vshl.i32 Z1, T0, #12
veor T1, X3, X0
vadd.i32 Y0, Y0, Y1
vsri.u32 Z1, T0, #20
vshl.i32 X3, T1, #8
veor T0, Y3, Y0
vadd.i32 Z0, Z0, Z1
vsri.u32 X3, T1, #24
vshl.i32 Y3, T0, #8
veor T1, Z3, Z0
vadd.i32 X2, X2, X3
vsri.u32 Y3, T0, #24
vext.32 X3, X3, X3, #3
vshl.i32 Z3, T1, #8
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vsri.u32 Z3, T1, #24
vext.32 Y3, Y3, Y3, #3
vshl.i32 X1, T0, #7
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #25
vshl.i32 Y1, T1, #7
veor T0, Z1, Z2
vext.32 X1, X1, X1, #1
vsri.u32 Y1, T1, #25
vshl.i32 Z1, T0, #7
vext.32 Y2, Y2, Y2, #2
vext.32 Y1, Y1, Y1, #1
vsri.u32 Z1, T0, #25
vext.32 X2, X2, X2, #2
C Second QROUND
vadd.i32 X0, X0, X1
vext.32 Z2, Z2, Z2, #2
vext.32 Z1, Z1, Z1, #1
veor X3, X3, X0
vadd.i32 Y0, Y0, Y1
vext.32 Z3, Z3, Z3, #3
vrev32.16 X3, X3 C lrot 16
veor Y3, Y3, Y0
vadd.i32 Z0, Z0, Z1
vadd.i32 X2, X2, X3
vrev32.16 Y3, Y3 C lrot 16
veor Z3, Z3, Z0
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vrev32.16 Z3, Z3 C lrot 16
vshl.i32 X1, T0, #12
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #20
vshl.i32 Y1, T1, #12
veor T0, Z1, Z2
vadd.i32 X0, X0, X1
vsri.u32 Y1, T1, #20
vshl.i32 Z1, T0, #12
veor T1, X3, X0
vadd.i32 Y0, Y0, Y1
vsri.u32 Z1, T0, #20
vshl.i32 X3, T1, #8
veor T0, Y3, Y0
vadd.i32 Z0, Z0, Z1
vsri.u32 X3, T1, #24
vshl.i32 Y3, T0, #8
veor T1, Z3, Z0
vadd.i32 X2, X2, X3
vsri.u32 Y3, T0, #24
vext.32 X3, X3, X3, #1
vshl.i32 Z3, T1, #8
veor T0, X1, X2
vext.32 X2, X2, X2, #2
vadd.i32 Y2, Y2, Y3
vext.32 Y3, Y3, Y3, #1
vsri.u32 Z3, T1, #24
vshl.i32 X1, T0, #7
veor T1, Y1, Y2
vext.32 Y2, Y2, Y2, #2
vadd.i32 Z2, Z2, Z3
vext.32 Z3, Z3, Z3, #1
vsri.u32 X1, T0, #25
vshl.i32 Y1, T1, #7
veor T0, Z1, Z2
vext.32 Z2, Z2, Z2, #2
vext.32 X1, X1, X1, #3
vsri.u32 Y1, T1, #25
vshl.i32 Z1, T0, #7
vext.32 Y1, Y1, Y1, #3
vsri.u32 Z1, T0, #25
subs ROUNDS, ROUNDS, #2
vext.32 Z1, Z1, Z1, #3
bhi .Loop
C Add updated counters
vadd.i32 Y3, Y3, T2
vadd.i32 Z3, Z3, T3
vld1.32 {T0,T1}, [SRC]
vadd.i32 X0, X0, T0
vadd.i32 X1, X1, T1
C vst1.8 because caller expects results little-endian
C interleave loads, calculations and stores to save cycles on stores
C use vstm when little-endian for some additional speedup
IF_BE(` vst1.8 {X0,X1}, [DST]!')
vld1.32 {T2,T3}, [SRCp32]
vadd.i32 X2, X2, T2
vadd.i32 X3, X3, T3
IF_BE(` vst1.8 {X2,X3}, [DST]!')
IF_LE(` vstmia DST!, {X0,X1,X2,X3}')
vadd.i32 Y0, Y0, T0
vadd.i32 Y1, Y1, T1
IF_BE(` vst1.8 {Y0,Y1}, [DST]!')
vadd.i32 Y2, Y2, T2
IF_BE(` vst1.8 {Y2,Y3}, [DST]!')
IF_LE(` vstmia DST!, {Y0,Y1,Y2,Y3}')
vadd.i32 Z0, Z0, T0
vadd.i32 Z1, Z1, T1
IF_BE(` vst1.8 {Z0,Z1}, [DST]!')
vadd.i32 Z2, Z2, T2
vpop {q4,q5,q6,q7}
IF_BE(` vst1.8 {Z2,Z3}, [DST]')
IF_LE(` vstm DST, {Z0,Z1,Z2,Z3}')
bx lr
EPILOGUE(_nettle_chacha_3core)
PROLOGUE(_nettle_chacha_3core32)
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
vpush {q4,q5,q6,q7}
adr r12, .Lcount1
vld1.32 {Z3}, [r12]
vadd.i32 Y3, X3, Z3 C Increment 32-bit counter
vadd.i32 Z3, Y3, Z3
b .Lshared_entry
EPILOGUE(_nettle_chacha_3core32)
C arm/neon/salsa20-2core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "salsa20-2core.asm"
.fpu neon
define(`DST', `r0')
define(`SRC', `r1')
define(`ROUNDS', `r2')
define(`SRCp32', `r3')
C State, even elements in X, odd elements in Y
define(`X0', `q0')
define(`X1', `q1')
define(`X2', `q2')
define(`X3', `q3')
define(`Y0', `q8')
define(`Y1', `q9')
define(`Y2', `q10')
define(`Y3', `q11')
define(`T0', `q12')
define(`T1', `q13')
define(`T2', `q14')
define(`T3', `q15')
.text
.align 4
.Lcount1:
.int 1,0,0,0
C _salsa20_2core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_salsa20_2core)
C loads using vld1.32 to be endianness-neutral wrt consecutive 32-bit words
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
adr r12, .Lcount1
vmov Y3, X0
vld1.32 {Y1}, [r12]
vmov Y0, X1
vadd.i64 Y1, Y1, X2 C Increment counter
vmov Y2, X3
vtrn.32 X0, Y3 C X0: 0 0 2 2 Y3: 1 1 3 3
vtrn.32 X1, Y0 C X1: 4 4 6 6 Y0: 5 5 7 7
vtrn.32 X2, Y1 C X2: 8 8 10 10 Y1: 9 9 11 11
vtrn.32 X3, Y2 C X3: 12 12 14 14 Y2: 13 13 15 15
C Swap, to get
C X0: 0 10 Y0: 5 15
C X1: 4 14 Y1: 9 3
C X2: 8 2 Y2: 13 7
C X3: 12 6 Y3: 1 11
vswp D1REG(X0), D1REG(X2)
vswp D1REG(X1), D1REG(X3)
vswp D1REG(Y0), D1REG(Y2)
vswp D1REG(Y1), D1REG(Y3)
.Loop:
C Register layout (A is first block, B is second block)
C
C X0: A0 B0 A10 B10 Y0: A5 B5 A15 B15
C X1: A4 B4 A14 B14 Y1: A9 B9 A3 B3
C X2: A8 B8 A2 B2 Y2: A13 B13 A7 B7
C X3: A12 B12 A6 B6 Y3: A1 B1 A11 B11
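C Editorial note, not from the original file: each vshl.i32/vsri.u32 pair
C below is a 32-bit left rotate; the Salsa20 quarter round computed here
C (for both blocks at once) is, in scalar C:
C
C   #include <stdint.h>
C   #define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
C   static void
C   salsa20_qround(uint32_t *x0, uint32_t *x1, uint32_t *x2, uint32_t *x3)
C   {
C     *x1 ^= ROTL32(*x0 + *x3, 7);
C     *x2 ^= ROTL32(*x1 + *x0, 9);
C     *x3 ^= ROTL32(*x2 + *x1, 13);
C     *x0 ^= ROTL32(*x3 + *x2, 18);
C   }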
vadd.i32 T0, X0, X3
vshl.i32 T1, T0, #7
vadd.i32 T2, Y0, Y3
vsri.u32 T1, T0, #25
vshl.i32 T3, T2, #7
veor X1, X1, T1
vsri.u32 T3, T2, #25
vadd.i32 T0, X1, X0
veor Y1, Y1, T3
vshl.i32 T1, T0, #9
vadd.i32 T2, Y1, Y0
vsri.u32 T1, T0, #23
vshl.i32 T3, T2, #9
veor X2, X2, T1
vsri.u32 T3, T2, #23
vadd.i32 T0, X2, X1
veor Y2, Y2, T3
vshl.i32 T1, T0, #13
vadd.i32 T2, Y2, Y1
vsri.u32 T1, T0, #19
vshl.i32 T3, T2, #13
veor X3, X3, T1
vsri.u32 T3, T2, #19
vadd.i32 T0, X3, X2
veor Y3, Y3, T3
vshl.i32 T1, T0, #18
vadd.i32 T2, Y3, Y2
vext.32 Y1, Y1, Y1, #2
vsri.u32 T1, T0, #14
vshl.i32 T3, T2, #18
vext.32 Y2, Y2, Y2, #2
veor X0, X0, T1
vsri.u32 T3, T2, #14
vext.32 X3, X3, X3, #2
veor Y0, Y0, T3
C Register layout:
C X0: A0 B0 A10 B10 Y0: A5 B5 A15 B15
C Y1: A3 B3 A9 B9 X1: A4 B4 A14 B14 (Y1 swapped)
C X2: A2 B2 A8 B8 Y2: A7 B7 A13 B13 (X2, Y2 swapped)
C Y3: A1 B1 A11 B11 X3: A6 B6 A12 B12 (X3 swapped)
vadd.i32 T0, X0, Y1
vext.32 X2, X2, X2, #2
vshl.i32 T1, T0, #7
vadd.i32 T2, Y0, X1
vsri.u32 T1, T0, #25
vshl.i32 T3, T2, #7
veor Y3, Y3, T1
vsri.u32 T3, T2, #25
vadd.i32 T0, Y3, X0
veor X3, X3, T3
vshl.i32 T1, T0, #9
vadd.i32 T2, X3, Y0
vsri.u32 T1, T0, #23
vshl.i32 T3, T2, #9
veor X2, X2, T1
vsri.u32 T3, T2, #23
vadd.i32 T0, X2, Y3
veor Y2, Y2, T3
vshl.i32 T1, T0, #13
vadd.i32 T2, Y2, X3
vsri.u32 T1, T0, #19
vshl.i32 T3, T2, #13
veor Y1, Y1, T1
vsri.u32 T3, T2, #19
vadd.i32 T0, Y1, X2
veor X1, X1, T3
vext.32 X2, X2, X2, #2
vshl.i32 T1, T0, #18
vadd.i32 T2, X1, Y2
vext.32 Y1, Y1, Y1, #2
vsri.u32 T1, T0, #14
subs ROUNDS, ROUNDS, #2
vshl.i32 T3, T2, #18
vext.32 X3, X3, X3, #2
veor X0, X0, T1
vsri.u32 T3, T2, #14
vext.32 Y2, Y2, Y2, #2
veor Y0, Y0, T3
bhi .Loop
C Inverse swaps and transpositions
vswp D1REG(X0), D1REG(X2)
vswp D1REG(X1), D1REG(X3)
vswp D1REG(Y0), D1REG(Y2)
vswp D1REG(Y1), D1REG(Y3)
vld1.32 {T0,T1}, [SRC]
vld1.32 {T2,T3}, [SRCp32]
vtrn.32 X0, Y3
vtrn.32 X1, Y0
vtrn.32 X2, Y1
vtrn.32 X3, Y2
C Add in the original context
vadd.i32 X0, X0, T0
vadd.i32 X1, X1, T1
C vst1.8 because caller expects results little-endian
C interleave loads, calculations and stores to save cycles on stores
C use vstm when little-endian for some additional speedup
IF_BE(` vst1.8 {X0,X1}, [DST]!')
vadd.i32 X2, X2, T2
vadd.i32 X3, X3, T3
IF_BE(` vst1.8 {X2,X3}, [DST]!')
IF_LE(` vstmia DST!, {X0,X1,X2,X3}')
vld1.32 {X0}, [r12]
vadd.i32 T0, T0, Y3
vadd.i64 T2, T2, X0
vadd.i32 T1, T1, Y0
IF_BE(` vst1.8 {T0,T1}, [DST]!')
vadd.i32 T2, T2, Y1
vadd.i32 T3, T3, Y2
IF_BE(` vst1.8 {T2,T3}, [DST]')
IF_LE(` vstm DST, {T0,T1,T2,T3}')
bx lr
EPILOGUE(_nettle_salsa20_2core)
C arm/neon/sha3-permute.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "sha3-permute.asm"
.fpu neon
define(`CTX', `r0')
define(`COUNT', `r1')
define(`RC', `r2')
C First column
define(`A0', `d0')
define(`A5', `d2')
define(`A10', `d3')
define(`A15', `d4')
define(`A20', `d5')
define(`A1', `d6')
define(`A2', `d7')
define(`A3', `d8')
define(`A4', `d9')
define(`A6', `d16')
define(`A7', `d17')
define(`A8', `d18')
define(`A9', `d19')
define(`A11', `d20')
define(`A12', `d21')
define(`A13', `d22')
define(`A14', `d23')
define(`A16', `d24')
define(`A17', `d25')
define(`A18', `d26')
define(`A19', `d27')
define(`A21', `d28')
define(`A22', `d29')
define(`A23', `d30')
define(`A24', `d31')
define(`T0', `d10')
define(`T1', `d11')
define(`C0', `d1')
define(`C1', `d12')
define(`C2', `d13')
define(`C3', `d14')
define(`C4', `d15')
C ROL(DST, SRC, COUNT)
C Must have SRC != DST
define(`ROL', `
vshr.u64 $1, $2, #eval(64-$3)
vsli.i64 $1, $2, #$3
')
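C Editorial note, not from the original file: the vshr.u64/vsli.i64 pair
C is a 64-bit rotate left; a scalar C equivalent, assuming 0 < n < 64:
C
C   #include <stdint.h>
C   static uint64_t
C   rol64(uint64_t x, unsigned n)
C   {
C     return (x << n) | (x >> (64 - n));
C   }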
C sha3_permute(struct sha3_ctx *ctx)
.text
.align 3
.Lrc:
.quad 0x0000000000000001
.quad 0x0000000000008082
.quad 0x800000000000808A
.quad 0x8000000080008000
.quad 0x000000000000808B
.quad 0x0000000080000001
.quad 0x8000000080008081
.quad 0x8000000000008009
.quad 0x000000000000008A
.quad 0x0000000000000088
.quad 0x0000000080008009
.quad 0x000000008000000A
.quad 0x000000008000808B
.quad 0x800000000000008B
.quad 0x8000000000008089
.quad 0x8000000000008003
.quad 0x8000000000008002
.quad 0x8000000000000080
.quad 0x000000000000800A
.quad 0x800000008000000A
.quad 0x8000000080008081
.quad 0x8000000000008080
.quad 0x0000000080000001
.quad 0x8000000080008008
PROLOGUE(nettle_sha3_permute)
vpush {d8-d15}
vld1.64 {A0}, [CTX]!
vldm CTX!, {A1,A2,A3,A4}
vld1.64 {A5}, [CTX]!
vldm CTX!, {A6,A7,A8,A9}
vld1.64 {A10}, [CTX]!
vldm CTX!, {A11,A12,A13,A14}
vld1.64 {A15}, [CTX]!
vldm CTX!, {A16,A17,A18,A19}
vld1.64 {A20}, [CTX]!
vldm CTX, {A21,A22,A23,A24}
sub CTX, CTX, #168
mov COUNT, #24
adr RC, .Lrc
.align 3
.Loop:
veor QREG(T0), QREG(A5), QREG(A15)
veor C0, A0, T0
veor C0, C0, T1
veor QREG(C1), QREG(A1), QREG(A6)
veor QREG(C1), QREG(C1), QREG(A11)
veor QREG(C1), QREG(C1), QREG(A16)
veor QREG(C1), QREG(C1), QREG(A21)
veor QREG(C3), QREG(A3), QREG(A8)
veor QREG(C3), QREG(C3), QREG(A13)
veor QREG(C3), QREG(C3), QREG(A18)
veor QREG(C3), QREG(C3), QREG(A23)
C D0 = C4 ^ (C1 <<< 1)
C NOTE: Using ROL macro (and vsli) is slightly slower.
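C Editorial note, not from the original file: with C0-C4 the column
C parities computed above, the whole theta step is, in scalar C (rol64 as
C in the note further up, A[25] the state lanes):
C
C   static void
C   theta(uint64_t A[25], const uint64_t C[5])
C   {
C     unsigned i, j;
C     for (i = 0; i < 5; i++)
C       {
C         uint64_t d = C[(i + 4) % 5] ^ rol64(C[(i + 1) % 5], 1);
C         for (j = i; j < 25; j += 5)
C           A[j] ^= d;
C       }
C   }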
vshl.i64 T0, C1, #1
vshr.u64 T1, C1, #63
veor T0, T0, C4
veor T0, T0, T1
vmov T1, T0
veor A0, A0, T0
veor QREG(A5), QREG(A5), QREG(T0)
veor QREG(A15), QREG(A15), QREG(T0)
C D1 = C0 ^ (C2 <<< 1)
C D2 = C1 ^ (C3 <<< 1)
ROL(T0, C2, 1)
ROL(T1, C3, 1)
veor T0, T0, C0
veor T1, T1, C1
veor QREG(A1), QREG(A1), QREG(T0)
veor QREG(A6), QREG(A6), QREG(T0)
veor QREG(A11), QREG(A11), QREG(T0)
veor QREG(A16), QREG(A16), QREG(T0)
veor QREG(A21), QREG(A21), QREG(T0)
C D3 = C2 ^ (C4 <<< 1)
C D4 = C3 ^ (C0 <<< 1)
ROL(T0, C4, 1)
ROL(T1, C0, 1)
veor T0, T0, C2
veor T1, T1, C3
veor QREG(A3), QREG(A3), QREG(T0)
veor QREG(A8), QREG(A8), QREG(T0)
veor QREG(A13), QREG(A13), QREG(T0)
veor QREG(A18), QREG(A18), QREG(T0)
veor QREG(A23), QREG(A23), QREG(T0)
ROL( T0, A1, 1)
ROL( A1, A6, 44)
ROL( A6, A9, 20)
ROL( A9, A22, 61)
ROL(A22, A14, 39)
ROL(A14, A20, 18)
ROL(A20, A2, 62)
ROL( A2, A12, 43)
ROL(A12, A13, 25)
ROL(A13, A19, 8)
ROL(A19, A23, 56)
ROL(A23, A15, 41)
ROL(A15, A4, 27)
ROL( A4, A24, 14)
ROL(A24, A21, 2)
ROL(A21, A8, 55)
ROL( A8, A16, 45)
ROL(A16, A5, 36)
ROL( A5, A3, 28)
ROL( A3, A18, 21)
ROL(A18, A17, 15)
ROL(A17, A11, 10)
ROL(A11, A7, 6)
ROL( A7, A10, 3)
C New A10 value left in T0
vbic C0, A2, A1
vbic C1, A3, A2
vbic C2, A4, A3
vbic C3, A0, A4
vbic C4, A1, A0
veor A0, A0, C0
vld1.64 {C0}, [RC :64]!
veor QREG(A1), QREG(A1), QREG(C1)
veor QREG(A3), QREG(A3), QREG(C3)
veor A0, A0, C0
vbic C0, A7, A6
vbic C1, A8, A7
vbic C2, A9, A8
vbic C3, A5, A9
vbic C4, A6, A5
veor A5, A5, C0
veor QREG(A6), QREG(A6), QREG(C1)
veor QREG(A8), QREG(A8), QREG(C3)
vbic C0, A12, A11
vbic C1, A13, A12
vbic C2, A14, A13
vbic C3, T0, A14
vbic C4, A11, T0
veor A10, T0, C0
veor QREG(A11), QREG(A11), QREG(C1)
veor QREG(A13), QREG(A13), QREG(C3)
vbic C0, A17, A16
vbic C1, A18, A17
vbic C2, A19, A18
vbic C3, A15, A19
vbic C4, A16, A15
veor A15, A15, C0
veor QREG(A16), QREG(A16), QREG(C1)
veor QREG(A18), QREG(A18), QREG(C3)
vbic C0, A22, A21
vbic C1, A23, A22
vbic C2, A24, A23
vbic C3, A20, A24
vbic C4, A21, A20
subs COUNT, COUNT, #1
veor A20, A20, C0
veor QREG(A21), QREG(A21), QREG(C1)
veor QREG(A23), QREG(A23), QREG(C3)
bne .Loop
vst1.64 {A0}, [CTX]!
vstm CTX!, {A1,A2,A3,A4}
vst1.64 {A5}, [CTX]!
vstm CTX!, {A6,A7,A8,A9}
vst1.64 {A10}, [CTX]!
vstm CTX!, {A11,A12,A13,A14}
vst1.64 {A15}, [CTX]!
vstm CTX!, {A16,A17,A18,A19}
vst1.64 {A20}, [CTX]!
vstm CTX, {A21,A22,A23,A24}
vpop {d8-d15}
bx lr
EPILOGUE(nettle_sha3_permute)
C arm/neon/sha512-compress.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "sha512-compress.asm"
.fpu neon
define(`STATE', `r0')
define(`INPUT', `r1')
define(`K', `r2')
define(`COUNT', `r3')
define(`SHIFT', `r12')
define(`SA', `d0')
define(`SB', `d1')
define(`SC', `d2')
define(`SD', `d3')
define(`SE', `d4')
define(`SF', `d5')
define(`SG', `d6')
define(`SH', `d7')
define(`QSAB', `q0')
define(`QSCD', `q1')
define(`QSEF', `q2')
define(`QSGH', `q3')
C d8-d15 are callee-save
define(`DT0', `d8')
define(`DT1', `d9')
define(`QT01', `q4')
define(`DT2', `d10')
define(`DT3', `d11')
define(`QT23', `q5')
define(`DT4', `d12')
define(`DT5', `d13')
define(`QT45', `q6')
C Used only when reading the input, can overlap with state
define(`DT6', `d0')
define(`DT7', `d1')
define(`QT67', `q0')
define(`DW0', `d16')
define(`DW1', `d17')
define(`DW2', `d18')
define(`DW3', `d19')
define(`DW4', `d20')
define(`DW5', `d21')
define(`DW6', `d22')
define(`DW7', `d23')
define(`DW8', `d24')
define(`DW9', `d25')
define(`DW10', `d26')
define(`DW11', `d27')
define(`DW12', `d28')
define(`DW13', `d29')
define(`DW14', `d30')
define(`DW15', `d31')
define(`QW0001', `q8')
define(`QW0203', `q9')
define(`QW0405', `q10')
define(`QW0607', `q11')
define(`QW0809', `q12')
define(`QW1011', `q13')
define(`QW1213', `q14')
define(`QW1415', `q15')
define(`EXPAND_ME', `$1')
define(`W', `EXPAND_ME(`DW'eval(($1) % 16))')
C If x = W(i+14), y = W(i+1), we xor in parallel
C
C x << 45 y << 63
C x >> 19 y >> 1
C x << 3 y << 56
C x >> 61 y >> 8
C xor x >> 6 y >> 7
C -----------------------------
C DT0 DT1
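C Editorial note, not from the original file: in scalar C, with W[] a
C 16-entry circular buffer and rotr64 a 64-bit rotate right, the expansion
C step implemented by EXPN is:
C
C   uint64_t x = W[(i + 14) % 16], y = W[(i + 1) % 16];
C   uint64_t s1 = rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6);
C   uint64_t s0 = rotr64(y, 1) ^ rotr64(y, 8) ^ (y >> 7);
C   W[i % 16] += W[(i + 9) % 16] + s0 + s1;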
define(`EXPN', `
vshl.i64 DT0, W($1+14), #45
vshl.i64 DT1, W($1 + 1), #63
vshr.u64 DT2, W($1+14), #19
vshr.u64 DT3, W($1 + 1), #1
vshl.i64 DT4, W($1+14), #3
vshl.i64 DT5, W($1 + 1), #56
veor.i64 QT01, QT01, QT23
vshr.u64 DT2, W($1+14), #61
vshr.u64 DT3, W($1 + 1), #8
veor.i64 QT01, QT01, QT45
vshr.u64 DT4, W($1+14), #6
vshr.u64 DT5, W($1 + 1), #7
veor.i64 QT01, QT01, QT23
vadd.i64 W($1), W($1), W($1 + 9)
veor.i64 QT01, QT01, QT45
vadd.i64 W($1), W($1), DT0
vadd.i64 W($1), W($1), DT1
')
C ROUND(A,B,C,D,E,F,G,H,i)
C
C H += S1(E) + Choice(E,F,G) + K + W
C D += H
C H += S0(A) + Majority(A,B,C)
C
C Where
C
C S1(E) = E<<<50 ^ E<<<46 ^ E<<<23
C S0(A) = A<<<36 ^ A<<<30 ^ A<<<25
C Choice (E, F, G) = G^(E&(F^G))
C Majority (A,B,C) = (A&B) + (C&(A^B))
C Do S1 and S0 in parallel
C
C e << 50 a << 36
C e >> 14 a >> 28
C e << 46 a << 30
C e >> 18 a >> 34
C e << 23 a << 25
C xor e >> 41 a >> 39
C ----------------------------
C DT0 DT1
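C Editorial note, not from the original file: in scalar C, with rotr64 a
C 64-bit rotate right and k/w the round constant and message word, the
C round is roughly:
C
C   uint64_t S1 = rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41);
C   uint64_t S0 = rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39);
C   h += S1 + (g ^ (e & (f ^ g))) + k + w;
C   d += h;
C   h += S0 + ((a & b) + (c & (a ^ b)));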
define(`ROUND', `
vshl.i64 DT0, $5, #50
vshl.i64 DT1, $1, #36
vshr.u64 DT2, $5, #14
vshr.u64 DT3, $1, #28
vshl.i64 DT4, $5, #46
vshl.i64 DT5, $1, #30
veor QT01, QT01, QT23
vshr.u64 DT2, $5, #18
vshr.u64 DT3, $1, #34
veor QT01, QT01, QT45
vshl.i64 DT4, $5, #23
vshl.i64 DT5, $1, #25
veor QT01, QT01, QT23
vshr.u64 DT2, $5, #41
vshr.u64 DT3, $1, #39
veor QT01, QT01, QT45
veor DT4, $6, $7
veor DT5, $1, $2
vand DT4, DT4, $5
vand DT5, DT5, $3
veor DT4, DT4, $7
veor QT01, QT01, QT23
vand DT2, $1, $2
vldr DT3, [K,#eval(8*$9)]
vadd.i64 $8, $8, W($9)
vadd.i64 QT01, QT01, QT45
vadd.i64 $8, $8, DT3
vadd.i64 $8, $8, DT0
vadd.i64 DT1, DT1, DT2
vadd.i64 $4, $4, $8
vadd.i64 $8, $8, DT1
')
C void
C _nettle_sha512_compress(uint64_t *state, const uint8_t *input, const uint64_t *k)
.text
.align 2
PROLOGUE(_nettle_sha512_compress)
vpush {d8,d9,d10,d11,d12,d13}
ands SHIFT, INPUT, #7
and INPUT, INPUT, #-8
vld1.8 {DT5}, [INPUT :64]
addne INPUT, INPUT, #8
addeq SHIFT, SHIFT, #8
lsl SHIFT, SHIFT, #3
C Put right shift in DT0 and DT1, aka QT01
neg SHIFT, SHIFT
vmov.i32 DT0, #0
vmov.32 DT0[0], SHIFT
vmov DT1, DT0
C Put left shift in DT2 and DT3, aka QT23
add SHIFT, SHIFT, #64
vmov.i32 DT2, #0
vmov.32 DT2[0], SHIFT
vmov DT3, DT2
vshl.u64 DT5, DT5, DT0
C Set w[i] <-- w[i-1] >> RSHIFT + w[i] << LSHIFT
vld1.8 {W(0),W(1),W(2),W(3)}, [INPUT :64]!
vshl.u64 QT67, QW0001, QT01 C Right shift
vshl.u64 QW0001, QW0001, QT23 C Left shift
veor W(0), W(0), DT5
veor W(1), W(1), DT6
vrev64.8 QW0001, QW0001
vshl.u64 QT45, QW0203, QT01 C Right shift
vshl.u64 QW0203, QW0203, QT23 C Left shift
veor W(2), W(2), DT7
veor W(3), W(3), DT4
vrev64.8 QW0203, QW0203
vld1.8 {W(4),W(5),W(6),W(7)}, [INPUT :64]!
vshl.u64 QT67, QW0405, QT01 C Right shift
vshl.u64 QW0405, QW0405, QT23 C Left shift
veor W(4), W(4), DT5
veor W(5), W(5), DT6
vrev64.8 QW0405, QW0405
vshl.u64 QT45, QW0607, QT01 C Right shift
vshl.u64 QW0607, QW0607, QT23 C Left shift
veor W(6), W(6), DT7
veor W(7), W(7), DT4
vrev64.8 QW0607, QW0607
vld1.8 {W(8),W(9),W(10),W(11)}, [INPUT :64]!
vshl.u64 QT67, QW0809, QT01 C Right shift
vshl.u64 QW0809, QW0809, QT23 C Left shift
veor W(8), W(8), DT5
veor W(9), W(9), DT6
vrev64.8 QW0809, QW0809
vshl.u64 QT45, QW1011, QT01 C Right shift
vshl.u64 QW1011, QW1011, QT23 C Left shift
veor W(10), W(10), DT7
veor W(11), W(11), DT4
vrev64.8 QW1011, QW1011
vld1.8 {W(12),W(13),W(14),W(15)}, [INPUT :64]!
vshl.u64 QT67, QW1213, QT01 C Right shift
vshl.u64 QW1213, QW1213, QT23 C Left shift
veor W(12), W(12), DT5
veor W(13), W(13), DT6
vrev64.8 QW1213, QW1213
vshl.u64 QT45, QW1415, QT01 C Right shift
vshl.u64 QW1415, QW1415, QT23 C Left shift
veor W(14), W(14), DT7
veor W(15), W(15), DT4
vrev64.8 QW1415, QW1415
vldm STATE, {SA,SB,SC,SD,SE,SF,SG,SH}
ROUND(SA,SB,SC,SD,SE,SF,SG,SH, 0)
ROUND(SH,SA,SB,SC,SD,SE,SF,SG, 1)
ROUND(SG,SH,SA,SB,SC,SD,SE,SF, 2)
ROUND(SF,SG,SH,SA,SB,SC,SD,SE, 3)
ROUND(SE,SF,SG,SH,SA,SB,SC,SD, 4)
ROUND(SD,SE,SF,SG,SH,SA,SB,SC, 5)
ROUND(SC,SD,SE,SF,SG,SH,SA,SB, 6)
ROUND(SB,SC,SD,SE,SF,SG,SH,SA, 7)
ROUND(SA,SB,SC,SD,SE,SF,SG,SH, 8)
ROUND(SH,SA,SB,SC,SD,SE,SF,SG, 9)
ROUND(SG,SH,SA,SB,SC,SD,SE,SF, 10)
ROUND(SF,SG,SH,SA,SB,SC,SD,SE, 11)
ROUND(SE,SF,SG,SH,SA,SB,SC,SD, 12)
ROUND(SD,SE,SF,SG,SH,SA,SB,SC, 13)
ROUND(SC,SD,SE,SF,SG,SH,SA,SB, 14)
ROUND(SB,SC,SD,SE,SF,SG,SH,SA, 15)
add K, K, #128
mov COUNT, #4
.Loop:
EXPN( 0) ROUND(SA,SB,SC,SD,SE,SF,SG,SH, 0)
EXPN( 1) ROUND(SH,SA,SB,SC,SD,SE,SF,SG, 1)
EXPN( 2) ROUND(SG,SH,SA,SB,SC,SD,SE,SF, 2)
EXPN( 3) ROUND(SF,SG,SH,SA,SB,SC,SD,SE, 3)
EXPN( 4) ROUND(SE,SF,SG,SH,SA,SB,SC,SD, 4)
EXPN( 5) ROUND(SD,SE,SF,SG,SH,SA,SB,SC, 5)
EXPN( 6) ROUND(SC,SD,SE,SF,SG,SH,SA,SB, 6)
EXPN( 7) ROUND(SB,SC,SD,SE,SF,SG,SH,SA, 7)
EXPN( 8) ROUND(SA,SB,SC,SD,SE,SF,SG,SH, 8)
EXPN( 9) ROUND(SH,SA,SB,SC,SD,SE,SF,SG, 9)
EXPN(10) ROUND(SG,SH,SA,SB,SC,SD,SE,SF, 10)
EXPN(11) ROUND(SF,SG,SH,SA,SB,SC,SD,SE, 11)
EXPN(12) ROUND(SE,SF,SG,SH,SA,SB,SC,SD, 12)
EXPN(13) ROUND(SD,SE,SF,SG,SH,SA,SB,SC, 13)
EXPN(14) ROUND(SC,SD,SE,SF,SG,SH,SA,SB, 14)
subs COUNT, COUNT, #1
EXPN(15) ROUND(SB,SC,SD,SE,SF,SG,SH,SA, 15)
add K, K, #128
bne .Loop
vld1.64 {DW0, DW1, DW2, DW3}, [STATE]
vadd.i64 QSAB, QSAB, QW0001
vadd.i64 QSCD, QSCD, QW0203
vst1.64 {SA,SB,SC,SD}, [STATE]!
vld1.64 {DW0, DW1, DW2, DW3}, [STATE]
vadd.i64 QSEF, QSEF, QW0001
vadd.i64 QSGH, QSGH, QW0203
vst1.64 {SE,SF,SG,SH}, [STATE]!
vpop {d8,d9,d10,d11,d12,d13}
bx lr
EPILOGUE(_nettle_sha512_compress)
divert(-1)
define shastate
p/x $d0.u64
p/x $d1.u64
p/x $d2.u64
p/x $d3.u64
p/x $d4.u64
p/x $d5.u64
p/x $d6.u64
p/x $d7.u64
end
C arm/neon/umac-nh-n.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "umac-nh.asm"
.fpu neon
define(`OUT', `r0')
define(`ITERS', `r1')
define(`KEY', `r2')
define(`LENGTH', `r3')
define(`MSG', `r12')
define(`SHIFT', `r14')
define(`QA', `q0')
define(`QB', `q1')
define(`QY0', `q3') C Accumulates for the first two iterations.
define(`DM', `d4')
define(`QY1', `q4') C Used for 3 and 4 iterations.
define(`QC', `q5')
define(`QD', `q6')
define(`QLEFT', `q8')
define(`QRIGHT', `q9')
define(`QT0', `q10')
define(`QT1', `q11')
define(`QT2', `q12')
define(`QK0', `q13')
define(`QK1', `q14')
define(`QK2', `q15')
C FIXME: Try permuting subkeys using vld4, vzip or similar.
.text
.align 3
PROLOGUE(_nettle_umac_nh_n)
ldr MSG, [sp]
str lr, [sp, #-4]!
C Setup for 64-bit aligned reads
ands SHIFT, MSG, #7
and MSG, MSG, #-8
vld1.8 {DM}, [MSG :64]
addne MSG, MSG, #8
addeq SHIFT, SHIFT, #8
C FIXME: Combine as rsb ?
lsl SHIFT, SHIFT, #3
neg SHIFT, SHIFT
C Right shift in QRIGHT (both halves)
vmov.i32 D0REG(QRIGHT)[0], SHIFT
vmov.32 D1REG(QRIGHT), D0REG(QRIGHT)
add SHIFT, SHIFT, #64
vmov.i32 D0REG(QLEFT)[0], SHIFT
vmov.32 D1REG(QLEFT), D0REG(QLEFT)
cmp r1, #3
vmov.i64 QY0, #0
vshl.u64 DM, DM, D0REG(QRIGHT)
bcc .Lnh2
beq .Lnh3
.Lnh4:
C Permute key words, so we in each iteration have them in order
C
C P0: [0, 4,1, 5] P1: [ 2, 6, 3, 7] P2: [ 4, 8, 5, 9] P3: [ 6,10, 7,11]
C P4: [8,12,9,13] P5: [10,14,11,15] P6: [12,16,13,17] P7: [14,18,15,19]
C
C Also arrange the message words, so we get them as
C M0: [0,0,1,1] M1: [ 2, 2, 3, 3] M2: [ 4, 4, 5, 5] M3: [ 6, 6, 7, 7]
C M4: [8,8,9,9] M5: [10,10,11,11] M6: [12,12,13,13] M7: [14,14,15,15]
C
C Then, accumulate Y0 and Y1 (two "iters" each) using
C
C Y0 += (M0+P0) * (M2+P2) + (M1+P1) * (M3+P3)
C Y1 += (M0+P4) * (M2+P6) + (M1+P5) * (M3+P7)
C
C Next iteration is then
C
C Y0 += (M4+P4) * (M6+P6) + (M5+P5) * (M7 + P7)
C Y1 += (M4+P8) * (M6+P10) + (M5+P9) * (M7+P11)
C
C So we can reuse P4, P5, P6, P7 from the previous iteration.
C How do we fit this in registers? We need 4 Q regs for P0-P3, and one
C more for the last read key. We need at least two registers
C for the message (QA and QB, more if we want to expand only
C once). For the Y0 update, we can let the factors overwrite
C P0-P3, and for the Y1 update, we can overwrite M0-M3.
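C Editorial note, not from the original file: stripping away the SIMD
C packing, what gets accumulated per 32-byte chunk is the NH hash with the
C key shifted by 4 words per iteration; roughly, in C, with m and key
C pointing at uint32_t words and y[] holding uint64_t accumulators:
C
C   for (j = 0; j < iters; j++)            /* iters == 4 on this path */
C     for (i = 0; i < 4; i++)
C       y[j] += (uint64_t)(uint32_t)(m[i] + key[4*j + i])
C               * (uint32_t)(m[i + 4] + key[4*j + i + 4]);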
vpush {q4,q5,q6}
vld1.32 {QK0,QK1}, [KEY]!
vld1.32 {QK2}, [KEY]!
vmov QT0, QK1
vmov QT1, QK2
C Permute keys. QK2 is untouched, permuted subkeys put in QK0,QK1,QT0,QT1
vtrn.32 QK0, QK1 C Gives us [0, 4, 2, 6] and [1, 5, 3, 7]
vswp D1REG(QK0), D0REG(QK1) C Gives us [0, 4, 1, 5] and [2, 6, 3, 7]
vtrn.32 QT0, QT1 C Gives us [4, 8, 6, 10] and [5, 9, 7, 11]
vswp D1REG(QT0), D0REG(QT1) C Gives us [4, 8, 5, 9] and [6, 10, 7, 11]
vmov.i64 QY1, #0
.Loop4:
C Set m[i] <-- m[i-1] >> RSHIFT + m[i] << LSHIFT
vld1.8 {QA, QB}, [MSG :64]!
vshl.u64 QC, QA, QRIGHT
vshl.u64 QD, QB, QRIGHT
vshl.u64 QA, QA, QLEFT
vshl.u64 QB, QB, QLEFT
veor D0REG(QA), D0REG(QA), DM
veor D1REG(QA), D1REG(QA), D0REG(QC)
veor D0REG(QB), D0REG(QB), D1REG(QC)
veor D1REG(QB), D1REG(QB), D0REG(QD)
vmov DM, D1REG(QD)
C Explode message (too bad there's no vadd with scalar)
vdup.32 D1REG(QD), D1REG(QB)[1]
vdup.32 D0REG(QD), D1REG(QB)[0]
vdup.32 D1REG(QC), D0REG(QB)[1]
vdup.32 D0REG(QC), D0REG(QB)[0]
vdup.32 D1REG(QB), D1REG(QA)[1]
vdup.32 D0REG(QB), D1REG(QA)[0]
vdup.32 D1REG(QA), D0REG(QA)[1]
vdup.32 D0REG(QA), D0REG(QA)[0]
vadd.i32 QK0, QK0, QA
vadd.i32 QK1, QK1, QB
vadd.i32 QT0, QT0, QC
vadd.i32 QT1, QT1, QD
vmlal.u32 QY0, D0REG(QK0), D0REG(QT0)
vmlal.u32 QY0, D1REG(QK0), D1REG(QT0)
vmlal.u32 QY0, D0REG(QK1), D0REG(QT1)
vmlal.u32 QY0, D1REG(QK1), D1REG(QT1)
C Next 4 subkeys
vld1.32 {QT0,QT1}, [KEY]!
vmov QK0, QK2
vmov QK1, QT0
vmov QK2, QT1 C Save
vtrn.32 QK0, QK1 C Gives us [8,12,10,14] and [9,13,11,15]
vswp D1REG(QK0), D0REG(QK1) C Gives us [8,12,9,13] and [10,14,11,15]
vtrn.32 QT0, QT1 C Gives us [12,16,14,18] and [13,17,15,19]
vswp D1REG(QT0), D0REG(QT1) C Gives us [12,16,13,17] and [14,18,15,19]
vadd.i32 QA, QA, QK0
vadd.i32 QB, QB, QK1
vadd.i32 QC, QC, QT0
vadd.i32 QD, QD, QT1
subs LENGTH, LENGTH, #32
vmlal.u32 QY1, D0REG(QA), D0REG(QC)
vmlal.u32 QY1, D1REG(QA), D1REG(QC)
vmlal.u32 QY1, D0REG(QB), D0REG(QD)
vmlal.u32 QY1, D1REG(QB), D1REG(QD)
bhi .Loop4
vst1.64 {QY0, QY1}, [OUT]
vpop {q4,q5,q6}
ldr pc, [sp], #+4
.Lnh3:
vpush {q4}
vld1.32 {QK0,QK1}, [KEY]!
vmov.i64 QY1, #0
.Loop3:
C Set m[i] <-- m[i-1] >> RSHIFT + m[i] << LSHIFT
vld1.8 {QA, QB}, [MSG :64]!
vshl.u64 QT0, QA, QRIGHT
vshl.u64 QT1, QB, QRIGHT
vshl.u64 QA, QA, QLEFT
vshl.u64 QB, QB, QLEFT
veor D0REG(QA), D0REG(QA), DM
veor D1REG(QA), D1REG(QA), D0REG(QT0)
veor D0REG(QB), D0REG(QB), D1REG(QT0)
veor D1REG(QB), D1REG(QB), D0REG(QT1)
vmov DM, D1REG(QT1)
vld1.32 {QK2}, [KEY]!
C Construct factors, with low half corresponding to first iteration,
C and high half corresponding to the second iteration.
vmov QT0, QK1
vtrn.32 QK0, QT0 C Gives us [0, 4, 2, 6] and [1, 5, 3, 7]
vswp D1REG(QK0), D0REG(QT0) C Gives us [0, 4, 1, 5] and [2, 6, 3, 7]
vdup.32 D0REG(QT1), D0REG(QA)[0]
vdup.32 D1REG(QT1), D0REG(QA)[1]
vadd.i32 QT1, QT1, QK0
vmov QK0, QK2 C Save for next iteration
vtrn.32 QK1, QK2 C Gives us [4, 8, 6, 10] and [5, 9, 7, 11]
vswp D1REG(QK1), D0REG(QK2) C Gives us [4, 8, 5, 9] and [6, 10, 7, 11]
vdup.32 D0REG(QT2), D0REG(QB)[0]
vdup.32 D1REG(QT2), D0REG(QB)[1]
vadd.i32 QK1, QK1, QT2
vmlal.u32 QY0, D0REG(QT1), D0REG(QK1)
vmlal.u32 QY0, D1REG(QT1), D1REG(QK1)
vdup.32 D0REG(QT1), D1REG(QA)[0]
vdup.32 D1REG(QT1), D1REG(QA)[1]
vadd.i32 QT0, QT0, QT1
vdup.32 D0REG(QT1), D1REG(QB)[0]
vdup.32 D1REG(QT1), D1REG(QB)[1]
vadd.i32 QK2, QK2, QT1
vmlal.u32 QY0, D0REG(QT0), D0REG(QK2)
vmlal.u32 QY0, D1REG(QT0), D1REG(QK2)
vld1.32 {QK1}, [KEY]!
vadd.i32 QA, QA, QK0
vadd.i32 QB, QB, QK1
subs LENGTH, LENGTH, #32
vmlal.u32 QY1, D0REG(QA), D0REG(QB)
vmlal.u32 QY1, D1REG(QA), D1REG(QB)
bhi .Loop3
vadd.i64 D0REG(QY1), D0REG(QY1), D1REG(QY1)
vst1.64 {D0REG(QY0), D1REG(QY0), D0REG(QY1)}, [OUT]
vpop {q4}
ldr pc, [sp], #+4
.Lnh2:
vld1.32 {QK0}, [KEY]!
.Loop2:
C Set m[i] <-- m[i-1] >> RSHIFT + m[i] << LSHIFT
vld1.8 {QA, QB}, [MSG :64]!
vshl.u64 QT0, QA, QRIGHT
vshl.u64 QT1, QB, QRIGHT
vshl.u64 QA, QA, QLEFT
vshl.u64 QB, QB, QLEFT
veor D0REG(QA), D0REG(QA), DM
veor D1REG(QA), D1REG(QA), D0REG(QT0)
veor D0REG(QB), D0REG(QB), D1REG(QT0)
veor D1REG(QB), D1REG(QB), D0REG(QT1)
vmov DM, D1REG(QT1)
vld1.32 {QK1,QK2}, [KEY]!
C Construct factors, with low half corresponding to first iteration,
C and high half corresponding to the second iteration.
vmov QT0, QK1
vtrn.32 QK0, QT0 C Gives us [0, 4, 2, 6] and [1, 5, 3, 7]
vswp D1REG(QK0), D0REG(QT0) C Gives us [0, 4, 1, 5] and [2, 6, 3, 7]
vdup.32 D0REG(QT1), D0REG(QA)[0]
vdup.32 D1REG(QT1), D0REG(QA)[1]
vadd.i32 QT1, QT1, QK0
vmov QK0, QK2 C Save for next iteration
vtrn.32 QK1, QK2 C Gives us [4, 8, 6, 10] and [5, 9, 7, 11]
vswp D1REG(QK1), D0REG(QK2) C Gives us [4, 8, 5, 9] and [6, 10, 7, 11]
vdup.32 D0REG(QT2), D0REG(QB)[0]
vdup.32 D1REG(QT2), D0REG(QB)[1]
vadd.i32 QK1, QK1, QT2
vmlal.u32 QY0, D0REG(QT1), D0REG(QK1)
vmlal.u32 QY0, D1REG(QT1), D1REG(QK1)
vdup.32 D0REG(QT1), D1REG(QA)[0]
vdup.32 D1REG(QT1), D1REG(QA)[1]
vadd.i32 QT0, QT0, QT1
vdup.32 D0REG(QT1), D1REG(QB)[0]
vdup.32 D1REG(QT1), D1REG(QB)[1]
vadd.i32 QK2, QK2, QT1
subs LENGTH, LENGTH, #32
vmlal.u32 QY0, D0REG(QT0), D0REG(QK2)
vmlal.u32 QY0, D1REG(QT0), D1REG(QK2)
bhi .Loop2
vst1.64 {QY0}, [OUT]
.Lend:
ldr pc, [sp], #+4
EPILOGUE(_nettle_umac_nh_n)
C arm/neon/umac-nh.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "umac-nh.asm"
.fpu neon
define(`KEY', `r0')
define(`LENGTH', `r1')
define(`MSG', `r2')
define(`SHIFT', `r3')
define(`QA', `q0')
define(`QB', `q1')
define(`DM', `d16')
define(`QLEFT', `q9')
define(`QRIGHT', `q10')
define(`QY', `q11')
define(`QT0', `q12')
define(`QT1', `q13')
define(`QK0', `q14')
define(`QK1', `q15')
.text
.align 3
PROLOGUE(_nettle_umac_nh)
C Setup for 64-bit aligned reads
ands SHIFT, MSG, #7
and MSG, MSG, #-8
vld1.8 {DM}, [MSG :64]
addne MSG, MSG, #8
addeq SHIFT, SHIFT, #8
C FIXME: Combine as rsb ?
lsl SHIFT, SHIFT, #3
neg SHIFT, SHIFT
C Right shift in QRIGHT (both halves)
vmov.i32 D0REG(QRIGHT)[0], SHIFT
vmov.32 D1REG(QRIGHT), D0REG(QRIGHT)
add SHIFT, SHIFT, #64
vmov.i32 D0REG(QLEFT)[0], SHIFT
vmov.32 D1REG(QLEFT), D0REG(QLEFT)
vmov.i64 QY, #0
vshl.u64 DM, DM, D0REG(QRIGHT)
.Loop:
C Set m[i] <-- m[i-1] >> RSHIFT + m[i] << LSHIFT
vld1.8 {QA, QB}, [MSG :64]!
vshl.u64 QT0, QA, QRIGHT
vshl.u64 QT1, QB, QRIGHT
vshl.u64 QA, QA, QLEFT
vshl.u64 QB, QB, QLEFT
veor D0REG(QA), D0REG(QA), DM
veor D1REG(QA), D1REG(QA), D0REG(QT0)
veor D0REG(QB), D0REG(QB), D1REG(QT0)
veor D1REG(QB), D1REG(QB), D0REG(QT1)
vmov DM, D1REG(QT1)
vld1.i32 {QK0, QK1}, [KEY]!
vadd.i32 QA, QA, QK0
vadd.i32 QB, QB, QK1
subs LENGTH, LENGTH, #32
vmlal.u32 QY, D0REG(QA), D0REG(QB)
vmlal.u32 QY, D1REG(QA), D1REG(QB)
bhi .Loop
vadd.i64 D0REG(QY), D0REG(QY), D1REG(QY)
C return value needs to respect word order mandated by AAPCS
IF_LE(` vmov r0, r1, D0REG(QY)')
IF_BE(` vmov r1, r0, D0REG(QY)')
bx lr
EPILOGUE(_nettle_umac_nh)
C arm/v6/aes-decrypt-internal.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.arch armv6
include_src(`arm/aes.m4')
define(`PARAM_ROUNDS', `r0')
define(`PARAM_KEYS', `r1')
define(`TABLE', `r2')
define(`LENGTH', `r3')
C On stack: DST, SRC
define(`W0', `r4')
define(`W1', `r5')
define(`W2', `r6')
define(`W3', `r7')
define(`T0', `r8')
define(`COUNT', `r10')
define(`KEY', `r11')
define(`X0', `r0') C Overlaps PARAM_ROUNDS and PARAM_KEYS
define(`X1', `r1')
define(`X2', `r12')
define(`X3', `r14') C lr
define(`FRAME_ROUNDS', `[sp]')
define(`FRAME_KEYS', `[sp, #+4]')
C 8 saved registers
define(`FRAME_DST', `[sp, #+40]')
define(`FRAME_SRC', `[sp, #+44]')
define(`SRC', `r12') C Overlap registers used in inner loop.
define(`DST', `COUNT')
C AES_DECRYPT_ROUND(x0,x1,x2,x3,w0,w1,w2,w3,key)
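C Editorial note, not from the original file: one table-based decryption
C round in C, with T0-T3 standing for the four 256-entry 32-bit tables at
C consecutive 1 KB offsets in TABLE and rk the current round key
C (illustrative names, byte order as produced by AES_LOAD):
C
C   w0 = T0[x0 & 0xff] ^ T1[(x3 >> 8) & 0xff]
C        ^ T2[(x2 >> 16) & 0xff] ^ T3[(x1 >> 24) & 0xff] ^ rk[0];
C
C and likewise for w1-w3, rotating which x word feeds each byte position.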
define(`AES_DECRYPT_ROUND', `
uxtb T0, $1
ldr $5, [TABLE, T0, lsl #2]
uxtb T0, $2
ldr $6, [TABLE, T0, lsl #2]
uxtb T0, $3
ldr $7, [TABLE, T0, lsl #2]
uxtb T0, $4
ldr $8, [TABLE, T0, lsl #2]
uxtb T0, $4, ror #8
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $1, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $2, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $3, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $8, $8, T0
uxtb T0, $3, ror #16
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $4, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $1, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $2, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $8, $8, T0
uxtb T0, $2, ror #24
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $3, ror #24
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $4, ror #24
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $1, ror #24
ldr T0, [TABLE, T0, lsl #2]
ldm $9, {$1,$2,$3,$4}
eor $8, $8, T0
sub TABLE, TABLE, #3072
eor $5, $5, $1
eor $6, $6, $2
sub $9, $9, #16
eor $7, $7, $3
eor $8, $8, $4
')
.file "aes-decrypt-internal.asm"
C _aes_decrypt(unsigned rounds, const uint32_t *keys,
C const struct aes_table *T,
C size_t length, uint8_t *dst,
C uint8_t *src)
.text
ALIGN(4)
PROLOGUE(_nettle_aes_decrypt)
teq LENGTH, #0
beq .Lend
ldr SRC, [sp, #+4]
push {r0,r1, r4,r5,r6,r7,r8,r10,r11,lr}
ALIGN(16)
.Lblock_loop:
ldm sp, {COUNT, KEY}
add TABLE, TABLE, #AES_TABLE0
AES_LOAD(SRC,KEY,W0)
AES_LOAD(SRC,KEY,W1)
AES_LOAD(SRC,KEY,W2)
AES_LOAD_INCR(SRC,KEY,W3, -28)
str SRC, FRAME_SRC
b .Lentry
ALIGN(16)
.Lround_loop:
C Transform X -> W
AES_DECRYPT_ROUND(X0, X1, X2, X3, W0, W1, W2, W3, KEY)
.Lentry:
subs COUNT, COUNT,#2
C Transform W -> X
AES_DECRYPT_ROUND(W0, W1, W2, W3, X0, X1, X2, X3, KEY)
bne .Lround_loop
sub TABLE, TABLE, #AES_TABLE0
C Final round
ldr DST, FRAME_DST
AES_FINAL_ROUND_V6(X0, X3, X2, X1, KEY, W0)
AES_FINAL_ROUND_V6(X1, X0, X3, X2, KEY, W1)
AES_FINAL_ROUND_V6(X2, X1, X0, X3, KEY, W2)
AES_FINAL_ROUND_V6(X3, X2, X1, X0, KEY, W3)
ldr SRC, FRAME_SRC
AES_STORE(DST,W0)
AES_STORE(DST,W1)
AES_STORE(DST,W2)
AES_STORE(DST,W3)
str DST, FRAME_DST
subs LENGTH, LENGTH, #16
bhi .Lblock_loop
add sp, sp, #8 C Drop saved r0, r1
pop {r4,r5,r6,r7,r8,r10,r11,pc}
.Lend:
bx lr
EPILOGUE(_nettle_aes_decrypt)
C arm/v6/aes-encrypt-internal.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.arch armv6
include_src(`arm/aes.m4')
C Benchmarked at 706, 870, 963 cycles/block on Cortex-A9,
C for 128, 192 and 256 bit key sizes.
C Possible improvements: More efficient load and store with
C aligned accesses. Better scheduling.
define(`PARAM_ROUNDS', `r0')
define(`PARAM_KEYS', `r1')
define(`TABLE', `r2')
define(`LENGTH', `r3')
C On stack: DST, SRC
define(`W0', `r4')
define(`W1', `r5')
define(`W2', `r6')
define(`W3', `r7')
define(`T0', `r8')
define(`COUNT', `r10')
define(`KEY', `r11')
define(`X0', `r0') C Overlaps PARAM_ROUNDS and PARAM_KEYS
define(`X1', `r1')
define(`X2', `r12')
define(`X3', `r14') C lr
define(`FRAME_ROUNDS', `[sp]')
define(`FRAME_KEYS', `[sp, #+4]')
C 8 saved registers
define(`FRAME_DST', `[sp, #+40]')
define(`FRAME_SRC', `[sp, #+44]')
define(`SRC', `r12') C Overlap registers used in inner loop.
define(`DST', `COUNT')
C 53 instr.
C It's tempting to use eor with rotation, but that's slower.
C AES_ENCRYPT_ROUND(x0,x1,x2,x3,w0,w1,w2,w3,key)
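C Editorial note, not from the original file: one table-based encryption
C round in C, with T0-T3 standing for the four 256-entry 32-bit tables at
C consecutive 1 KB offsets in TABLE and rk the current round key
C (illustrative names, byte order as produced by AES_LOAD):
C
C   w0 = T0[x0 & 0xff] ^ T1[(x1 >> 8) & 0xff]
C        ^ T2[(x2 >> 16) & 0xff] ^ T3[(x3 >> 24) & 0xff] ^ rk[0];
C
C with w1-w3 formed the same way from rotated combinations of x0-x3.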
define(`AES_ENCRYPT_ROUND', `
uxtb T0, $1
ldr $5, [TABLE, T0, lsl #2]
uxtb T0, $2
ldr $6, [TABLE, T0, lsl #2]
uxtb T0, $3
ldr $7, [TABLE, T0, lsl #2]
uxtb T0, $4
ldr $8, [TABLE, T0, lsl #2]
uxtb T0, $2, ror #8
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $3, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $4, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $1, ror #8
ldr T0, [TABLE, T0, lsl #2]
eor $8, $8, T0
uxtb T0, $3, ror #16
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $4, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $1, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $2, ror #16
ldr T0, [TABLE, T0, lsl #2]
eor $8, $8, T0
uxtb T0, $4, ror #24
add TABLE, TABLE, #1024
ldr T0, [TABLE, T0, lsl #2]
eor $5, $5, T0
uxtb T0, $1, ror #24
ldr T0, [TABLE, T0, lsl #2]
eor $6, $6, T0
uxtb T0, $2, ror #24
ldr T0, [TABLE, T0, lsl #2]
eor $7, $7, T0
uxtb T0, $3, ror #24
ldr T0, [TABLE, T0, lsl #2]
ldm $9!, {$1,$2,$3,$4}
eor $8, $8, T0
sub TABLE, TABLE, #3072
eor $5, $5, $1
eor $6, $6, $2
eor $7, $7, $3
eor $8, $8, $4
')
.file "aes-encrypt-internal.asm"
C _aes_encrypt(unsigned rounds, const uint32_t *keys,
C const struct aes_table *T,
C size_t length, uint8_t *dst,
C uint8_t *src)
.text
ALIGN(4)
PROLOGUE(_nettle_aes_encrypt)
teq LENGTH, #0
beq .Lend
ldr SRC, [sp, #+4]
push {r0,r1, r4,r5,r6,r7,r8,r10,r11,lr}
ALIGN(16)
.Lblock_loop:
ldm sp, {COUNT, KEY}
add TABLE, TABLE, #AES_TABLE0
AES_LOAD(SRC,KEY,W0)
AES_LOAD(SRC,KEY,W1)
AES_LOAD(SRC,KEY,W2)
AES_LOAD(SRC,KEY,W3)
str SRC, FRAME_SRC
b .Lentry
ALIGN(16)
.Lround_loop:
C Transform X -> W
AES_ENCRYPT_ROUND(X0, X1, X2, X3, W0, W1, W2, W3, KEY)
.Lentry:
subs COUNT, COUNT,#2
C Transform W -> X
AES_ENCRYPT_ROUND(W0, W1, W2, W3, X0, X1, X2, X3, KEY)
bne .Lround_loop
sub TABLE, TABLE, #AES_TABLE0
C Final round
ldr DST, FRAME_DST
AES_FINAL_ROUND_V6(X0, X1, X2, X3, KEY, W0)
AES_FINAL_ROUND_V6(X1, X2, X3, X0, KEY, W1)
AES_FINAL_ROUND_V6(X2, X3, X0, X1, KEY, W2)
AES_FINAL_ROUND_V6(X3, X0, X1, X2, KEY, W3)
ldr SRC, FRAME_SRC
AES_STORE(DST,W0)
AES_STORE(DST,W1)
AES_STORE(DST,W2)
AES_STORE(DST,W3)
str DST, FRAME_DST
subs LENGTH, LENGTH, #16
bhi .Lblock_loop
add sp, sp, #8 C Drop saved r0, r1
pop {r4,r5,r6,r7,r8,r10,r11,pc}
.Lend:
bx lr
EPILOGUE(_nettle_aes_encrypt)
C arm/v6/sha1-compress.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "sha1-compress.asm"
.arch armv6
define(`STATE', `r0')
define(`INPUT', `r1')
define(`SA', `r2')
define(`SB', `r3')
define(`SC', `r4')
define(`SD', `r5')
define(`SE', `r6')
define(`T0', `r7')
define(`SHIFT', `r8')
define(`WPREV', `r10')
define(`W', `r12')
define(`K', `lr')
C FIXME: Could avoid a mov with even and odd variants.
define(`LOAD', `
ldr T0, [INPUT], #+4
sel W, WPREV, T0
ror W, W, SHIFT
mov WPREV, T0
IF_LE(` rev W, W')
str W, [SP,#eval(4*$1)]
')
define(`EXPN', `
ldr W, [sp, #+eval(4*$1)]
ldr T0, [sp, #+eval(4*(($1 + 2) % 16))]
eor W, W, T0
ldr T0, [sp, #+eval(4*(($1 + 8) % 16))]
eor W, W, T0
ldr T0, [sp, #+eval(4*(($1 + 13) % 16))]
eor W, W, T0
ror W, W, #31
str W, [sp, #+eval(4*$1)]
')
C F1(B,C,D) = D^(B&(C^D))
C ROUND1(A,B,C,D,E)
define(`ROUND1', `
eor T0, $3, $4
add $5, $5, K
and T0, T0, $2
add $5, $5, $1, ror #27
eor T0, T0, $4
add $5, $5, W
ror $2, $2, #2
add $5, $5, T0
')
C F2(B,C,D) = B^C^D
define(`ROUND2', `
eor T0, $2, $4
add $5, $5, K
eor T0, T0, $3
add $5, $5, $1, ror #27
add $5, $5, W
ror $2, $2, #2
add $5, $5, T0
')
C F3(B,C,D) = (B&C) | (D & (B|C)) = (B & (C ^ D)) + (C & D)
define(`ROUND3', `
eor T0, $3, $4
add $5, $5, K
and T0, T0, $2
add $5, $5, $1, ror #27
add $5, $5, T0
add $5, $5, W
and T0, $3, $4
ror $2, $2, #2
add $5, $5, T0
')
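C Editorial note, not from the original file: each ROUNDn macro computes,
C in scalar C with ROTL32 a 32-bit rotate left and F one of F1/F2/F3 above:
C
C   e += ROTL32(a, 5) + F(b, c, d) + K + W;
C   b = ROTL32(b, 30);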
C void nettle_sha1_compress(uint32_t *state, const uint8_t *input)
.text
.align 2
.LK1:
.int 0x5A827999
.LK2:
.int 0x6ED9EBA1
.LK3:
.int 0x8F1BBCDC
PROLOGUE(nettle_sha1_compress)
push {r4,r5,r6,r7,r8,r10,lr}
sub sp, sp, #64
C Sets SHIFT to 8*low bits of input pointer. Sets up GE flags
C as follows, corresponding to bytes to be used from WPREV
C SHIFT 0 8 16 24
C CPSR.GE 0000 1110 1100 1000
ands SHIFT, INPUT, #3
and INPUT, INPUT, $-4
ldr WPREV, [INPUT]
addne INPUT, INPUT, #4 C Unaligned input
lsl SHIFT, SHIFT, #3
mov T0, #0
movne T0, #-1
IF_LE(` lsl W, T0, SHIFT')
IF_BE(` lsr W, T0, SHIFT')
uadd8 T0, T0, W C Sets APSR.GE bits
C on BE rotate right by 32-SHIFT bits
C because there is no rotate left
IF_BE(` rsb SHIFT, SHIFT, #32')
ldr K, .LK1
ldm STATE, {SA,SB,SC,SD,SE}
LOAD( 0) ROUND1(SA, SB, SC, SD, SE)
LOAD( 1) ROUND1(SE, SA, SB, SC, SD)
LOAD( 2) ROUND1(SD, SE, SA, SB, SC)
LOAD( 3) ROUND1(SC, SD, SE, SA, SB)
LOAD( 4) ROUND1(SB, SC, SD, SE, SA)
LOAD( 5) ROUND1(SA, SB, SC, SD, SE)
LOAD( 6) ROUND1(SE, SA, SB, SC, SD)
LOAD( 7) ROUND1(SD, SE, SA, SB, SC)
LOAD( 8) ROUND1(SC, SD, SE, SA, SB)
LOAD( 9) ROUND1(SB, SC, SD, SE, SA)
LOAD(10) ROUND1(SA, SB, SC, SD, SE)
LOAD(11) ROUND1(SE, SA, SB, SC, SD)
LOAD(12) ROUND1(SD, SE, SA, SB, SC)
LOAD(13) ROUND1(SC, SD, SE, SA, SB)
LOAD(14) ROUND1(SB, SC, SD, SE, SA)
LOAD(15) ROUND1(SA, SB, SC, SD, SE)
EXPN( 0) ROUND1(SE, SA, SB, SC, SD)
EXPN( 1) ROUND1(SD, SE, SA, SB, SC)
EXPN( 2) ROUND1(SC, SD, SE, SA, SB)
EXPN( 3) ROUND1(SB, SC, SD, SE, SA)
ldr K, .LK2
EXPN( 4) ROUND2(SA, SB, SC, SD, SE)
EXPN( 5) ROUND2(SE, SA, SB, SC, SD)
EXPN( 6) ROUND2(SD, SE, SA, SB, SC)
EXPN( 7) ROUND2(SC, SD, SE, SA, SB)
EXPN( 8) ROUND2(SB, SC, SD, SE, SA)
EXPN( 9) ROUND2(SA, SB, SC, SD, SE)
EXPN(10) ROUND2(SE, SA, SB, SC, SD)
EXPN(11) ROUND2(SD, SE, SA, SB, SC)
EXPN(12) ROUND2(SC, SD, SE, SA, SB)
EXPN(13) ROUND2(SB, SC, SD, SE, SA)
EXPN(14) ROUND2(SA, SB, SC, SD, SE)
EXPN(15) ROUND2(SE, SA, SB, SC, SD)
EXPN( 0) ROUND2(SD, SE, SA, SB, SC)
EXPN( 1) ROUND2(SC, SD, SE, SA, SB)
EXPN( 2) ROUND2(SB, SC, SD, SE, SA)
EXPN( 3) ROUND2(SA, SB, SC, SD, SE)
EXPN( 4) ROUND2(SE, SA, SB, SC, SD)
EXPN( 5) ROUND2(SD, SE, SA, SB, SC)
EXPN( 6) ROUND2(SC, SD, SE, SA, SB)
EXPN( 7) ROUND2(SB, SC, SD, SE, SA)
ldr K, .LK3
EXPN( 8) ROUND3(SA, SB, SC, SD, SE)
EXPN( 9) ROUND3(SE, SA, SB, SC, SD)
EXPN(10) ROUND3(SD, SE, SA, SB, SC)
EXPN(11) ROUND3(SC, SD, SE, SA, SB)
EXPN(12) ROUND3(SB, SC, SD, SE, SA)
EXPN(13) ROUND3(SA, SB, SC, SD, SE)
EXPN(14) ROUND3(SE, SA, SB, SC, SD)
EXPN(15) ROUND3(SD, SE, SA, SB, SC)
EXPN( 0) ROUND3(SC, SD, SE, SA, SB)
EXPN( 1) ROUND3(SB, SC, SD, SE, SA)
EXPN( 2) ROUND3(SA, SB, SC, SD, SE)
EXPN( 3) ROUND3(SE, SA, SB, SC, SD)
EXPN( 4) ROUND3(SD, SE, SA, SB, SC)
EXPN( 5) ROUND3(SC, SD, SE, SA, SB)
EXPN( 6) ROUND3(SB, SC, SD, SE, SA)
EXPN( 7) ROUND3(SA, SB, SC, SD, SE)
EXPN( 8) ROUND3(SE, SA, SB, SC, SD)
EXPN( 9) ROUND3(SD, SE, SA, SB, SC)
EXPN(10) ROUND3(SC, SD, SE, SA, SB)
EXPN(11) ROUND3(SB, SC, SD, SE, SA)
ldr K, .LK4
EXPN(12) ROUND2(SA, SB, SC, SD, SE)
EXPN(13) ROUND2(SE, SA, SB, SC, SD)
EXPN(14) ROUND2(SD, SE, SA, SB, SC)
EXPN(15) ROUND2(SC, SD, SE, SA, SB)
EXPN( 0) ROUND2(SB, SC, SD, SE, SA)
EXPN( 1) ROUND2(SA, SB, SC, SD, SE)
EXPN( 2) ROUND2(SE, SA, SB, SC, SD)
EXPN( 3) ROUND2(SD, SE, SA, SB, SC)
EXPN( 4) ROUND2(SC, SD, SE, SA, SB)
EXPN( 5) ROUND2(SB, SC, SD, SE, SA)
EXPN( 6) ROUND2(SA, SB, SC, SD, SE)
EXPN( 7) ROUND2(SE, SA, SB, SC, SD)
EXPN( 8) ROUND2(SD, SE, SA, SB, SC)
EXPN( 9) ROUND2(SC, SD, SE, SA, SB)
EXPN(10) ROUND2(SB, SC, SD, SE, SA)
EXPN(11) ROUND2(SA, SB, SC, SD, SE)
EXPN(12) ROUND2(SE, SA, SB, SC, SD)
EXPN(13) ROUND2(SD, SE, SA, SB, SC)
EXPN(14) ROUND2(SC, SD, SE, SA, SB)
EXPN(15) ROUND2(SB, SC, SD, SE, SA)
C Use registers we no longer need.
ldm STATE, {INPUT,T0,SHIFT,W,K}
add SA, SA, INPUT
add SB, SB, T0
add SC, SC, SHIFT
add SD, SD, W
add SE, SE, K
add sp, sp, #64
stm STATE, {SA,SB,SC,SD,SE}
pop {r4,r5,r6,r7,r8,r10,pc}
EPILOGUE(nettle_sha1_compress)
.LK4:
.int 0xCA62C1D6
C arm/v6/sha256-compress-n.asm
ifelse(`
Copyright (C) 2013, 2022 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "sha256-compress-n.asm"
.arch armv6
define(`STATE', `r0')
define(`K', `r1')
define(`BLOCKS', `r2')
define(`INPUT', `r3')
define(`SA', `r2') C Overlap BLOCKS
define(`SB', `r4')
define(`SC', `r5')
define(`SD', `r6')
define(`SE', `r7')
define(`SF', `r8')
define(`SG', `r10')
define(`SH', `r11')
define(`T0', `r12')
define(`T1', `r3') C Overlap INPUT
define(`COUNT', `r0') C Overlap STATE
define(`W', `r14')
C Used for data load. Must not clobber STATE (r0), K (r1) or INPUT (r3)
define(`I0', `r2')
define(`I1', `r4')
define(`I2', `r5')
define(`I3', `r6')
define(`I4', `r7')
define(`DST', `r8')
define(`SHIFT', `r10')
define(`ILEFT', `r11')
define(`EXPN', `
ldr W, [sp, #+eval(4*$1)]
ldr T0, [sp, #+eval(4*(($1 + 14) % 16))]
ror T1, T0, #17
eor T1, T1, T0, ror #19
eor T1, T1, T0, lsr #10
add W, W, T1
ldr T0, [sp, #+eval(4*(($1 + 9) % 16))]
add W, W, T0
ldr T0, [sp, #+eval(4*(($1 + 1) % 16))]
ror T1, T0, #7
eor T1, T1, T0, ror #18
eor T1, T1, T0, lsr #3
add W, W, T1
str W, [sp, #+eval(4*$1)]
')
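C The EXPN macro above does the SHA-256 message expansion in place, using
C the 16-word block at sp as a circular buffer. A hedged C sketch of the
C same step (ror32 and w are illustrative names, not Nettle API):
C
C   static uint32_t ror32(uint32_t x, unsigned n)
C   { return (x >> n) | (x << (32 - n)); }
C
C   /* i is the expansion index, 0 <= i < 16 */
C   uint32_t s1 = ror32(w[(i+14)%16], 17) ^ ror32(w[(i+14)%16], 19)
C                 ^ (w[(i+14)%16] >> 10);
C   uint32_t s0 = ror32(w[(i+1)%16], 7) ^ ror32(w[(i+1)%16], 18)
C                 ^ (w[(i+1)%16] >> 3);
C   w[i] += s1 + w[(i+9)%16] + s0;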
C ROUND(A,B,C,D,E,F,G,H)
C
C H += S1(E) + Choice(E,F,G) + K + W
C D += H
C H += S0(A) + Majority(A,B,C)
C
C Where
C
C S1(E) = E<<<26 ^ E<<<21 ^ E<<<7
C S0(A) = A<<<30 ^ A<<<19 ^ A<<<10
C Choice (E, F, G) = G^(E&(F^G))
C Majority (A,B,C) = (A&B) + (C&(A^B))
define(`ROUND', `
ror T0, $5, #6
eor T0, T0, $5, ror #11
eor T0, T0, $5, ror #25
add $8, $8, T0
eor T0, $6, $7
and T0, T0, $5
eor T0, T0, $7
add $8,$8, T0
ldr T0, [K], #+4
add $8, $8, W
add $8, $8, T0
add $4, $4, $8
ror T0, $1, #2
eor T0, T0, $1, ror #13
eor T0, T0, $1, ror #22
add $8, $8, T0
and T0, $1, $2
add $8, $8, T0
eor T0, $1, $2
and T0, T0, $3
add $8, $8, T0
')
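C A hedged C sketch of one round as computed by the ROUND macro above
C (ror32, k and the lower-case variables are illustrative names, not
C Nettle API; k is the round constant loaded via the K pointer):
C
C   h += (ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25))  /* S1(E) */
C        + (g ^ (e & (f ^ g)))                        /* Choice(E,F,G) */
C        + k + w;
C   d += h;
C   h += (ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22))  /* S0(A) */
C        + ((a & b) + (c & (a ^ b)));                 /* Majority(A,B,C) */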
define(`NOEXPN', `
ldr W, [sp, + $1]
add $1, $1, #4
')
.text
.align 2
define(`SHIFT_OFFSET', 64)
define(`INPUT_OFFSET', 68)
define(`I0_OFFSET', 72)
define(`STATE_OFFSET', 76)
define(`K_OFFSET', 80)
define(`BLOCKS_OFFSET', 84)
C const uint8_t *
C _nettle_sha256_compress_n(uint32_t *state, const uint32_t *k,
C size_t blocks, const uint8_t *input)
PROLOGUE(_nettle_sha256_compress_n)
cmp BLOCKS, #0
bne .Lwork
mov r0, INPUT
bx lr
.Lwork:
C Also save STATE (r0), K (r1) and BLOCKS (r2)
push {r0,r1,r2,r4,r5,r6,r7,r8,r10,r11,r12,r14}
sub sp, sp, #STATE_OFFSET
C Load data up front, since we don't have enough registers
C to load and shift on-the-fly
ands SHIFT, INPUT, #3
and INPUT, INPUT, $-4
ldr I0, [INPUT]
addne INPUT, INPUT, #4
lsl SHIFT, SHIFT, #3
mov T0, #0
movne T0, #-1
IF_LE(` lsl I1, T0, SHIFT')
IF_BE(` lsr I1, T0, SHIFT')
uadd8 T0, T0, I1 C Sets APSR.GE bits
C on BE rotate right by 32-SHIFT bits
C because there is no rotate left
IF_BE(` rsb SHIFT, SHIFT, #32')
str SHIFT, [sp, #SHIFT_OFFSET]
.Loop_block:
mov DST, sp
mov ILEFT, #4
.Lcopy:
ldm INPUT!, {I1,I2,I3,I4}
sel I0, I0, I1
ror I0, I0, SHIFT
IF_LE(` rev I0, I0')
sel I1, I1, I2
ror I1, I1, SHIFT
IF_LE(` rev I1, I1')
sel I2, I2, I3
ror I2, I2, SHIFT
IF_LE(` rev I2, I2')
sel I3, I3, I4
ror I3, I3, SHIFT
IF_LE(` rev I3, I3')
subs ILEFT, ILEFT, #1
stm DST!, {I0,I1,I2,I3}
mov I0, I4
bne .Lcopy
str INPUT, [sp, #INPUT_OFFSET]
str I0, [sp, #I0_OFFSET]
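C The copy loop above leaves the 16 message words of the block at sp, read
C big-endian from the possibly unaligned input pointer. A hedged portable C
C equivalent of what the sel/ror/rev realignment achieves (w and p are
C illustrative names):
C
C   for (unsigned i = 0; i < 16; i++)
C     w[i] = ((uint32_t) p[4*i] << 24) | ((uint32_t) p[4*i+1] << 16)
C            | ((uint32_t) p[4*i+2] << 8) | (uint32_t) p[4*i+3];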
C Process block, with input at sp, expanded on the fly
ldm STATE, {SA,SB,SC,SD,SE,SF,SG,SH}
mov COUNT,#0
.Loop1:
NOEXPN(COUNT) ROUND(SA,SB,SC,SD,SE,SF,SG,SH)
NOEXPN(COUNT) ROUND(SH,SA,SB,SC,SD,SE,SF,SG)
NOEXPN(COUNT) ROUND(SG,SH,SA,SB,SC,SD,SE,SF)
NOEXPN(COUNT) ROUND(SF,SG,SH,SA,SB,SC,SD,SE)
NOEXPN(COUNT) ROUND(SE,SF,SG,SH,SA,SB,SC,SD)
NOEXPN(COUNT) ROUND(SD,SE,SF,SG,SH,SA,SB,SC)
NOEXPN(COUNT) ROUND(SC,SD,SE,SF,SG,SH,SA,SB)
NOEXPN(COUNT) ROUND(SB,SC,SD,SE,SF,SG,SH,SA)
cmp COUNT,#64
bne .Loop1
mov COUNT, #3
.Loop2:
EXPN( 0) ROUND(SA,SB,SC,SD,SE,SF,SG,SH)
EXPN( 1) ROUND(SH,SA,SB,SC,SD,SE,SF,SG)
EXPN( 2) ROUND(SG,SH,SA,SB,SC,SD,SE,SF)
EXPN( 3) ROUND(SF,SG,SH,SA,SB,SC,SD,SE)
EXPN( 4) ROUND(SE,SF,SG,SH,SA,SB,SC,SD)
EXPN( 5) ROUND(SD,SE,SF,SG,SH,SA,SB,SC)
EXPN( 6) ROUND(SC,SD,SE,SF,SG,SH,SA,SB)
EXPN( 7) ROUND(SB,SC,SD,SE,SF,SG,SH,SA)
EXPN( 8) ROUND(SA,SB,SC,SD,SE,SF,SG,SH)
EXPN( 9) ROUND(SH,SA,SB,SC,SD,SE,SF,SG)
EXPN(10) ROUND(SG,SH,SA,SB,SC,SD,SE,SF)
EXPN(11) ROUND(SF,SG,SH,SA,SB,SC,SD,SE)
EXPN(12) ROUND(SE,SF,SG,SH,SA,SB,SC,SD)
EXPN(13) ROUND(SD,SE,SF,SG,SH,SA,SB,SC)
EXPN(14) ROUND(SC,SD,SE,SF,SG,SH,SA,SB)
subs COUNT, COUNT, #1
EXPN(15) ROUND(SB,SC,SD,SE,SF,SG,SH,SA)
bne .Loop2
ldr STATE, [sp, #STATE_OFFSET]
C Load previous state into registers that are no longer needed
ldm STATE, {K, T1, T0, W}
add SA, SA, K
add SB, SB, T1
add SC, SC, T0
add SD, SD, W
stm STATE!, {SA,SB,SC,SD}
ldm STATE, {K, T1, T0, W}
add SE, SE, K
add SF, SF, T1
add SG, SG, T0
add SH, SH, W
stm STATE, {SE,SF,SG,SH}
sub STATE, STATE, #16
ldr BLOCKS, [sp, #BLOCKS_OFFSET]
subs BLOCKS, BLOCKS, #1
str BLOCKS, [sp, #BLOCKS_OFFSET]
ldr SHIFT, [sp, #SHIFT_OFFSET]
ldr K, [sp, #K_OFFSET]
ldr INPUT, [sp, #INPUT_OFFSET]
ldr I0, [sp, #I0_OFFSET]
bne .Loop_block
C Restore input pointer adjustment
IF_BE(` rsbs SHIFT, SHIFT, #32')
IF_LE(` cmp SHIFT, #0')
subne INPUT, INPUT, #4
orr r0, INPUT, SHIFT, lsr #3
C Discard saved STATE, K and BLOCKS.
add sp, sp, #STATE_OFFSET + 12
pop {r4,r5,r6,r7,r8,r10,r11,r12,pc}
EPILOGUE(_nettle_sha256_compress_n)
General-purpose Registers[1]
There are thirty-one, 64-bit, general-purpose (integer) registers visible to
the A64 instruction set; these are labeled r0-r30. In a 64-bit context these
registers are normally referred to using the names x0-x30; in a 32-bit context
the registers are specified by using w0-w30. Additionally, a stack-pointer
register, SP, can be used with a restricted number of instructions.
The first eight registers, r0-r7, are used to pass argument values into
a subroutine and to return result values from a function.
Software developers creating platform-independent code are advised to avoid
using r18 if at all possible. Most compilers provide a mechanism to prevent
specific registers from being used for general allocation; portable hand-coded
assembler should avoid it entirely. It should not be assumed that treating the
register as callee-saved will be sufficient to satisfy the requirements of the
platform. Virtualization code must, of course, treat the register as they would
any other resource provided to the virtual machine.
A subroutine invocation must preserve the contents of the registers r19-r29
and SP. All 64 bits of each value stored in r19-r29 must be preserved, even
when using the ILP32 data model.
SIMD and Floating-Point Registers[1]
Unlike in AArch32, in AArch64 the 128-bit and 64-bit views of a SIMD and
Floating-Point register do not overlap multiple registers in a narrower view,
so q1, d1 and s1 all refer to the same entry in the register bank.
The first eight registers, v0-v7, are used to pass argument values into
a subroutine and to return result values from a function. They may also
be used to hold intermediate values within a routine (but, in general,
only between subroutine calls).
Registers v8-v15 must be preserved by a callee across subroutine calls;
the remaining registers (v0-v7, v16-v31) do not need to be preserved
(or should be preserved by the caller). Additionally, only the bottom 64 bits
of each value stored in v8-v15 need to be preserved.
Endianness
Similar to Arm (AArch32), AArch64 can run with little-endian or big-endian
memory accesses. Endianness is handled exclusively on load and store
operations. Register layout and operation behaviour are identical in both
modes.
When writing SIMD code, endianness interaction with vector loads and stores may
exhibit seemingly unintuitive behaviour, particularly when mixing normal and
vector load/store operations.
See [2] for a good overview, particularly into the pitfalls of using
ldr/str vs. ld1/st1.
For example, ld1 {v1.2d,v2.2d},[x0] will load v1 and v2 with elements of a
one-dimensional vector from consecutive memory locations. So v1.d[0] will be
read from x0+0, v1.d[1] from x0+8 (bytes) and v2.d[0] from x0+16 and v2.d[1]
from x0+24. That'll be the same in LE and BE mode because it is the structure
of the vector prescribed by the load operation. Endianness will be applied to
the individual doublewords but the order in which they're loaded from memory
and in which they're put into d[0] and d[1] won't change.
Another way is to explicitly load a vector of bytes using ld1 {v1.16b,
v2.16b},[x0]. This will load x0+0 into v1.b[0], x0+1 (byte) into v1.b[1] and so
forth. This load (or store) is endianness-neutral and behaves identically in
LE and BE mode.
Care must, however, be taken when switching views onto the registers: d[0] is
mapped onto b[0] through b[7], with b[0] the least significant byte of d[0]
and b[7] the most significant. This layout is the same in both memory
endianness modes. ld1 {v1.8b}, however, always loads a vector of eight bytes
from consecutive memory locations into b[0] through b[7]. When accessed
through d[0], this will only appear as the expected doubleword-sized number
if it was indeed stored little-endian in memory.
Something similar happens when loading a vector of doublewords (ld1
{v1.2d},[x0]) and then accessing individual bytes of it. Bytes will only be at
the expected indices if the doublewords are indeed stored in the current
memory endianness. Therefore, it is most intuitive to use the appropriate
vector element width for the data being loaded or stored to apply the necessary
endianness correction.
Finally, ldr/str are not vector operations. When used to load a 128-bit
quadword, they apply endianness to the whole quadword. Therefore, particular
care must be taken if the loaded data is then to be regarded as elements of,
e.g., a doubleword vector: indices may appear reversed on big-endian systems
(because they are).
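To make the element-size distinction concrete, the following small example
(ours, not part of Nettle) uses the NEON intrinsics that correspond to ld1:
vld1q_u8 matches ld1 {v.16b} and vld1q_u64 matches ld1 {v.2d}. The byte load
fills lanes from consecutive addresses in both endianness modes, while each
64-bit lane of the doubleword load is read in the current memory endianness:

  #include <arm_neon.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
    uint64_t storage[2] = {0, 0};
    uint8_t *p = (uint8_t *) storage;
    for (int i = 0; i < 16; i++)
      p[i] = i;

    /* ld1 {v.16b}: lane b[i] comes from p[i], in both LE and BE mode. */
    uint8x16_t vb = vld1q_u8 (p);

    /* ld1 {v.2d}: each 64-bit lane is read with the memory endianness
       applied, so lane 0 equals the integer value storage[0]. */
    uint64x2_t vd = vld1q_u64 (storage);

    printf ("b[7]: %u\n", vgetq_lane_u8 (vb, 7));
    printf ("d[0]: %016llx\n", (unsigned long long) vgetq_lane_u64 (vd, 0));
    return 0;
  }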
Hardware-accelerated SHA Instructions
The optimized SHA cores are implemented using the SHA hashing instructions
added to AArch64 by the crypto extensions. The repository [3] illustrates using those
instructions for optimizing SHA hashing functions.
[1] https://github.com/ARM-software/abi-aa/releases/download/2020Q4/aapcs64.pdf
[2] https://llvm.org/docs/BigEndianNEON.html
[3] https://github.com/noloader/SHA-Intrinsics
C arm64/chacha-2core.asm
ifelse(`
Copyright (C) 2020 Niels Möller and Torbjörn Granlund
Copyright (C) 2022 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Register usage:
C Arguments
define(`DST', `x0')
define(`SRC', `x1')
define(`ROUNDS', `x2')
C Working state
define(`ROT24', `v0')
define(`T0', `v16')
C State, even elements in X, odd elements in Y
define(`X0', `v17')
define(`X1', `v18')
define(`X2', `v19')
define(`X3', `v20')
define(`Y0', `v21')
define(`Y1', `v22')
define(`Y2', `v23')
define(`Y3', `v24')
C Original input state
define(`S0', `v25')
define(`S1', `v26')
define(`S2', `v27')
define(`S3', `v28')
define(`S3p1', `v29')
define(`TMP0', `v30')
define(`TMP1', `v31')
C _chacha_2core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_chacha_2core)
eor X1.16b, X1.16b, X1.16b
mov w3, #1
mov X1.s[0], w3
add x3, SRC, #48
ld1 {X3.4s}, [x3]
add Y3.4s, X3.4s, X1.4s
cmhi Y3.4s, X3.4s, Y3.4s
ext Y3.16b, Y3.16b, Y3.16b, #12
orr Y3.16b, Y3.16b, X1.16b
.Lshared_entry:
adr x3, .Lrot24
ld1 {ROT24.4s},[x3]
add Y3.4s, Y3.4s, X3.4s
C Load state
ld1 {X0.4s,X1.4s,X2.4s}, [SRC]
mov S0.16b, X0.16b
mov S1.16b, X1.16b
mov S2.16b, X2.16b
mov S3.16b, X3.16b
mov S3p1.16b, Y3.16b
trn2 Y0.4s, X0.4s, X0.4s C 1 1 3 3
trn1 X0.4s, X0.4s, X0.4s C 0 0 2 2
trn2 Y1.4s, X1.4s, X1.4s C 5 5 7 7
trn1 X1.4s, X1.4s, X1.4s C 4 4 6 6
trn2 Y2.4s, X2.4s, X2.4s C 9 9 11 11
trn1 X2.4s, X2.4s, X2.4s C 8 8 10 10
trn2 Y3.4s, X3.4s, S3p1.4s C 13 13 15 15
trn1 X3.4s, X3.4s, S3p1.4s C 12 12 14 14
.Loop:
C Register layout (A is first block, B is second block)
C
C X0: A0 B0 A2 B2 Y0: A1 B1 A3 B3
C X1: A4 B4 A6 B6 Y1: A5 B5 A7 B7
C X2: A8 B8 A10 B10 Y2: A9 B9 A11 B11
C X3: A12 B12 A14 B14 Y3: A13 B13 A15 B15
add X0.4s, X0.4s, X1.4s
add Y0.4s, Y0.4s, Y1.4s
eor X3.16b, X3.16b, X0.16b
eor Y3.16b, Y3.16b, Y0.16b
rev32 X3.8h, X3.8h
rev32 Y3.8h, Y3.8h
add X2.4s, X2.4s, X3.4s
add Y2.4s, Y2.4s, Y3.4s
eor TMP0.16b, X1.16b, X2.16b
eor TMP1.16b, Y1.16b, Y2.16b
ushr X1.4s, TMP0.4s, #20
ushr Y1.4s, TMP1.4s, #20
sli X1.4s, TMP0.4s, #12
sli Y1.4s, TMP1.4s, #12
add X0.4s, X0.4s, X1.4s
add Y0.4s, Y0.4s, Y1.4s
eor X3.16b, X3.16b, X0.16b
eor Y3.16b, Y3.16b, Y0.16b
tbl X3.16b, {X3.16b}, ROT24.16b
tbl Y3.16b, {Y3.16b}, ROT24.16b
add X2.4s, X2.4s, X3.4s
add Y2.4s, Y2.4s, Y3.4s
eor TMP0.16b, X1.16b, X2.16b
eor TMP1.16b, Y1.16b, Y2.16b
ushr X1.4s, TMP0.4s, #25
ushr Y1.4s, TMP1.4s, #25
sli X1.4s, TMP0.4s, #7
sli Y1.4s, TMP1.4s, #7
ext X1.16b, X1.16b, X1.16b, #8
ext X2.16b, X2.16b, X2.16b, #8
ext Y2.16b, Y2.16b, Y2.16b, #8
ext Y3.16b, Y3.16b, Y3.16b, #8
C Register layout:
C X0: A0 B0 A2 B2 Y0: A1 B1 A3 B3
C Y1: A5 B5 A7 B7 X1: A6 B6 A4 B4 (X1 swapped)
C X2: A10 B10 A8 B8 Y2: A11 B11 A9 B9 (X2, Y2 swapped)
C Y3: A15 B15 A13 B13 X3: A12 B12 A14 B14 (Y3 swapped)
add X0.4s, X0.4s, Y1.4s
add Y0.4s, Y0.4s, X1.4s
eor Y3.16b, Y3.16b, X0.16b
eor X3.16b, X3.16b, Y0.16b
rev32 Y3.8h, Y3.8h
rev32 X3.8h, X3.8h
add X2.4s, X2.4s, Y3.4s
add Y2.4s, Y2.4s, X3.4s
eor TMP0.16b, Y1.16b, X2.16b
eor TMP1.16b, X1.16b, Y2.16b
ushr Y1.4s, TMP0.4s, #20
ushr X1.4s, TMP1.4s, #20
sli Y1.4s, TMP0.4s, #12
sli X1.4s, TMP1.4s, #12
add X0.4s, X0.4s, Y1.4s
add Y0.4s, Y0.4s, X1.4s
eor Y3.16b, Y3.16b, X0.16b
eor X3.16b, X3.16b, Y0.16b
tbl Y3.16b, {Y3.16b}, ROT24.16b
tbl X3.16b, {X3.16b}, ROT24.16b
add X2.4s, X2.4s, Y3.4s
add Y2.4s, Y2.4s, X3.4s
eor TMP0.16b, Y1.16b, X2.16b
eor TMP1.16b, X1.16b, Y2.16b
ushr Y1.4s, TMP0.4s, #25
ushr X1.4s, TMP1.4s, #25
sli Y1.4s, TMP0.4s, #7
sli X1.4s, TMP1.4s, #7
ext X1.16b, X1.16b, X1.16b, #8
ext X2.16b, X2.16b, X2.16b, #8
ext Y2.16b, Y2.16b, Y2.16b, #8
ext Y3.16b, Y3.16b, Y3.16b, #8
subs ROUNDS, ROUNDS, #2
b.ne .Loop
trn1 T0.4s, X0.4s, Y0.4s
trn2 Y0.4s, X0.4s, Y0.4s
trn1 X0.4s, X1.4s, Y1.4s
trn2 Y1.4s, X1.4s, Y1.4s
trn1 X1.4s, X2.4s, Y2.4s
trn2 Y2.4s, X2.4s, Y2.4s
trn1 X2.4s, X3.4s, Y3.4s
trn2 Y3.4s, X3.4s, Y3.4s
add T0.4s, T0.4s, S0.4s
add Y0.4s, Y0.4s, S0.4s
add X0.4s, X0.4s, S1.4s
add Y1.4s, Y1.4s, S1.4s
add X1.4s, X1.4s, S2.4s
add Y2.4s, Y2.4s, S2.4s
add X2.4s, X2.4s, S3.4s
add Y3.4s, Y3.4s, S3p1.4s
st1 {T0.16b,X0.16b,X1.16b,X2.16b}, [DST], #64
st1 {Y0.16b,Y1.16b,Y2.16b,Y3.16b}, [DST]
ret
EPILOGUE(_nettle_chacha_2core)
PROLOGUE(_nettle_chacha_2core32)
eor Y3.16b, Y3.16b, Y3.16b C {0,0,...,0}
mov w3, #1
mov Y3.s[0], w3 C {1,0,...,0}
add x3, SRC, #48
ld1 {X3.4s}, [x3]
b .Lshared_entry
EPILOGUE(_nettle_chacha_2core32)
.align 4
.Lrot24: .long 0x02010003,0x06050407,0x0a09080b,0x0e0d0c0f
C arm64/chacha-4core.asm
ifelse(`
Copyright (C) 2020 Niels Möller and Torbjörn Granlund
Copyright (C) 2022 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Register usage:
C Arguments
define(`DST', `x0')
define(`SRC', `x1')
define(`ROUNDS', `x2')
C Working state
C During the loop, used to save the original values for last 4 words
C of each block. Also used as temporaries for transpose.
define(`T0', `v0')
define(`T1', `v1')
define(`T2', `v2')
define(`T3', `v3')
define(`TMP0', `v4')
define(`TMP1', `v5')
define(`TMP2', `v6')
define(`TMP3', `v7')
define(`ROT24', `v8')
C Workaround: multi-digit argument references such as $10 are not portable
C (POSIX m4 reads them as $1 followed by a literal 0), so the QR macro takes
C a register-mapping macro (P1 or P2) as its argument instead of the registers
C See https://www.gnu.org/software/m4/manual/html_node/Arguments.html
define(`P1',
`ifelse($1, 0, v16, $1, 1, v17, $1, 2, v18, $1, 3, v19, $1, 4, v20, $1, 5, v21, $1, 6, v22, $1, 7, v23, $1, 8, v24, $1, 9, v25, $1, 10, v26, $1, 11, v27, $1, 12, v28, $1, 13, v29, $1, 14, v30, $1, 15, v31)')
define(`P2',
`ifelse($1, 0, v16, $1, 1, v21, $1, 2, v26, $1, 3, v31, $1, 4, v20, $1, 5, v25, $1, 6, v30, $1, 7, v19, $1, 8, v24, $1, 9, v29, $1, 10, v18, $1, 11, v23, $1, 12, v28, $1, 13, v17, $1, 14, v22, $1, 15, v27)')
C Main loop for round
define(`QR',`
add $1(0).4s, $1(0).4s, $1(1).4s
add $1(4).4s, $1(4).4s, $1(5).4s
add $1(8).4s, $1(8).4s, $1(9).4s
add $1(12).4s, $1(12).4s, $1(13).4s
eor $1(3).16b, $1(3).16b, $1(0).16b
eor $1(7).16b, $1(7).16b, $1(4).16b
eor $1(11).16b, $1(11).16b, $1(8).16b
eor $1(15).16b, $1(15).16b, $1(12).16b
rev32 $1(3).8h, $1(3).8h
rev32 $1(7).8h, $1(7).8h
rev32 $1(11).8h, $1(11).8h
rev32 $1(15).8h, $1(15).8h
add $1(2).4s, $1(2).4s, $1(3).4s
add $1(6).4s, $1(6).4s, $1(7).4s
add $1(10).4s, $1(10).4s, $1(11).4s
add $1(14).4s, $1(14).4s, $1(15).4s
eor TMP0.16b, $1(1).16b, $1(2).16b
eor TMP1.16b, $1(5).16b, $1(6).16b
eor TMP2.16b, $1(9).16b, $1(10).16b
eor TMP3.16b, $1(13).16b, $1(14).16b
ushr $1(1).4s, TMP0.4s, #20
ushr $1(5).4s, TMP1.4s, #20
ushr $1(9).4s, TMP2.4s, #20
ushr $1(13).4s, TMP3.4s, #20
sli $1(1).4s, TMP0.4s, #12
sli $1(5).4s, TMP1.4s, #12
sli $1(9).4s, TMP2.4s, #12
sli $1(13).4s, TMP3.4s, #12
add $1(0).4s, $1(0).4s, $1(1).4s
add $1(4).4s, $1(4).4s, $1(5).4s
add $1(8).4s, $1(8).4s, $1(9).4s
add $1(12).4s, $1(12).4s, $1(13).4s
eor $1(3).16b, $1(3).16b, $1(0).16b
eor $1(7).16b, $1(7).16b, $1(4).16b
eor $1(11).16b, $1(11).16b, $1(8).16b
eor $1(15).16b, $1(15).16b, $1(12).16b
tbl $1(3).16b, {$1(3).16b}, ROT24.16b
tbl $1(7).16b, {$1(7).16b}, ROT24.16b
tbl $1(11).16b, {$1(11).16b}, ROT24.16b
tbl $1(15).16b, {$1(15).16b}, ROT24.16b
add $1(2).4s, $1(2).4s, $1(3).4s
add $1(6).4s, $1(6).4s, $1(7).4s
add $1(10).4s, $1(10).4s, $1(11).4s
add $1(14).4s, $1(14).4s, $1(15).4s
eor TMP0.16b, $1(1).16b, $1(2).16b
eor TMP1.16b, $1(5).16b, $1(6).16b
eor TMP2.16b, $1(9).16b, $1(10).16b
eor TMP3.16b, $1(13).16b, $1(14).16b
ushr $1(1).4s, TMP0.4s, #25
ushr $1(5).4s, TMP1.4s, #25
ushr $1(9).4s, TMP2.4s, #25
ushr $1(13).4s, TMP3.4s, #25
sli $1(1).4s, TMP0.4s, #7
sli $1(5).4s, TMP1.4s, #7
sli $1(9).4s, TMP2.4s, #7
sli $1(13).4s, TMP3.4s, #7
')
define(`TRANSPOSE',`
zip1 T0.4s, $1.4s, $3.4s C A0 A2 B0 B2
zip1 T1.4s, $2.4s, $4.4s C A1 A3 B1 B3
zip2 T2.4s, $1.4s, $3.4s C C0 C2 D0 D2
zip2 T3.4s, $2.4s, $4.4s C C1 C3 D1 D3
zip1 $1.4s, T0.4s, T1.4s C A0 A1 A2 A3
zip2 $2.4s, T0.4s, T1.4s C B0 B1 B2 B3
zip1 $3.4s, T2.4s, T3.4s C C0 C1 C2 C3
zip2 $4.4s, T2.4s, T3.4s C D0 D1 D2 D3
')
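C TRANSPOSE turns four vectors that each hold one state word for all four
C blocks (lane i = block i) into four vectors that each hold four
C consecutive state words of a single block. A hedged C sketch of the net
C effect (in/out are illustrative names):
C
C   /* in[w][b] = word w of block b; out[b][w] = word w of block b */
C   for (unsigned w = 0; w < 4; w++)
C     for (unsigned b = 0; b < 4; b++)
C       out[b][w] = in[w][b];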
C _chacha_4core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_chacha_4core)
mov w3, #1
dup TMP2.4s, w3 C Apply counter carries
.Lshared_entry:
C Save callee-save registers
fmov x3, d8
adr x4, .Lcnts
ld1 {TMP3.4s,ROT24.4s},[x4]
C Load state and splat
ld1 {v16.4s,v17.4s,v18.4s,v19.4s}, [SRC]
dup v20.4s, v16.s[1]
dup v24.4s, v16.s[2]
dup v28.4s, v16.s[3]
dup v16.4s, v16.s[0]
dup v21.4s, v17.s[1]
dup v25.4s, v17.s[2]
dup v29.4s, v17.s[3]
dup v17.4s, v17.s[0]
dup v22.4s, v18.s[1]
dup v26.4s, v18.s[2]
dup v30.4s, v18.s[3]
dup v18.4s, v18.s[0]
dup v23.4s, v19.s[1]
dup v27.4s, v19.s[2]
dup v31.4s, v19.s[3]
dup v19.4s, v19.s[0]
add v19.4s, v19.4s, TMP3.4s C low adds
cmhi TMP1.4s, TMP3.4s, v19.4s C compute carry-out
and TMP1.16b, TMP1.16b, TMP2.16b C discard carries for 32-bit counter variant
add v23.4s, v23.4s, TMP1.4s C apply carries
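C The adds and the cmhi above give the four blocks counters ctr+0..ctr+3
C and propagate any carry out of the low counter word into the next state
C word; the _nettle_chacha_4core32 entry point clears TMP2, so the carry is
C discarded there. A hedged C sketch per block i (lo, hi and use_carry are
C illustrative names):
C
C   uint32_t lo = src[12] + i;            /* i = 0, 1, 2, 3 */
C   uint32_t carry = (lo < src[12]);      /* carry out of the low add */
C   uint32_t hi = src[13] + (use_carry ? carry : 0);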
C Save all 4x4 of the last words.
mov T0.16b, v19.16b
mov T1.16b, v23.16b
mov T2.16b, v27.16b
mov T3.16b, v31.16b
.Loop:
QR(`P1')
QR(`P2')
subs ROUNDS, ROUNDS, #2
b.ne .Loop
C Add in saved original words, including counters, before
C transpose.
add v19.4s, v19.4s, T0.4s
add v23.4s, v23.4s, T1.4s
add v27.4s, v27.4s, T2.4s
add v31.4s, v31.4s, T3.4s
TRANSPOSE(v16, v20, v24, v28)
TRANSPOSE(v17, v21, v25, v29)
TRANSPOSE(v18, v22, v26, v30)
TRANSPOSE(v19, v23, v27, v31)
ld1 {T0.4s,T1.4s,T2.4s}, [SRC]
add v16.4s, v16.4s, T0.4s
add v20.4s, v20.4s, T0.4s
add v24.4s, v24.4s, T0.4s
add v28.4s, v28.4s, T0.4s
add v17.4s, v17.4s, T1.4s
add v21.4s, v21.4s, T1.4s
add v25.4s, v25.4s, T1.4s
add v29.4s, v29.4s, T1.4s
add v18.4s, v18.4s, T2.4s
add v22.4s, v22.4s, T2.4s
add v26.4s, v26.4s, T2.4s
add v30.4s, v30.4s, T2.4s
st1 {v16.16b,v17.16b,v18.16b,v19.16b}, [DST], #64
st1 {v20.16b,v21.16b,v22.16b,v23.16b}, [DST], #64
st1 {v24.16b,v25.16b,v26.16b,v27.16b}, [DST], #64
st1 {v28.16b,v29.16b,v30.16b,v31.16b}, [DST]
C Restore callee-save registers
fmov d8, x3
ret
EPILOGUE(_nettle_chacha_4core)
PROLOGUE(_nettle_chacha_4core32)
eor TMP2.16b, TMP2.16b, TMP2.16b C Ignore counter carries
b .Lshared_entry
EPILOGUE(_nettle_chacha_4core32)
.align 4
.Lcnts: .long 0,1,2,3 C increments
.Lrot24: .long 0x02010003,0x06050407,0x0a09080b,0x0e0d0c0f
C arm64/chacha-core-internal.asm
ifelse(`
Copyright (C) 2020 Niels Möller and Torbjörn Granlund
Copyright (C) 2022 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Register usage:
C Arguments
define(`DST', `x0')
define(`SRC', `x1')
define(`ROUNDS', `x2')
C Working state
define(`X0', `v0')
define(`X1', `v1')
define(`X2', `v2')
define(`X3', `v3')
C Original input state
define(`S0', `v4')
define(`S1', `v5')
define(`S2', `v6')
define(`S3', `v7')
define(`ROT24', `v16')
define(`TMP', `v17')
C QROUND(X0, X1, X2, X3)
define(`QROUND', `
C x0 += x1, x3 ^= x0, x3 lrot 16
C x2 += x3, x1 ^= x2, x1 lrot 12
C x0 += x1, x3 ^= x0, x3 lrot 8
C x2 += x3, x1 ^= x2, x1 lrot 7
add $1.4s, $1.4s, $2.4s
eor $4.16b, $4.16b, $1.16b
rev32 $4.8h, $4.8h
add $3.4s, $3.4s, $4.4s
eor TMP.16b, $2.16b, $3.16b
ushr $2.4s, TMP.4s, #20
sli $2.4s, TMP.4s, #12
add $1.4s, $1.4s, $2.4s
eor $4.16b, $4.16b, $1.16b
tbl $4.16b, {$4.16b}, ROT24.16b
add $3.4s, $3.4s, $4.4s
eor TMP.16b, $2.16b, $3.16b
ushr $2.4s, TMP.4s, #25
sli $2.4s, TMP.4s, #7
')
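C A hedged C reference of the quarter round implemented lane-wise by the
C QROUND macro above (rol32 is an illustrative helper, not Nettle API):
C
C   static uint32_t rol32(uint32_t x, unsigned n)
C   { return (x << n) | (x >> (32 - n)); }
C
C   x0 += x1; x3 ^= x0; x3 = rol32(x3, 16);
C   x2 += x3; x1 ^= x2; x1 = rol32(x1, 12);
C   x0 += x1; x3 ^= x0; x3 = rol32(x3, 8);
C   x2 += x3; x1 ^= x2; x1 = rol32(x1, 7);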
.text
C _chacha_core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_chacha_core)
adr x3, .Lrot24
ld1 {ROT24.4s},[x3]
ld1 {X0.4s,X1.4s,X2.4s,X3.4s}, [SRC]
mov S0.16b, X0.16b
mov S1.16b, X1.16b
mov S2.16b, X2.16b
mov S3.16b, X3.16b
.Loop:
QROUND(X0, X1, X2, X3)
C Rotate rows, to get
C 0 1 2 3
C 5 6 7 4 <<< 1
C 10 11 8 9 <<< 2
C 15 12 13 14 <<< 3
ext X1.16b, X1.16b, X1.16b, #4
ext X2.16b, X2.16b, X2.16b, #8
ext X3.16b, X3.16b, X3.16b, #12
QROUND(X0, X1, X2, X3)
ext X1.16b, X1.16b, X1.16b, #12
ext X2.16b, X2.16b, X2.16b, #8
ext X3.16b, X3.16b, X3.16b, #4
subs ROUNDS, ROUNDS, #2
b.ne .Loop
add X0.4s, X0.4s, S0.4s
add X1.4s, X1.4s, S1.4s
add X2.4s, X2.4s, S2.4s
add X3.4s, X3.4s, S3.4s
st1 {X0.16b,X1.16b,X2.16b,X3.16b}, [DST]
ret
EPILOGUE(_nettle_chacha_core)
.align 4
.Lrot24: .long 0x02010003,0x06050407,0x0a09080b,0x0e0d0c0f
C arm64/crypto/aes128-decrypt.asm
ifelse(`
Copyright (C) 2021 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "aes128-decrypt.asm"
.arch armv8-a+crypto
.text
C Register usage:
define(`KEYS', `x0')
define(`LENGTH', `x1')
define(`DST', `x2')
define(`SRC', `x3')
define(`S0', `v0')
define(`S1', `v1')
define(`S2', `v2')
define(`S3', `v3')
define(`K0', `v16')
define(`K1', `v17')
define(`K2', `v18')
define(`K3', `v19')
define(`K4', `v20')
define(`K5', `v21')
define(`K6', `v22')
define(`K7', `v23')
define(`K8', `v24')
define(`K9', `v25')
define(`K10', `v26')
C void
C aes128_decrypt(const struct aes128_ctx *ctx,
C size_t length, uint8_t *dst,
C const uint8_t *src)
PROLOGUE(nettle_aes128_decrypt)
ld1 {K0.4s,K1.4s,K2.4s,K3.4s},[KEYS],#64
ld1 {K4.4s,K5.4s,K6.4s,K7.4s},[KEYS],#64
ld1 {K8.4s,K9.4s,K10.4s},[KEYS]
ands x4,LENGTH,#-64
b.eq L1B
L4B_loop:
ld1 {S0.16b,S1.16b,S2.16b,S3.16b},[SRC],#64
AESD_ROUND_4B(S0,S1,S2,S3,K10)
AESD_ROUND_4B(S0,S1,S2,S3,K9)
AESD_ROUND_4B(S0,S1,S2,S3,K8)
AESD_ROUND_4B(S0,S1,S2,S3,K7)
AESD_ROUND_4B(S0,S1,S2,S3,K6)
AESD_ROUND_4B(S0,S1,S2,S3,K5)
AESD_ROUND_4B(S0,S1,S2,S3,K4)
AESD_ROUND_4B(S0,S1,S2,S3,K3)
AESD_ROUND_4B(S0,S1,S2,S3,K2)
AESD_LAST_ROUND_4B(S0,S1,S2,S3,K1,K0)
st1 {S0.16b,S1.16b,S2.16b,S3.16b},[DST],#64
subs x4,x4,#64
b.ne L4B_loop
and LENGTH,LENGTH,#63
L1B:
cbz LENGTH,Ldone
L1B_loop:
ld1 {S0.16b},[SRC],#16
AESD_ROUND_1B(S0,K10)
AESD_ROUND_1B(S0,K9)
AESD_ROUND_1B(S0,K8)
AESD_ROUND_1B(S0,K7)
AESD_ROUND_1B(S0,K6)
AESD_ROUND_1B(S0,K5)
AESD_ROUND_1B(S0,K4)
AESD_ROUND_1B(S0,K3)
AESD_ROUND_1B(S0,K2)
AESD_LAST_ROUND_1B(S0,K1,K0)
st1 {S0.16b},[DST],#16
subs LENGTH,LENGTH,#16
b.ne L1B_loop
Ldone:
ret
EPILOGUE(nettle_aes128_decrypt)
C arm64/crypto/aes128-encrypt.asm
ifelse(`
Copyright (C) 2021 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "aes128-encrypt.asm"
.arch armv8-a+crypto
.text
C Register usage:
define(`KEYS', `x0')
define(`LENGTH', `x1')
define(`DST', `x2')
define(`SRC', `x3')
define(`S0', `v0')
define(`S1', `v1')
define(`S2', `v2')
define(`S3', `v3')
define(`K0', `v16')
define(`K1', `v17')
define(`K2', `v18')
define(`K3', `v19')
define(`K4', `v20')
define(`K5', `v21')
define(`K6', `v22')
define(`K7', `v23')
define(`K8', `v24')
define(`K9', `v25')
define(`K10', `v26')
C void
C aes128_encrypt(const struct aes128_ctx *ctx,
C size_t length, uint8_t *dst,
C const uint8_t *src)
PROLOGUE(nettle_aes128_encrypt)
ld1 {K0.4s,K1.4s,K2.4s,K3.4s},[KEYS],#64
ld1 {K4.4s,K5.4s,K6.4s,K7.4s},[KEYS],#64
ld1 {K8.4s,K9.4s,K10.4s},[KEYS]
ands x4,LENGTH,#-64
b.eq L1B
L4B_loop:
ld1 {S0.16b,S1.16b,S2.16b,S3.16b},[SRC],#64
AESE_ROUND_4B(S0,S1,S2,S3,K0)
AESE_ROUND_4B(S0,S1,S2,S3,K1)
AESE_ROUND_4B(S0,S1,S2,S3,K2)
AESE_ROUND_4B(S0,S1,S2,S3,K3)
AESE_ROUND_4B(S0,S1,S2,S3,K4)
AESE_ROUND_4B(S0,S1,S2,S3,K5)
AESE_ROUND_4B(S0,S1,S2,S3,K6)
AESE_ROUND_4B(S0,S1,S2,S3,K7)
AESE_ROUND_4B(S0,S1,S2,S3,K8)
AESE_LAST_ROUND_4B(S0,S1,S2,S3,K9,K10)
st1 {S0.16b,S1.16b,S2.16b,S3.16b},[DST],#64
subs x4,x4,#64
b.ne L4B_loop
and LENGTH,LENGTH,#63
L1B:
cbz LENGTH,Ldone
L1B_loop:
ld1 {S0.16b},[SRC],#16
AESE_ROUND_1B(S0,K0)
AESE_ROUND_1B(S0,K1)
AESE_ROUND_1B(S0,K2)
AESE_ROUND_1B(S0,K3)
AESE_ROUND_1B(S0,K4)
AESE_ROUND_1B(S0,K5)
AESE_ROUND_1B(S0,K6)
AESE_ROUND_1B(S0,K7)
AESE_ROUND_1B(S0,K8)
AESE_LAST_ROUND_1B(S0,K9,K10)
st1 {S0.16b},[DST],#16
subs LENGTH,LENGTH,#16
b.ne L1B_loop
Ldone:
ret
EPILOGUE(nettle_aes128_encrypt)
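C Typical use of the AES-128 routines above goes through Nettle's public C
C API (a minimal sketch; key setup and buffer handling are the caller's
C responsibility, and length must be a multiple of the 16-byte block size):
C
C   #include <nettle/aes.h>
C
C   struct aes128_ctx ctx;
C   uint8_t key[AES128_KEY_SIZE];      /* 16 bytes */
C
C   aes128_set_encrypt_key(&ctx, key);
C   aes128_encrypt(&ctx, length, dst, src);
C
C   aes128_set_decrypt_key(&ctx, key);
C   aes128_decrypt(&ctx, length, dst, src);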
C arm64/crypto/aes192-decrypt.asm
ifelse(`
Copyright (C) 2021 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "aes192-decrypt.asm"
.arch armv8-a+crypto
.text
C Register usage:
define(`KEYS', `x0')
define(`LENGTH', `x1')
define(`DST', `x2')
define(`SRC', `x3')
define(`S0', `v0')
define(`S1', `v1')
define(`S2', `v2')
define(`S3', `v3')
define(`K0', `v16')
define(`K1', `v17')
define(`K2', `v18')
define(`K3', `v19')
define(`K4', `v20')
define(`K5', `v21')
define(`K6', `v22')
define(`K7', `v23')
define(`K8', `v24')
define(`K9', `v25')
define(`K10', `v26')
define(`K11', `v27')
define(`K12', `v28')
C void
C aes192_decrypt(const struct aes192_ctx *ctx,
C size_t length, uint8_t *dst,
C const uint8_t *src)
PROLOGUE(nettle_aes192_decrypt)
ld1 {K0.4s,K1.4s,K2.4s,K3.4s},[KEYS],#64
ld1 {K4.4s,K5.4s,K6.4s,K7.4s},[KEYS],#64
ld1 {K8.4s,K9.4s,K10.4s,K11.4s},[KEYS],#64
ld1 {K12.4s},[KEYS]
ands x4,LENGTH,#-64
b.eq L1B
L4B_loop:
ld1 {S0.16b,S1.16b,S2.16b,S3.16b},[SRC],#64
AESD_ROUND_4B(S0,S1,S2,S3,K12)
AESD_ROUND_4B(S0,S1,S2,S3,K11)
AESD_ROUND_4B(S0,S1,S2,S3,K10)
AESD_ROUND_4B(S0,S1,S2,S3,K9)
AESD_ROUND_4B(S0,S1,S2,S3,K8)
AESD_ROUND_4B(S0,S1,S2,S3,K7)
AESD_ROUND_4B(S0,S1,S2,S3,K6)
AESD_ROUND_4B(S0,S1,S2,S3,K5)
AESD_ROUND_4B(S0,S1,S2,S3,K4)
AESD_ROUND_4B(S0,S1,S2,S3,K3)
AESD_ROUND_4B(S0,S1,S2,S3,K2)
AESD_LAST_ROUND_4B(S0,S1,S2,S3,K1,K0)
st1 {S0.16b,S1.16b,S2.16b,S3.16b},[DST],#64
subs x4,x4,#64
b.ne L4B_loop
and LENGTH,LENGTH,#63
L1B:
cbz LENGTH,Ldone
L1B_loop:
ld1 {S0.16b},[SRC],#16
AESD_ROUND_1B(S0,K12)
AESD_ROUND_1B(S0,K11)
AESD_ROUND_1B(S0,K10)
AESD_ROUND_1B(S0,K9)
AESD_ROUND_1B(S0,K8)
AESD_ROUND_1B(S0,K7)
AESD_ROUND_1B(S0,K6)
AESD_ROUND_1B(S0,K5)
AESD_ROUND_1B(S0,K4)
AESD_ROUND_1B(S0,K3)
AESD_ROUND_1B(S0,K2)
AESD_LAST_ROUND_1B(S0,K1,K0)
st1 {S0.16b},[DST],#16
subs LENGTH,LENGTH,#16
b.ne L1B_loop
Ldone:
ret
EPILOGUE(nettle_aes192_decrypt)
C arm64/crypto/aes192-encrypt.asm
ifelse(`
Copyright (C) 2021 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "aes192-encrypt.asm"
.arch armv8-a+crypto
.text
C Register usage:
define(`KEYS', `x0')
define(`LENGTH', `x1')
define(`DST', `x2')
define(`SRC', `x3')
define(`S0', `v0')
define(`S1', `v1')
define(`S2', `v2')
define(`S3', `v3')
define(`K0', `v16')
define(`K1', `v17')
define(`K2', `v18')
define(`K3', `v19')
define(`K4', `v20')
define(`K5', `v21')
define(`K6', `v22')
define(`K7', `v23')
define(`K8', `v24')
define(`K9', `v25')
define(`K10', `v26')
define(`K11', `v27')
define(`K12', `v28')
C void
C aes192_encrypt(const struct aes192_ctx *ctx,
C size_t length, uint8_t *dst,
C const uint8_t *src)
PROLOGUE(nettle_aes192_encrypt)
ld1 {K0.4s,K1.4s,K2.4s,K3.4s},[KEYS],#64
ld1 {K4.4s,K5.4s,K6.4s,K7.4s},[KEYS],#64
ld1 {K8.4s,K9.4s,K10.4s,K11.4s},[KEYS],#64
ld1 {K12.4s},[KEYS]
ands x4,LENGTH,#-64
b.eq L1B
L4B_loop:
ld1 {S0.16b,S1.16b,S2.16b,S3.16b},[SRC],#64
AESE_ROUND_4B(S0,S1,S2,S3,K0)
AESE_ROUND_4B(S0,S1,S2,S3,K1)
AESE_ROUND_4B(S0,S1,S2,S3,K2)
AESE_ROUND_4B(S0,S1,S2,S3,K3)
AESE_ROUND_4B(S0,S1,S2,S3,K4)
AESE_ROUND_4B(S0,S1,S2,S3,K5)
AESE_ROUND_4B(S0,S1,S2,S3,K6)
AESE_ROUND_4B(S0,S1,S2,S3,K7)
AESE_ROUND_4B(S0,S1,S2,S3,K8)
AESE_ROUND_4B(S0,S1,S2,S3,K9)
AESE_ROUND_4B(S0,S1,S2,S3,K10)
AESE_LAST_ROUND_4B(S0,S1,S2,S3,K11,K12)
st1 {S0.16b,S1.16b,S2.16b,S3.16b},[DST],#64
subs x4,x4,#64
b.ne L4B_loop
and LENGTH,LENGTH,#63
L1B:
cbz LENGTH,Ldone
L1B_loop:
ld1 {S0.16b},[SRC],#16
AESE_ROUND_1B(S0,K0)
AESE_ROUND_1B(S0,K1)
AESE_ROUND_1B(S0,K2)
AESE_ROUND_1B(S0,K3)
AESE_ROUND_1B(S0,K4)
AESE_ROUND_1B(S0,K5)
AESE_ROUND_1B(S0,K6)
AESE_ROUND_1B(S0,K7)
AESE_ROUND_1B(S0,K8)
AESE_ROUND_1B(S0,K9)
AESE_ROUND_1B(S0,K10)
AESE_LAST_ROUND_1B(S0,K11,K12)
st1 {S0.16b},[DST],#16
subs LENGTH,LENGTH,#16
b.ne L1B_loop
Ldone:
ret
EPILOGUE(nettle_aes192_encrypt)
C arm64/crypto/aes256-decrypt.asm
ifelse(`
Copyright (C) 2021 Mamone Tarsha
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "aes256-decrypt.asm"
.arch armv8-a+crypto
.text
C Register usage:
define(`KEYS', `x0')
define(`LENGTH', `x1')
define(`DST', `x2')
define(`SRC', `x3')
define(`S0', `v0')
define(`S1', `v1')
define(`S2', `v2')
define(`S3', `v3')
define(`K0', `v16')
define(`K1', `v17')
define(`K2', `v18')
define(`K3', `v19')
define(`K4', `v20')
define(`K5', `v21')
define(`K6', `v22')
define(`K7', `v23')
define(`K8', `v24')
define(`K9', `v25')
define(`K10', `v26')
define(`K11', `v27')
define(`K12', `v28')
define(`K13', `v29')
define(`K14', `v30')
C void
C aes256_decrypt(const struct aes256_ctx *ctx,
C size_t length, uint8_t *dst,
C const uint8_t *src)
PROLOGUE(nettle_aes256_decrypt)
ld1 {K0.4s,K1.4s,K2.4s,K3.4s},[KEYS],#64
ld1 {K4.4s,K5.4s,K6.4s,K7.4s},[KEYS],#64
ld1 {K8.4s,K9.4s,K10.4s,K11.4s},[KEYS],#64
ld1 {K12.4s,K13.4s,K14.4s},[KEYS]
ands x4,LENGTH,#-64
b.eq L1B
L4B_loop:
ld1 {S0.16b,S1.16b,S2.16b,S3.16b},[SRC],#64
AESD_ROUND_4B(S0,S1,S2,S3,K14)
AESD_ROUND_4B(S0,S1,S2,S3,K13)
AESD_ROUND_4B(S0,S1,S2,S3,K12)
AESD_ROUND_4B(S0,S1,S2,S3,K11)
AESD_ROUND_4B(S0,S1,S2,S3,K10)
AESD_ROUND_4B(S0,S1,S2,S3,K9)
AESD_ROUND_4B(S0,S1,S2,S3,K8)
AESD_ROUND_4B(S0,S1,S2,S3,K7)
AESD_ROUND_4B(S0,S1,S2,S3,K6)
AESD_ROUND_4B(S0,S1,S2,S3,K5)
AESD_ROUND_4B(S0,S1,S2,S3,K4)
AESD_ROUND_4B(S0,S1,S2,S3,K3)
AESD_ROUND_4B(S0,S1,S2,S3,K2)
AESD_LAST_ROUND_4B(S0,S1,S2,S3,K1,K0)
st1 {S0.16b,S1.16b,S2.16b,S3.16b},[DST],#64
subs x4,x4,#64
b.ne L4B_loop
and LENGTH,LENGTH,#63
L1B:
cbz LENGTH,Ldone
L1B_loop:
ld1 {S0.16b},[SRC],#16
AESD_ROUND_1B(S0,K14)
AESD_ROUND_1B(S0,K13)
AESD_ROUND_1B(S0,K12)
AESD_ROUND_1B(S0,K11)
AESD_ROUND_1B(S0,K10)
AESD_ROUND_1B(S0,K9)
AESD_ROUND_1B(S0,K8)
AESD_ROUND_1B(S0,K7)
AESD_ROUND_1B(S0,K6)
AESD_ROUND_1B(S0,K5)
AESD_ROUND_1B(S0,K4)
AESD_ROUND_1B(S0,K3)
AESD_ROUND_1B(S0,K2)
AESD_LAST_ROUND_1B(S0,K1,K0)
st1 {S0.16b},[DST],#16
subs LENGTH,LENGTH,#16
b.ne L1B_loop
Ldone:
ret
EPILOGUE(nettle_aes256_decrypt)