Compare revisions

Showing 2129 additions and 0 deletions
C arm/ecc-secp256r1-redc.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "ecc-secp256r1-redc.asm"
.arm
define(`RP', `r1') C Overlaps T1 below
define(`XP', `r2')
define(`T0', `r0') C Overlaps unused modulo argument
define(`T1', `r1')
define(`T2', `r3')
define(`T3', `r4')
define(`T4', `r5')
define(`T5', `r6')
define(`T6', `r7')
define(`T7', `r8')
define(`F0', `r10')
define(`F1', `r11')
define(`F2', `r12')
define(`F3', `lr')
C ecc_secp256r1_redc (const struct ecc_modulo *m, mp_limb_t *rp)
.text
.align 2
PROLOGUE(_nettle_ecc_secp256r1_redc)
C Pushes RP last
push {r1, r4,r5,r6,r7,r8,r10,r11,lr}
ldm XP!, {T0,T1,T2,T3,T4,T5,T6,T7}
C Set <F3,F2,F1,F0> to the high 4 limbs of (B^2-B+1)<T2,T1,T0>
C T2 T1
C T2 T1 T0
C - T2 T1 T0
C -------------
C F3 F2 F1 F0
adds F1, T0, T2
adcs F2, T1, #0
adc F3, T2, #0
subs F0, T1, T0
sbcs F1, F1, T1 C Could also be rsc ?
sbcs F2, F2, T2
sbc F3, F3, #0
C Add:
C T10 T9 T8 T7 T6 T5 T4 T3
C + F3 F2 F1 F0 T0 T2 T1 T0
C --------------------------
C T7 T6 T5 T4 T3 T2 T1 T0
adds T3, T3, T0
adcs T1, T4, T1
adcs T2, T5, T2
adcs T6, T6, T0
mov T0, T3 C FIXME: Be more clever?
mov T3, T6
adcs T4, T7, F0
ldm XP!, {T5,T6,T7}
adcs T5, T5, F1
adcs T6, T6, F2
adcs T7, T7, F3
C New F3, F2, F1, F0, also adding in carry
adcs F1, T0, T2
adcs F2, T1, #0
adc F3, T2, #0
subs F0, T1, T0
sbcs F1, F1, T1 C Could also be rsc ?
sbcs F2, F2, T2
sbc F3, F3, #0
C Start adding
adds T3, T3, T0
adcs T1, T4, T1
adcs T2, T5, T2
adcs T6, T6, T0
mov T0, T3 C FIXME: Be more clever?
mov T3, T6
adcs T4, T7, F0
ldm XP!, {T5,T6,T7}
adcs T5, T5, F1
adcs T6, T6, F2
adcs T7, T7, F3
C Final iteration, eliminate only T0, T1
C Set <F2, F1, F0> to the high 3 limbs of (B^2-B+1)<T1,T0>
C T1 T0 T1
C - T1 T0
C -------------
C F2 F1 F0
C First add in carry
adcs F1, T0, #0
adcs F2, T1, #0
subs F0, T1, T0
sbcs F1, F1, T1
sbc F2, F2, #0
C Add:
C T9 T8 T7 T6 T5 T4 T3 T2
C + F2 F1 F0 T0 0 T1 T0 0
C --------------------------
C F2 F1 T7 T6 T5 T4 T3 T2
adds T3, T3, T0
adcs T4, T4, T1
adcs T5, T5, #0
adcs T6, T6, T0
adcs T7, T7, F0
ldm XP!, {T0, T1}
mov F3, #0
adcs F1, F1, T0
adcs F2, F2, T1
C Sum is < B^8 + p, so it's enough to fold carry once,
C If carry, add in
C B^7 - B^6 - B^3 + 1 = <0, B-2, B-1, B-1, B-1, 0, 0, 1>
C Mask from carry flag, leaving carry intact
adc F3, F3, #0
rsb F3, F3, #0
pop {XP} C Original RP
adcs T0, T2, #0
adcs T1, T3, #0
adcs T2, T4, #0
adcs T3, T5, F3
adcs T4, T6, F3
adcs T5, T7, F3
and F3, F3, #-2
adcs T6, F1, F3
adcs T7, F2, #0
stm XP, {T0,T1,T2,T3,T4,T5,T6,T7}
pop {r4,r5,r6,r7,r8,r10,r11,pc}
EPILOGUE(_nettle_ecc_secp256r1_redc)
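The routine above is a Montgomery reduction specialized for p256 = B^8 - B^7 + B^6 + B^3 - 1 with B = 2^32: the three low limbs of p are all ones, so the per-limb quotient is simply the limb being cancelled, and adding the right multiple of p eliminates three limbs per pass, which is where the (B^2-B+1) factor in the comments comes from. For contrast, a minimal C sketch of the generic word-level REDC this specializes (illustrative code, not Nettle's internal API):

#include <stdint.h>

/* Word-level Montgomery reduction with B = 2^32.  x has 2n limbs,
   little-endian; m is the n-limb modulus, m_prime = -m^{-1} mod B
   (1 for p256, so there q is simply x[i]).  On return, x[n..2n-1]
   plus the returned carry limb hold x * B^{-n} mod m, up to one
   final subtraction of m. */
static uint32_t
redc_ref(uint32_t *x, const uint32_t *m, uint32_t m_prime, unsigned n)
{
  uint32_t extra = 0;              /* carry limb above position i+n */
  for (unsigned i = 0; i < n; i++)
    {
      uint32_t q = x[i] * m_prime; /* makes x[i] + q*m[0] divisible by B */
      uint64_t acc = 0;
      for (unsigned j = 0; j < n; j++)
        {
          acc += (uint64_t) q * m[j] + x[i + j];
          x[i + j] = (uint32_t) acc;
          acc >>= 32;
        }
      acc += (uint64_t) x[i + n] + extra;
      x[i + n] = (uint32_t) acc;
      extra = (uint32_t) (acc >> 32);
    }
  return extra;
}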
C arm/ecc-secp384r1-modp.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "ecc-secp384r1-modp.asm"
.arm
define(`RP', `r1')
define(`XP', `r2')
define(`T0', `r0')
define(`T1', `r3')
define(`T2', `r4')
define(`T3', `r5')
define(`F0', `r6')
define(`F1', `r7')
define(`F2', `r8')
define(`F3', `r10')
define(`F4', `r11')
define(`N', `r12')
define(`H', `lr')
C ecc_secp384r1_modp (const struct ecc_modulo *m, mp_limb_t *rp)
.text
.align 2
PROLOGUE(_nettle_ecc_secp384r1_modp)
push {r4,r5,r6,r7,r8,r10,r11,lr}
add XP, XP, #80
ldm XP, {T0, T1, T2, T3} C 20-23
C First get top 4 limbs, which need folding twice, as
C
C T3 T2 T1 T0
C T3 T2 T1
C -T3
C ----------------
C F4 F3 F2 F1 F0
C
C Start with
C
C T3 T1 T0
C T1
C -T3
C -----------
C F2 F1 F0 Always fits
adds F0, T0, T1
adcs F1, T1, #0
adcs F2, T3, #0
subs F0, F0, T3
sbcs F1, F1, #0
sbcs F2, F2, #0
C T3 T2 T2 0
C F2 F1 F0
C ----------------
C F4 F3 F2 F1 F0
mov F4, #0
adds F1, F1, T2
adcs F2, F2, T2
adcs F3, T3, #0
adcs F4, F4, #0
C Add in to high part
sub XP, XP, #32
ldm XP, {T0, T1, T2, T3} C 12-15
mov H, #0
adds F0, T0, F0
adcs F1, T1, F1
adcs F2, T2, F2
adcs F3, T3, F3
adcs F4, F4, #0 C Do F4 later
C Add to low part, keeping carry (positive or negative) in H
sub XP, XP, #48
ldm XP, {T0, T1, T2, T3} C 0-3
mov H, #0
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
adc H, H, #0
subs T1, T1, F0
sbcs T2, T2, F1
sbcs T3, T3, F2
sbc H, H, #0
adds T3, T3, F0
adc H, H, #0
stm XP!, {T0,T1,T2,T3} C 0-3
mov N, #2
.Loop:
ldm XP, {T0,T1,T2,T3} C 4-7
C First, propagate carry
adds T0, T0, H
asr H, #31 C Sign extend
adcs T1, T1, H
adcs T2, T2, H
adcs T3, T3, H
adc H, H, #0
C +B^4 term
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
adc H, H, #0
C +B^3 terms
ldr F0, [XP, #+48] C 16
adds T0, T0, F1
adcs T1, T1, F2
adcs T2, T2, F3
adcs T3, T3, F0
adc H, H, #0
C -B
ldr F1, [XP, #+52] C 17-18
ldr F2, [XP, #+56]
subs T0, T0, F3
sbcs T1, T1, F0
sbcs T2, T2, F1
sbcs T3, T3, F2
sbcs H, H, #0
C +1
ldr F3, [XP, #+60] C 19
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
adc H, H, #0
subs N, N, #1
stm XP!, {T0,T1,T2,T3}
bne .Loop
C Fold high limbs, we need to add in
C
C F4 F4 0 -F4 F4 H H 0 -H H
C
C We always have F4 >= 0, but we can have H < 0.
C Sign extension gets tricky when F4 = 0 and H < 0.
sub XP, XP, #48
ldm XP, {T0,T1,T2,T3} C 0-3
C H H 0 -H H
C ----------------
C S H F3 F2 F1 F0
C
C Define S = H >> 31 (asr), we then have
C
C F0 = H
C F1 = S - H
C F2 = - [H > 0]
C F3 = H - [H > 0]
C H = H + S
C
C And we get underflow in S - H iff H > 0
C H = 0 H > 0 H = -1
mov F0, H C 0 H -1
asr H, #31
subs F1, H, F0 C 0,C=1 -H,C=0 0,C=1
sbc F2, F2, F2 C 0 -1 0
sbc F3, F0, #0 C 0 H-1 -1
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
adc H, H, F0 C 0+cy H+cy -2+cy
stm XP!, {T0,T1,T2,T3} C 0-3
ldm XP, {T0,T1,T2,T3} C 4-7
C F4 0 -F4
C ---------
C F3 F2 F1
rsbs F1, F4, #0
sbc F2, F2, F2
sbc F3, F4, #0
C Sign extend H
adds F0, F4, H
asr H, H, #31
adcs F1, F1, H
adcs F2, F2, H
adcs F3, F3, H
adcs F4, F4, H
adc H, H, #0
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
stm XP!, {T0,T1,T2,T3} C 4-7
ldm XP, {T0,T1,T2,T3} C 8-11
adcs T0, T0, F4
adcs T1, T1, H
adcs T2, T2, H
adcs T3, T3, H
adc H, H, #0
stm XP, {T0,T1,T2,T3} C 8-11
C Final (unlikely) carry
sub XP, XP, #32
ldm XP!, {T0,T1,T2,T3} C 0-3
C Fold H into F0-F4
mov F0, H
asr H, #31
subs F1, H, F0
sbc F2, F2, F2
sbc F3, F0, #0
add F4, F0, H
adds T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
adcs T3, T3, F3
stm RP!, {T0,T1,T2,T3} C 0-3
ldm XP!, {T0,T1,T2,T3} C 4-7
adcs T0, T0, F4
adcs T1, T1, H
adcs T2, T2, H
adcs T3, T3, H
stm RP!, {T0,T1,T2,T3} C 4-7
ldm XP, {T0,T1,T2,T3} C 8-11
adcs T0, T0, H
adcs T1, T1, H
adcs T2, T2, H
adcs T3, T3, H
stm RP, {T0,T1,T2,T3} C 8-11
pop {r4,r5,r6,r7,r8,r10,r11,pc}
EPILOGUE(_nettle_ecc_secp384r1_modp)
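The folding above uses the identity 2^384 = B^12 = B^4 + B^3 - B + 1 (mod p384) with B = 2^32, which is exactly where the +B^4, +B^3, -B and +1 terms in the loop come from. A C sketch of folding a single extra limb of weight B^12 (a simplified model of one step, not the actual scheduling; note the signed carry, matching the positive-or-negative carry kept in H above):

#include <stdint.h>

/* Fold one limb h of weight B^12 into the 12-limb value r[], using
   B^12 = B^4 + B^3 - B + 1 (mod p384).  Returns the carry out of
   limb 11, which may be -1, 0 or +1; a full reduction folds that
   again and finishes with a conditional subtraction of p.  Assumes
   arithmetic right shift of the signed accumulator. */
static int64_t
fold_p384_limb(uint32_t r[12], uint32_t h)
{
  int64_t acc = 0;          /* signed, so the -B term can borrow */
  for (int i = 0; i < 12; i++)
    {
      acc += r[i];
      if (i == 0 || i == 3 || i == 4)
        acc += h;           /* the +1, +B^3 and +B^4 terms */
      else if (i == 1)
        acc -= h;           /* the -B term */
      r[i] = (uint32_t) acc;
      acc >>= 32;           /* sign-preserving carry */
    }
  return acc;
}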
C arm/ecc-secp521r1-modp.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "ecc-secp521r1-modp.asm"
.arm
define(`HP', `r0')
define(`RP', `r1')
define(`XP', `r2')
define(`T0', `r3')
define(`T1', `r4')
define(`T2', `r5')
define(`F0', `r6')
define(`F1', `r7')
define(`F2', `r8')
define(`F3', `r10')
define(`H', `r12')
define(`N', `lr')
C ecc_secp521r1_modp (const struct ecc_modulo *m, mp_limb_t *rp)
.text
.Lc511:
.int 511
.align 2
PROLOGUE(_nettle_ecc_secp521r1_modp)
push {r4,r5,r6,r7,r8,r10,lr}
C Use that B^17 = 2^23 (mod p)
ldr F3, [XP, #+68] C 17
add HP, XP, #72 C 18
ldr T0, [XP] C 0
adds T0, T0, F3, lsl #23
str T0, [XP], #+4
mov N, #5
C 5 iterations, reading limbs 18-20, 21-23, 24-26, 27-29, 30-32
C and adding to limbs 1-3, 4-6, 7-9, 10-12, 13-15
.Loop:
ldm XP, {T0,T1,T2} C 1+3*k -- 3+3*k
lsr F0, F3, #9
ldm HP!, {F1,F2,F3} C 18+3*k -- 20+3*k
orr F0, F0, F1, lsl #23
lsr F1, F1, #9
orr F1, F1, F2, lsl #23
lsr F2, F2, #9
orr F2, F2, F3, lsl #23
adcs T0, T0, F0
adcs T1, T1, F1
adcs T2, T2, F2
sub N, N, #1
stm XP!,{T0,T1,T2}
teq N, #0
bne .Loop
ldr F0, [XP], #-64 C 16
ldr F1, [HP] C 33
ldr T0, .Lc511
C Handling of high limbs
C F0 = rp[16] + carry in + F3 >> 9
adcs F0, F0, F3, lsr #9
C Copy low 9 bits to H, then shift right including carry
and H, F0, T0
mov F0, F0, rrx
lsr F0, F0, #8
C Add in F1 = rp[33], with weight 2^1056 = 2^14
adds F0, F0, F1, lsl #14
lsr F1, F1, #18
adc F1, F1, #0
ldm XP!, {T0, T1} C 0-1
adds T0, T0, F0
adcs T1, T1, F1
stm RP!, {T0, T1}
ldm XP!, {T0,T1,T2,F0,F1,F2,F3} C 2-8
adcs T0, T0, #0
adcs T1, T1, #0
adcs T2, T2, #0
adcs F0, F0, #0
adcs F1, F1, #0
adcs F2, F2, #0
adcs F3, F3, #0
stm RP!, {T0,T1,T2,F0,F1,F2,F3} C 2-8
ldm XP, {T0,T1,T2,F0,F1,F2,F3} C 9-15
adcs T0, T0, #0
adcs T1, T1, #0
adcs T2, T2, #0
adcs F0, F0, #0
adcs F1, F1, #0
adcs F2, F2, #0
adcs F3, F3, #0
adcs H, H, #0
stm RP, {T0,T1,T2,F0,F1,F2,F3,H} C 9-16
pop {r4,r5,r6,r7,r8,r10,pc}
EPILOGUE(_nettle_ecc_secp521r1_modp)
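The setup above relies on p = 2^521 - 1 and B = 2^32: seventeen limbs are 544 bits, so B^17 = 2^544 = 2^23 * 2^521 = 2^23 (mod p). That is why limb 17 enters shifted left by 23 bits, and why the loop recombines the higher limbs with lsr #9/lsl #23 pairs (32 - 23 = 9). The underlying Mersenne fold, as a C sketch (illustrative only):

#include <stdint.h>

/* One folding pass modulo p = 2^521 - 1 on a 17-limb little-endian
   value: everything above bit 521 is added back at the bottom, since
   2^521 = 1 (mod p).  A canonical result still needs a final fold or
   a conditional subtraction of p. */
static void
fold_p521(uint32_t x[17])
{
  /* 521 = 16*32 + 9, so the high part starts 9 bits into limb 16 */
  uint64_t h = x[16] >> 9;  /* bits 521..543 */
  x[16] &= 0x1ff;           /* keep bits 512..520 */
  for (int i = 0; i < 17 && h != 0; i++)
    {
      h += x[i];
      x[i] = (uint32_t) h;
      h >>= 32;
    }
}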
C arm/fat/aes-decrypt-internal-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
define(`fat_transform', `$1_armv6')
include_src(`arm/v6/aes-decrypt-internal.asm')
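These arm/fat/*.asm wrappers build the same source twice under different symbol names: fat_transform appends a suffix (_armv6 here, _arm or _neon elsewhere) to each symbol the included file defines, and the library binds function pointers at startup to whichever variant the CPU supports. A hypothetical sketch of that dispatch pattern (all names invented for illustration; the real selection logic lives in Nettle's fat-arm.c):

#include <stdbool.h>
#include <stddef.h>

/* Toy model of fat dispatch.  The two implementations stand in for
   the plain-ARM and ARMv6 builds of one routine; real code probes the
   CPU (e.g. via HWCAP) instead of returning a constant. */
static int impl_arm(int x)   { return x + 1; }
static int impl_armv6(int x) { return x + 1; }

static bool cpu_has_armv6(void) { return false; }  /* placeholder probe */

static int (*entry)(int) = NULL;

int
fat_entry(int x)
{
  if (!entry)
    entry = cpu_has_armv6() ? impl_armv6 : impl_arm;
  return entry(x);
}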
C arm/fat/aes-decrypt-internal.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
define(`fat_transform', `$1_arm')
include_src(`arm/aes-decrypt-internal.asm')
C arm/fat/aes-encrypt-internal-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
define(`fat_transform', `$1_armv6')
include_src(`arm/v6/aes-encrypt-internal.asm')
C arm/fat/aes-encrypt-internal.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
define(`fat_transform', `$1_arm')
include_src(`arm/aes-encrypt-internal.asm')
C arm/fat/chacha-3core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_fat_chacha_3core) picked up by configure
include_src(`arm/neon/chacha-3core.asm')
C arm/fat/salsa20-2core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_fat_salsa20_2core) picked up by configure
include_src(`arm/neon/salsa20-2core.asm')
C arm/fat/sha1-compress-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(nettle_sha1_compress) picked up by configure
define(`fat_transform', `_$1_armv6')
include_src(`arm/v6/sha1-compress.asm')
C arm/fat/sha256-compress-n-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_sha256_compress_n) picked up by configure
define(`fat_transform', `$1_armv6')
include_src(`arm/v6/sha256-compress-n.asm')
C arm/fat/sha3-permute-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_sha3_permute) picked up by configure
define(`fat_transform', `_$1_neon')
include_src(`arm/neon/sha3-permute.asm')
C arm/fat/sha512-compress-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_sha512_compress) picked up by configure
define(`fat_transform', `$1_neon')
include_src(`arm/neon/sha512-compress.asm')
C arm/fat/umac-nh-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_umac_nh) picked up by configure
define(`fat_transform', `$1_neon')
include_src(`arm/neon/umac-nh.asm')
C arm/fat/umac-nh-n-2.asm
ifelse(`
Copyright (C) 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
dnl PROLOGUE(_nettle_umac_nh_n) picked up by configure
define(`fat_transform', `$1_neon')
include_src(`arm/neon/umac-nh-n.asm')
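C NEON register aliases: quad register qN is the pair of double
C registers d(2N) and d(2N+1). QREG maps a low double register back
C to its quad register; D0REG and D1REG map a quad register to its
C low and high half, e.g. D0REG(q5) expands to d10 and D1REG(q5)
C to d11.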
define(`QREG', `ifelse(
$1, d0, q0,
$1, d2, q1,
$1, d4, q2,
$1, d6, q3,
$1, d8, q4,
$1, d10, q5,
$1, d12, q6,
$1, d14, q7,
$1, d16, q8,
$1, d18, q9,
$1, d20, q10,
$1, d22, q11,
$1, d24, q12,
$1, d26, q13,
$1, d28, q14,
$1, d30, q15,
`NO REGISTER')')dnl
define(`D0REG', `ifelse(
$1, q0, d0,
$1, q1, d2,
$1, q2, d4,
$1, q3, d6,
$1, q4, d8,
$1, q5, d10,
$1, q6, d12,
$1, q7, d14,
$1, q8, d16,
$1, q9, d18,
$1, q10, d20,
$1, q11, d22,
$1, q12, d24,
$1, q13, d26,
$1, q14, d28,
$1, q15, d30,
`NO REGISTER')')dnl
define(`D1REG', `ifelse(
$1, q0, d1,
$1, q1, d3,
$1, q2, d5,
$1, q3, d7,
$1, q4, d9,
$1, q5, d11,
$1, q6, d13,
$1, q7, d15,
$1, q8, d17,
$1, q9, d19,
$1, q10, d21,
$1, q11, d23,
$1, q12, d25,
$1, q13, d27,
$1, q14, d29,
$1, q15, d31,
`NO REGISTER')')dnl
C arm/memxor.asm
ifelse(`
Copyright (C) 2013 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Possible speedups:
C
C The ldm instruction can load two registers per cycle,
C if the address is two-word aligned, or three registers in two
C cycles, regardless of alignment.
C Register usage:
define(`DST', `r0')
define(`SRC', `r1')
define(`N', `r2')
define(`CNT', `r6')
define(`TNC', `r12')
C little-endian and big-endian need to shift in different directions for
C alignment correction
define(`S0ADJ', IF_LE(`lsr', `lsl'))
define(`S1ADJ', IF_LE(`lsl', `lsr'))
.syntax unified
.file "memxor.asm"
.text
.arm
C memxor(void *dst, const void *src, size_t n)
.align 4
PROLOGUE(nettle_memxor)
cmp N, #0
beq .Lmemxor_done
cmp N, #7
bcs .Lmemxor_large
C Simple byte loop
.Lmemxor_bytes:
ldrb r3, [SRC], #+1
ldrb r12, [DST]
eor r3, r12
strb r3, [DST], #+1
subs N, #1
bne .Lmemxor_bytes
.Lmemxor_done:
bx lr
.Lmemxor_align_loop:
ldrb r3, [SRC], #+1
ldrb r12, [DST]
eor r3, r12
strb r3, [DST], #+1
sub N, #1
.Lmemxor_large:
tst DST, #3
bne .Lmemxor_align_loop
C We have at least 4 bytes left to do here.
sub N, #4
ands r3, SRC, #3
beq .Lmemxor_same
C Different alignment case.
C v original SRC
C +-------+------+
C |SRC |SRC+4 |
C +---+---+------+
C |DST |
C +-------+
C
C With little-endian, we need to do
C DST[i] ^= (SRC[i] >> CNT) ^ (SRC[i+1] << TNC)
C With big-endian, we need to do
C DST[i] ^= (SRC[i] << CNT) ^ (SRC[i+1] >> TNC)
push {r4,r5,r6}
lsl CNT, r3, #3
bic SRC, #3
rsb TNC, CNT, #32
ldr r4, [SRC], #+4
tst N, #4
itet eq
moveq r5, r4
subne N, #4
beq .Lmemxor_odd
.Lmemxor_word_loop:
ldr r5, [SRC], #+4
ldr r3, [DST]
eor r3, r3, r4, S0ADJ CNT
eor r3, r3, r5, S1ADJ TNC
str r3, [DST], #+4
.Lmemxor_odd:
ldr r4, [SRC], #+4
ldr r3, [DST]
eor r3, r3, r5, S0ADJ CNT
eor r3, r3, r4, S1ADJ TNC
str r3, [DST], #+4
subs N, #8
bcs .Lmemxor_word_loop
adds N, #8
beq .Lmemxor_odd_done
C We have TNC/8 left-over bytes in r4, high end on LE and low end on
C BE, excess bits to be discarded by alignment adjustment at the other end
S0ADJ r4, CNT
C now byte-aligned at low end on LE and high end on BE
ldr r3, [DST]
eor r3, r4
pop {r4,r5,r6}
C Store bytes, one by one.
.Lmemxor_leftover:
C bring uppermost byte down for saving while preserving lower ones
IF_BE(` ror r3, #24')
strb r3, [DST], #+1
subs N, #1
beq .Lmemxor_done
subs TNC, #8
C bring down next byte, no need to preserve
IF_LE(` lsr r3, #8')
bne .Lmemxor_leftover
b .Lmemxor_bytes
.Lmemxor_odd_done:
pop {r4,r5,r6}
bx lr
.Lmemxor_same:
push {r4,r5,r6,r7,r8,r10,r11,r14} C lr is the link register
subs N, #8
bcc .Lmemxor_same_end
ldmia SRC!, {r3, r4, r5}
C Keep address for loads in r14
mov r14, DST
ldmia r14!, {r6, r7, r8}
subs N, #12
eor r10, r3, r6
eor r11, r4, r7
eor r12, r5, r8
bcc .Lmemxor_same_final_store
subs N, #12
ldmia r14!, {r6, r7, r8}
bcc .Lmemxor_same_wind_down
C 6 cycles per iteration, 0.50 cycles/byte. For this speed,
C loop starts at offset 0x11c in the object file.
.Lmemxor_same_loop:
C r10-r12 contains values to be stored at DST
C r6-r8 contains values read from r14, in advance
ldmia SRC!, {r3, r4, r5}
subs N, #12
stmia DST!, {r10, r11, r12}
eor r10, r3, r6
eor r11, r4, r7
eor r12, r5, r8
ldmia r14!, {r6, r7, r8}
bcs .Lmemxor_same_loop
.Lmemxor_same_wind_down:
C Wind down code
ldmia SRC!, {r3, r4, r5}
stmia DST!, {r10, r11, r12}
eor r10, r3, r6
eor r11, r4, r7
eor r12, r5, r8
.Lmemxor_same_final_store:
stmia DST!, {r10, r11, r12}
.Lmemxor_same_end:
C We have 0-11 bytes left to do, and N holds number of bytes -12.
adds N, #4
bcc .Lmemxor_same_lt_8
C Do 8 bytes more, leftover is in N
ldmia SRC!, {r3, r4}
ldmia DST, {r6, r7}
eor r3, r6
eor r4, r7
stmia DST!, {r3, r4}
pop {r4,r5,r6,r7,r8,r10,r11,r14}
beq .Lmemxor_done
b .Lmemxor_bytes
.Lmemxor_same_lt_8:
pop {r4,r5,r6,r7,r8,r10,r11,r14}
adds N, #4
bcc .Lmemxor_same_lt_4
ldr r3, [SRC], #+4
ldr r12, [DST]
eor r3, r12
str r3, [DST], #+4
beq .Lmemxor_done
b .Lmemxor_bytes
.Lmemxor_same_lt_4:
adds N, #4
beq .Lmemxor_done
b .Lmemxor_bytes
EPILOGUE(nettle_memxor)
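For reference, the byte-level semantics the routine implements, as a plain C sketch; the assembly gets its speed from word accesses, the ldm/stm multiple-transfer instructions and the barrel shifter in the misaligned case:

#include <stddef.h>

/* Reference behaviour of nettle's memxor: dst[i] ^= src[i]. */
void *
memxor_ref(void *dst_v, const void *src_v, size_t n)
{
  unsigned char *dst = dst_v;
  const unsigned char *src = src_v;
  for (size_t i = 0; i < n; i++)
    dst[i] ^= src[i];
  return dst_v;
}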
C arm/memxor3.asm
ifelse(`
Copyright (C) 2013, 2015 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
C Possible speedups:
C
C The ldm instruction can load two registers per cycle,
C if the address is two-word aligned, or three registers in two
C cycles, regardless of alignment.
C Register usage:
define(`DST', `r0')
define(`AP', `r1')
define(`BP', `r2')
define(`N', `r3')
C Temporaries r4-r7
define(`ACNT', `r8')
define(`ATNC', `r10')
define(`BCNT', `r11')
define(`BTNC', `r12')
C little-endian and big-endian need to shift in different directions for
C alignment correction
define(`S0ADJ', IF_LE(`lsr', `lsl'))
define(`S1ADJ', IF_LE(`lsl', `lsr'))
.syntax unified
.file "memxor3.asm"
.text
.arm
C memxor3(void *dst, const void *a, const void *b, size_t n)
.align 2
PROLOGUE(nettle_memxor3)
cmp N, #0
beq .Lmemxor3_ret
push {r4,r5,r6,r7,r8,r10,r11}
cmp N, #7
add AP, N
add BP, N
add DST, N
bcs .Lmemxor3_large
C Simple byte loop
.Lmemxor3_bytes:
ldrb r4, [AP, #-1]!
ldrb r5, [BP, #-1]!
eor r4, r5
strb r4, [DST, #-1]!
subs N, #1
bne .Lmemxor3_bytes
.Lmemxor3_done:
pop {r4,r5,r6,r7,r8,r10,r11}
.Lmemxor3_ret:
bx lr
.Lmemxor3_align_loop:
ldrb r4, [AP, #-1]!
ldrb r5, [BP, #-1]!
eor r5, r4
strb r5, [DST, #-1]!
sub N, #1
.Lmemxor3_large:
tst DST, #3
bne .Lmemxor3_align_loop
C We have at least 4 bytes left to do here.
sub N, #4
ands ACNT, AP, #3
lsl ACNT, #3
beq .Lmemxor3_a_aligned
ands BCNT, BP, #3
lsl BCNT, #3
bne .Lmemxor3_uu
C Swap
mov r4, AP
mov AP, BP
mov BP, r4
.Lmemxor3_au:
C NOTE: We have the relevant shift count in ACNT, not BCNT
C AP is aligned, BP is not
C v original SRC
C +-------+------+
C |SRC-4 |SRC |
C +---+---+------+
C |DST-4 |
C +-------+
C
C With little-endian, we need to do
C DST[i-1] ^= (SRC[i-1] >> CNT) ^ (SRC[i] << TNC)
C With big-endian, we need to do
C DST[i-1] ^= (SRC[i-1] << CNT) ^ (SRC[i] >> TNC)
rsb ATNC, ACNT, #32
bic BP, #3
ldr r4, [BP]
tst N, #4
itet eq
moveq r5, r4
subne N, #4
beq .Lmemxor3_au_odd
.Lmemxor3_au_loop:
ldr r5, [BP, #-4]!
ldr r6, [AP, #-4]!
eor r6, r6, r4, S1ADJ ATNC
eor r6, r6, r5, S0ADJ ACNT
str r6, [DST, #-4]!
.Lmemxor3_au_odd:
ldr r4, [BP, #-4]!
ldr r6, [AP, #-4]!
eor r6, r6, r5, S1ADJ ATNC
eor r6, r6, r4, S0ADJ ACNT
str r6, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_au_loop
adds N, #8
beq .Lmemxor3_done
C Leftover bytes in r4, low end on LE and high end on BE before
C preparatory alignment correction
ldr r5, [AP, #-4]
eor r4, r5, r4, S1ADJ ATNC
C now byte-aligned in high end on LE and low end on BE because we're
C working downwards in saving the very first bytes of the buffer
.Lmemxor3_au_leftover:
C Store a byte at a time
C bring uppermost byte down for saving while preserving lower ones
IF_LE(` ror r4, #24')
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
C bring down next byte, no need to preserve
IF_BE(` lsr r4, #8')
sub AP, #1
bne .Lmemxor3_au_leftover
b .Lmemxor3_bytes
.Lmemxor3_a_aligned:
ands ACNT, BP, #3
lsl ACNT, #3
bne .Lmemxor3_au
C a, b and dst all have the same alignment.
subs N, #8
bcc .Lmemxor3_aligned_word_end
C This loop runs at 8 cycles per iteration. It has been
C observed running at only 7 cycles, for this speed, the loop
C started at offset 0x2ac in the object file.
C FIXME: consider software pipelining, similarly to the memxor
C loop.
.Lmemxor3_aligned_word_loop:
ldmdb AP!, {r4,r5,r6}
ldmdb BP!, {r7,r8,r10}
subs N, #12
eor r4, r7
eor r5, r8
eor r6, r10
stmdb DST!, {r4, r5,r6}
bcs .Lmemxor3_aligned_word_loop
.Lmemxor3_aligned_word_end:
C We have 0-11 bytes left to do, and N holds number of bytes -12.
adds N, #4
bcc .Lmemxor3_aligned_lt_8
C Do 8 bytes more, leftover is in N
ldmdb AP!, {r4, r5}
ldmdb BP!, {r6, r7}
eor r4, r6
eor r5, r7
stmdb DST!, {r4,r5}
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_aligned_lt_8:
adds N, #4
bcc .Lmemxor3_aligned_lt_4
ldr r4, [AP,#-4]!
ldr r5, [BP,#-4]!
eor r4, r5
str r4, [DST,#-4]!
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_aligned_lt_4:
adds N, #4
beq .Lmemxor3_done
b .Lmemxor3_bytes
.Lmemxor3_uu:
cmp ACNT, BCNT
bic AP, #3
bic BP, #3
rsb ATNC, ACNT, #32
bne .Lmemxor3_uud
C AP and BP are unaligned in the same way
ldr r4, [AP]
ldr r6, [BP]
eor r4, r6
tst N, #4
itet eq
moveq r5, r4
subne N, #4
beq .Lmemxor3_uu_odd
.Lmemxor3_uu_loop:
ldr r5, [AP, #-4]!
ldr r6, [BP, #-4]!
eor r5, r6
S1ADJ r4, ATNC
eor r4, r4, r5, S0ADJ ACNT
str r4, [DST, #-4]!
.Lmemxor3_uu_odd:
ldr r4, [AP, #-4]!
ldr r6, [BP, #-4]!
eor r4, r6
S1ADJ r5, ATNC
eor r5, r5, r4, S0ADJ ACNT
str r5, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_uu_loop
adds N, #8
beq .Lmemxor3_done
C Leftover bytes in r4, low end on LE and high end on BE before
C preparatory alignment correction
IF_LE(` ror r4, ACNT')
IF_BE(` ror r4, ATNC')
C now byte-aligned in high end on LE and low end on BE because we're
C working downwards in saving the very first bytes of the buffer
.Lmemxor3_uu_leftover:
C bring uppermost byte down for saving while preserving lower ones
IF_LE(` ror r4, #24')
strb r4, [DST, #-1]!
subs N, #1
beq .Lmemxor3_done
subs ACNT, #8
C bring down next byte, no need to preserve
IF_BE(` lsr r4, #8')
bne .Lmemxor3_uu_leftover
b .Lmemxor3_bytes
.Lmemxor3_uud:
C Both AP and BP unaligned, and in different ways
rsb BTNC, BCNT, #32
ldr r4, [AP]
ldr r6, [BP]
tst N, #4
ittet eq
moveq r5, r4
moveq r7, r6
subne N, #4
beq .Lmemxor3_uud_odd
.Lmemxor3_uud_loop:
ldr r5, [AP, #-4]!
ldr r7, [BP, #-4]!
S1ADJ r4, ATNC
eor r4, r4, r6, S1ADJ BTNC
eor r4, r4, r5, S0ADJ ACNT
eor r4, r4, r7, S0ADJ BCNT
str r4, [DST, #-4]!
.Lmemxor3_uud_odd:
ldr r4, [AP, #-4]!
ldr r6, [BP, #-4]!
S1ADJ r5, ATNC
eor r5, r5, r7, S1ADJ BTNC
eor r5, r5, r4, S0ADJ ACNT
eor r5, r5, r6, S0ADJ BCNT
str r5, [DST, #-4]!
subs N, #8
bcs .Lmemxor3_uud_loop
adds N, #8
beq .Lmemxor3_done
C FIXME: More clever left-over handling? For now, just adjust pointers.
add AP, AP, ACNT, lsr #3
add BP, BP, BCNT, lsr #3
b .Lmemxor3_bytes
EPILOGUE(nettle_memxor3)
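Note that memxor3 first advances all three pointers to the end of their buffers and then works downwards; a plausible reading is that this supports overlapping operands with the destination placed at or below the sources. The byte-level semantics as a C sketch, written back to front like the assembly:

#include <stddef.h>

/* Reference behaviour of nettle's memxor3: dst[i] = a[i] ^ b[i]. */
void *
memxor3_ref(void *dst_v, const void *a_v, const void *b_v, size_t n)
{
  unsigned char *dst = dst_v;
  const unsigned char *a = a_v;
  const unsigned char *b = b_v;
  while (n-- > 0)
    dst[n] = a[n] ^ b[n];
  return dst_v;
}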
C arm/neon/chacha-3core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "chacha-3core.asm"
.fpu neon
define(`DST', `r0')
define(`SRC', `r1')
define(`ROUNDS', `r2')
define(`SRCp32', `r3')
C State, X, Y and Z representing consecutive blocks
define(`X0', `q0')
define(`X1', `q1')
define(`X2', `q2')
define(`X3', `q3')
define(`Y0', `q8')
define(`Y1', `q9')
define(`Y2', `q10')
define(`Y3', `q11')
define(`Z0', `q12')
define(`Z1', `q13')
define(`Z2', `q14')
define(`Z3', `q15')
define(`T0', `q4')
define(`T1', `q5')
define(`T2', `q6')
define(`T3', `q7')
.text
.align 4
.Lcount1:
.int 1,0,0,0
C _chacha_3core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_chacha_3core)
C loads using vld1.32 to be endianness-neutral wrt consecutive 32-bit words
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
vpush {q4,q5,q6,q7}
adr r12, .Lcount1
vld1.32 {Z3}, [r12]
vadd.i64 Y3, X3, Z3 C Increment 64-bit counter
vadd.i64 Z3, Y3, Z3
.Lshared_entry:
vmov Y0, X0
vmov Z0, X0
vmov Y1, X1
vmov Z1, X1
vmov Y2, X2
vmov Z2, X2
C Save initial values for the words including the counters.
vmov T2, Y3
vmov T3, Z3
.Loop:
C Interleave three blocks. Note that with this scheduling,
C only two temporaries, T0 and T1, are needed.
vadd.i32 X0, X0, X1
veor X3, X3, X0
vadd.i32 Y0, Y0, Y1
vrev32.16 X3, X3 C lrot 16
veor Y3, Y3, Y0
vadd.i32 Z0, Z0, Z1
vadd.i32 X2, X2, X3
vrev32.16 Y3, Y3 C lrot 16
veor Z3, Z3, Z0
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vrev32.16 Z3, Z3 C lrot 16
vshl.i32 X1, T0, #12
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #20
vshl.i32 Y1, T1, #12
veor T0, Z1, Z2
vadd.i32 X0, X0, X1
vsri.u32 Y1, T1, #20
vshl.i32 Z1, T0, #12
veor T1, X3, X0
vadd.i32 Y0, Y0, Y1
vsri.u32 Z1, T0, #20
vshl.i32 X3, T1, #8
veor T0, Y3, Y0
vadd.i32 Z0, Z0, Z1
vsri.u32 X3, T1, #24
vshl.i32 Y3, T0, #8
veor T1, Z3, Z0
vadd.i32 X2, X2, X3
vsri.u32 Y3, T0, #24
vext.32 X3, X3, X3, #3
vshl.i32 Z3, T1, #8
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vsri.u32 Z3, T1, #24
vext.32 Y3, Y3, Y3, #3
vshl.i32 X1, T0, #7
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #25
vshl.i32 Y1, T1, #7
veor T0, Z1, Z2
vext.32 X1, X1, X1, #1
vsri.u32 Y1, T1, #25
vshl.i32 Z1, T0, #7
vext.32 Y2, Y2, Y2, #2
vext.32 Y1, Y1, Y1, #1
vsri.u32 Z1, T0, #25
vext.32 X2, X2, X2, #2
C Second QROUND
vadd.i32 X0, X0, X1
vext.32 Z2, Z2, Z2, #2
vext.32 Z1, Z1, Z1, #1
veor X3, X3, X0
vadd.i32 Y0, Y0, Y1
vext.32 Z3, Z3, Z3, #3
vrev32.16 X3, X3 C lrot 16
veor Y3, Y3, Y0
vadd.i32 Z0, Z0, Z1
vadd.i32 X2, X2, X3
vrev32.16 Y3, Y3 C lrot 16
veor Z3, Z3, Z0
veor T0, X1, X2
vadd.i32 Y2, Y2, Y3
vrev32.16 Z3, Z3 C lrot 16
vshl.i32 X1, T0, #12
veor T1, Y1, Y2
vadd.i32 Z2, Z2, Z3
vsri.u32 X1, T0, #20
vshl.i32 Y1, T1, #12
veor T0, Z1, Z2
vadd.i32 X0, X0, X1
vsri.u32 Y1, T1, #20
vshl.i32 Z1, T0, #12
veor T1, X3, X0
vadd.i32 Y0, Y0, Y1
vsri.u32 Z1, T0, #20
vshl.i32 X3, T1, #8
veor T0, Y3, Y0
vadd.i32 Z0, Z0, Z1
vsri.u32 X3, T1, #24
vshl.i32 Y3, T0, #8
veor T1, Z3, Z0
vadd.i32 X2, X2, X3
vsri.u32 Y3, T0, #24
vext.32 X3, X3, X3, #1
vshl.i32 Z3, T1, #8
veor T0, X1, X2
vext.32 X2, X2, X2, #2
vadd.i32 Y2, Y2, Y3
vext.32 Y3, Y3, Y3, #1
vsri.u32 Z3, T1, #24
vshl.i32 X1, T0, #7
veor T1, Y1, Y2
vext.32 Y2, Y2, Y2, #2
vadd.i32 Z2, Z2, Z3
vext.32 Z3, Z3, Z3, #1
vsri.u32 X1, T0, #25
vshl.i32 Y1, T1, #7
veor T0, Z1, Z2
vext.32 Z2, Z2, Z2, #2
vext.32 X1, X1, X1, #3
vsri.u32 Y1, T1, #25
vshl.i32 Z1, T0, #7
vext.32 Y1, Y1, Y1, #3
vsri.u32 Z1, T0, #25
subs ROUNDS, ROUNDS, #2
vext.32 Z1, Z1, Z1, #3
bhi .Loop
C Add updated counters
vadd.i32 Y3, Y3, T2
vadd.i32 Z3, Z3, T3
vld1.32 {T0,T1}, [SRC]
vadd.i32 X0, X0, T0
vadd.i32 X1, X1, T1
C vst1.8 because caller expects results little-endian
C interleave loads, calculations and stores to save cycles on stores
C use vstm when little-endian for some additional speedup
IF_BE(` vst1.8 {X0,X1}, [DST]!')
vld1.32 {T2,T3}, [SRCp32]
vadd.i32 X2, X2, T2
vadd.i32 X3, X3, T3
IF_BE(` vst1.8 {X2,X3}, [DST]!')
IF_LE(` vstmia DST!, {X0,X1,X2,X3}')
vadd.i32 Y0, Y0, T0
vadd.i32 Y1, Y1, T1
IF_BE(` vst1.8 {Y0,Y1}, [DST]!')
vadd.i32 Y2, Y2, T2
IF_BE(` vst1.8 {Y2,Y3}, [DST]!')
IF_LE(` vstmia DST!, {Y0,Y1,Y2,Y3}')
vadd.i32 Z0, Z0, T0
vadd.i32 Z1, Z1, T1
IF_BE(` vst1.8 {Z0,Z1}, [DST]!')
vadd.i32 Z2, Z2, T2
vpop {q4,q5,q6,q7}
IF_BE(` vst1.8 {Z2,Z3}, [DST]')
IF_LE(` vstm DST, {Z0,Z1,Z2,Z3}')
bx lr
EPILOGUE(_nettle_chacha_3core)
PROLOGUE(_nettle_chacha_3core32)
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
vpush {q4,q5,q6,q7}
adr r12, .Lcount1
vld1.32 {Z3}, [r12]
vadd.i32 Y3, X3, Z3 C Increment 32-bit counter
vadd.i32 Z3, Y3, Z3
b .Lshared_entry
EPILOGUE(_nettle_chacha_3core32)
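Each vshl.i32/vsri.u32 pair above is a 32-bit left rotate, vrev32.16 is a rotate by 16, and the vext.32 instructions rotate rows between the column and diagonal halves of a double round. The rotation constants 16, 12, 8, 7 are those of the ChaCha quarter-round, shown here in C for reference (the NEON code computes it on whole 128-bit rows, for three blocks in parallel):

#include <stdint.h>

#define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* One ChaCha quarter-round, updating four state words in place. */
static void
chacha_qround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
  *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
  *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
  *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
  *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
}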
C arm/neon/salsa20-2core.asm
ifelse(`
Copyright (C) 2020 Niels Möller
This file is part of GNU Nettle.
GNU Nettle is free software: you can redistribute it and/or
modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your
option) any later version.
or
* the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your
option) any later version.
or both in parallel, as here.
GNU Nettle is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
')
.file "salsa20-2core.asm"
.fpu neon
define(`DST', `r0')
define(`SRC', `r1')
define(`ROUNDS', `r2')
define(`SRCp32', `r3')
C State, even elements in X, odd elements in Y
define(`X0', `q0')
define(`X1', `q1')
define(`X2', `q2')
define(`X3', `q3')
define(`Y0', `q8')
define(`Y1', `q9')
define(`Y2', `q10')
define(`Y3', `q11')
define(`T0', `q12')
define(`T1', `q13')
define(`T2', `q14')
define(`T3', `q15')
.text
.align 4
.Lcount1:
.int 1,0,0,0
C _salsa20_2core(uint32_t *dst, const uint32_t *src, unsigned rounds)
PROLOGUE(_nettle_salsa20_2core)
C loads using vld1.32 to be endianness-neutral wrt consecutive 32-bit words
add SRCp32, SRC, #32
vld1.32 {X0,X1}, [SRC]
vld1.32 {X2,X3}, [SRCp32]
adr r12, .Lcount1
vmov Y3, X0
vld1.32 {Y1}, [r12]
vmov Y0, X1
vadd.i64 Y1, Y1, X2 C Increment counter
vmov Y2, X3
vtrn.32 X0, Y3 C X0: 0 0 2 2 Y3: 1 1 3 3
vtrn.32 X1, Y0 C X1: 4 4 6 6 Y0: 5 5 7 7
vtrn.32 X2, Y1 C X2: 8 8 10 10 Y1: 9 9 11 11
vtrn.32 X3, Y2 C X3: 12 12 14 14 Y2: 13 13 15 15
C Swap, to get
C X0: 0 10 Y0: 5 15
C X1: 4 14 Y1: 9 3
C X2: 8 2 Y2: 13 7
C X3: 12 6 Y3: 1 11
vswp D1REG(X0), D1REG(X2)
vswp D1REG(X1), D1REG(X3)
vswp D1REG(Y0), D1REG(Y2)
vswp D1REG(Y1), D1REG(Y3)
.Loop:
C Register layout (A is first block, B is second block)
C
C X0: A0 B0 A10 B10 Y0: A5 B5 A15 B15
C X1: A4 B4 A14 B14 Y1: A9 B9 A3 B3
C X2: A8 B8 A2 B2 Y2: A13 B13 A7 B7
C X3: A12 B12 A6 B6 Y3: A1 B1 A11 B11
vadd.i32 T0, X0, X3
vshl.i32 T1, T0, #7
vadd.i32 T2, Y0, Y3
vsri.u32 T1, T0, #25
vshl.i32 T3, T2, #7
veor X1, X1, T1
vsri.u32 T3, T2, #25
vadd.i32 T0, X1, X0
veor Y1, Y1, T3
vshl.i32 T1, T0, #9
vadd.i32 T2, Y1, Y0
vsri.u32 T1, T0, #23
vshl.i32 T3, T2, #9
veor X2, X2, T1
vsri.u32 T3, T2, #23
vadd.i32 T0, X2, X1
veor Y2, Y2, T3
vshl.i32 T1, T0, #13
vadd.i32 T2, Y2, Y1
vsri.u32 T1, T0, #19
vshl.i32 T3, T2, #13
veor X3, X3, T1
vsri.u32 T3, T2, #19
vadd.i32 T0, X3, X2
veor Y3, Y3, T3
vshl.i32 T1, T0, #18
vadd.i32 T2, Y3, Y2
vext.32 Y1, Y1, Y1, #2
vsri.u32 T1, T0, #14
vshl.i32 T3, T2, #18
vext.32 Y2, Y2, Y2, #2
veor X0, X0, T1
vsri.u32 T3, T2, #14
vext.32 X3, X3, X3, #2
veor Y0, Y0, T3
C Register layout:
C X0: A0 B0 A10 B10 Y0: A5 B5 A15 B15
C Y1: A3 B3 A9 B9 X1: A4 B4 A14 B14 (Y1 swapped)
C X2: A2 B2 A8 B8 Y2: A7 B7 A13 B13 (X2, Y2 swapped)
C Y3: A1 B1 A11 B11 X3: A6 B6 A12 B12 (X3 swapped)
vadd.i32 T0, X0, Y1
vext.32 X2, X2, X2, #2
vshl.i32 T1, T0, #7
vadd.i32 T2, Y0, X1
vsri.u32 T1, T0, #25
vshl.i32 T3, T2, #7
veor Y3, Y3, T1
vsri.u32 T3, T2, #25
vadd.i32 T0, Y3, X0
veor X3, X3, T3
vshl.i32 T1, T0, #9
vadd.i32 T2, X3, Y0
vsri.u32 T1, T0, #23
vshl.i32 T3, T2, #9
veor X2, X2, T1
vsri.u32 T3, T2, #23
vadd.i32 T0, X2, Y3
veor Y2, Y2, T3
vshl.i32 T1, T0, #13
vadd.i32 T2, Y2, X3
vsri.u32 T1, T0, #19
vshl.i32 T3, T2, #13
veor Y1, Y1, T1
vsri.u32 T3, T2, #19
vadd.i32 T0, Y1, X2
veor X1, X1, T3
vext.32 X2, X2, X2, #2
vshl.i32 T1, T0, #18
vadd.i32 T2, X1, Y2
vext.32 Y1, Y1, Y1, #2
vsri.u32 T1, T0, #14
subs ROUNDS, ROUNDS, #2
vshl.i32 T3, T2, #18
vext.32 X3, X3, X3, #2
veor X0, X0, T1
vsri.u32 T3, T2, #14
vext.32 Y2, Y2, Y2, #2
veor Y0, Y0, T3
bhi .Loop
C Inverse swaps and transpositions
vswp D1REG(X0), D1REG(X2)
vswp D1REG(X1), D1REG(X3)
vswp D1REG(Y0), D1REG(Y2)
vswp D1REG(Y1), D1REG(Y3)
vld1.32 {T0,T1}, [SRC]
vld1.32 {T2,T3}, [SRCp32]
vtrn.32 X0, Y3
vtrn.32 X1, Y0
vtrn.32 X2, Y1
vtrn.32 X3, Y2
C Add in the original context
vadd.i32 X0, X0, T0
vadd.i32 X1, X1, T1
C vst1.8 because caller expects results little-endian
C interleave loads, calculations and stores to save cycles on stores
C use vstm when little-endian for some additional speedup
IF_BE(` vst1.8 {X0,X1}, [DST]!')
vadd.i32 X2, X2, T2
vadd.i32 X3, X3, T3
IF_BE(` vst1.8 {X2,X3}, [DST]!')
IF_LE(` vstmia DST!, {X0,X1,X2,X3}')
vld1.32 {X0}, [r12]
vadd.i32 T0, T0, Y3
vadd.i64 T2, T2, X0
vadd.i32 T1, T1, Y0
IF_BE(` vst1.8 {T0,T1}, [DST]!')
vadd.i32 T2, T2, Y1
vadd.i32 T3, T3, Y2
IF_BE(` vst1.8 {T2,T3}, [DST]')
IF_LE(` vstm DST, {T0,T1,T2,T3}')
bx lr
EPILOGUE(_nettle_salsa20_2core)
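As in the ChaCha code, each vshl.i32/vsri.u32 pair is a 32-bit rotate; here the constants 7, 9, 13, 18 are those of the Salsa20 quarter-round, computed for both interleaved blocks at once. One quarter-round in C for reference:

#include <stdint.h>

#define ROTL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))

/* One Salsa20 quarter-round, updating four state words in place. */
static void
salsa20_qround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
  *b ^= ROTL32(*a + *d, 7);
  *c ^= ROTL32(*b + *a, 9);
  *d ^= ROTL32(*c + *b, 13);
  *a ^= ROTL32(*d + *c, 18);
}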