Commit 42b8d661 authored by Niels Möller

Use default m4 quote character in asm files, part 5

Update powerpc64 files.
parent 1a7da940
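For context on the change itself: GNU m4 quotes macro names and arguments with a backquote/apostrophe pair by default, while these asm files had been using angle brackets, presumably selected with changequote in the shared m4 setup. A minimal sketch of the two schemes, using a hypothetical GREETING macro (not part of this commit):

dnl Default m4 quoting, as the updated files now use:
define(`GREETING', `Hello, $1!')
GREETING(`world')                dnl expands to: Hello, world!

dnl The older scheme switched the quote characters to < and >,
dnl which is what the removed lines of this diff still show:
changequote(<,>)dnl
define(<GREETING>, <Hello, $1!>)
GREETING(<world>)                dnl expands to: Hello, world!
changequote                      dnl no arguments: restore the default ` and '

With the default quotes in effect, the definitions below only need their quoting characters swapped; the macro bodies are unchanged.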
C powerpc64/fat/aes-decrypt-internal-2.asm
ifelse(<
ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
@@ -29,9 +29,9 @@ ifelse(<
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
>)
')
dnl PROLOGUE(_nettle_aes_decrypt) picked up by configure
define(<fat_transform>, <$1_ppc64>)
include_src(<powerpc64/p8/aes-decrypt-internal.asm>)
define(`fat_transform', `$1_ppc64')
include_src(`powerpc64/p8/aes-decrypt-internal.asm')
C powerpc64/fat/aes-encrypt-internal-2.asm
ifelse(<
ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
@@ -29,9 +29,9 @@ ifelse(<
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
>)
')
dnl PROLOGUE(_nettle_aes_encrypt) picked up by configure
define(<fat_transform>, <$1_ppc64>)
include_src(<powerpc64/p8/aes-encrypt-internal.asm>)
define(`fat_transform', `$1_ppc64')
include_src(`powerpc64/p8/aes-encrypt-internal.asm')
define(<PROLOGUE>,
<.globl C_NAME($1)
define(`PROLOGUE',
`.globl C_NAME($1)
DECLARE_FUNC(C_NAME($1))
ifelse(WORDS_BIGENDIAN,no,
<ifdef(<FUNC_ALIGN>,<.align FUNC_ALIGN>)
`ifdef(`FUNC_ALIGN',`.align FUNC_ALIGN')
C_NAME($1):
addis 2,12,(.TOC.-C_NAME($1))@ha
addi 2,2,(.TOC.-C_NAME($1))@l
.localentry C_NAME($1), .-C_NAME($1)>,
<.section ".opd","aw"
.localentry C_NAME($1), .-C_NAME($1)',
`.section ".opd","aw"
.align 3
C_NAME($1):
.quad .C_NAME($1),.TOC.@tocbase,0
.previous
ifdef(<FUNC_ALIGN>,<.align FUNC_ALIGN>)
.C_NAME($1):>)
undefine(<FUNC_ALIGN>)>)
ifdef(`FUNC_ALIGN',`.align FUNC_ALIGN')
.C_NAME($1):')
undefine(`FUNC_ALIGN')')
define(<EPILOGUE>,
<ifelse(WORDS_BIGENDIAN,no,
<.size C_NAME($1), . - C_NAME($1)>,
<.size .C_NAME($1), . - .C_NAME($1)
.size C_NAME($1), . - .C_NAME($1)>)>)
define(`EPILOGUE',
`ifelse(WORDS_BIGENDIAN,no,
`.size C_NAME($1), . - C_NAME($1)',
`.size .C_NAME($1), . - .C_NAME($1)
.size C_NAME($1), . - .C_NAME($1)')')
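As a rough illustration of what these macros emit (not part of the diff): on a little-endian build, where WORDS_BIGENDIAN is no, and assuming C_NAME maps the symbol name to itself on this target, PROLOGUE(_nettle_aes_decrypt) with FUNC_ALIGN defined as 5 expands to approximately:

	.globl _nettle_aes_decrypt
	DECLARE_FUNC(_nettle_aes_decrypt)
	.align 5
_nettle_aes_decrypt:
	addis 2,12,(.TOC.-_nettle_aes_decrypt)@ha
	addi 2,2,(.TOC.-_nettle_aes_decrypt)@l
	.localentry _nettle_aes_decrypt, .-_nettle_aes_decrypt

That is, the ELFv2 global entry point sets up the TOC pointer in r2 from the entry address in r12 and declares a local entry point, while the big-endian branch instead emits an ELFv1 function descriptor in the .opd section. DECLARE_FUNC is a further macro from the shared m4 setup and is left unexpanded here; the macro also undefines FUNC_ALIGN afterwards so it does not leak into the next function.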
C Get vector-scalar register from vector register
C VSR(VR)
define(<VSR>,<32+$1>)
define(`VSR',`32+$1')
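In other words, VSR only renames a vector register number to the corresponding vector-scalar register number (VR n is VSR 32+n) so that the VSX load and store forms can address it. An illustrative expansion, using the S0 and SRC definitions from the p8 files below:

	lxvd2x VSR(S0),0,SRC	C with S0 = 2 and SRC = 8: lxvd2x 34,0,8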
C Load the quadword in DATA_SRC storage into
C VEC_DST. GPR is general-purpose register
C used to obtain the effective address of
C DATA_SRC storage.
C DATA_LOAD_VEC(VEC_DST, DATA_SRC, GPR)
define(<DATA_LOAD_VEC>,
<ld $3,$2@got(2)
lvx $1,0,$3>)
define(`DATA_LOAD_VEC',
`ld $3,$2@got(2)
lvx $1,0,$3')
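An illustrative expansion (not part of the diff): the call DATA_LOAD_VEC(swap_mask,.swap_mask,5) in the encrypt routine below, with swap_mask defined as 0, becomes:

	ld 5,.swap_mask@got(2)	C r5 = address of .swap_mask, via the GOT and the TOC pointer in r2
	lvx 0,0,5		C load the 16-byte mask at that address into v0 (swap_mask)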
C powerpc64/p8/aes-decrypt-internal.asm
ifelse(<
ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
@@ -27,36 +27,36 @@ ifelse(<
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
>)
')
C Register usage:
define(<SP>, <1>)
define(<TOCP>, <2>)
define(`SP', `1')
define(`TOCP', `2')
define(<ROUNDS>, <3>)
define(<KEYS>, <4>)
define(<LENGTH>, <6>)
define(<DST>, <7>)
define(<SRC>, <8>)
define(`ROUNDS', `3')
define(`KEYS', `4')
define(`LENGTH', `6')
define(`DST', `7')
define(`SRC', `8')
define(<swap_mask>, <0>)
define(`swap_mask', `0')
define(<K>, <1>)
define(<S0>, <2>)
define(<S1>, <3>)
define(<S2>, <4>)
define(<S3>, <5>)
define(<S4>, <6>)
define(<S5>, <7>)
define(<S6>, <8>)
define(<S7>, <9>)
define(`K', `1')
define(`S0', `2')
define(`S1', `3')
define(`S2', `4')
define(`S3', `5')
define(`S4', `6')
define(`S5', `7')
define(`S6', `8')
define(`S7', `9')
C ZERO vector register is used in place of RoundKey
C for vncipher instruction because the order of InvMixColumns
C and Xor processes are flipped in that instruction.
C The Xor process with RoundKey is executed afterward.
define(<ZERO>, <10>)
define(`ZERO', `10')
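In concrete terms: vncipher combines the round key and InvMixColumns in the opposite order from what this code's key schedule expects, so each round passes the all-zero register as the RoundKey operand and applies the real key with a separate vxor afterwards. A sketch of the per-block pattern the comment describes (the round loop itself lies outside the hunks shown here):

	vncipher S0,S0,ZERO	C inverse round with an all-zero round key
	vxor S0,S0,K		C xor in the real round key afterwards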
.file "aes-decrypt-internal.asm"
@@ -67,7 +67,7 @@ define(<ZERO>, <10>)
C size_t length, uint8_t *dst,
C uint8_t *src)
define(<FUNC_ALIGN>, <5>)
define(`FUNC_ALIGN', `5')
PROLOGUE(_nettle_aes_decrypt)
vxor ZERO,ZERO,ZERO
@@ -110,14 +110,14 @@ Lx8_loop:
lxvd2x VSR(S6),30,SRC
lxvd2x VSR(S7),31,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
vperm S7,S7,S7,swap_mask>)
vperm S7,S7,S7,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -164,14 +164,14 @@ L8x_round_loop:
vncipherlast S6,S6,K
vncipherlast S7,S7,K
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
vperm S7,S7,S7,swap_mask>)
vperm S7,S7,S7,swap_mask')
stxvd2x VSR(S0),0,DST
stxvd2x VSR(S1),25,DST
@@ -213,10 +213,10 @@ L4x:
addi 9,9,0x10
lxvd2x VSR(S3),9,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask>)
vperm S3,S3,S3,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -247,10 +247,10 @@ L4x_round_loop:
vncipherlast S2,S2,K
vncipherlast S3,S3,K
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask>)
vperm S3,S3,S3,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
@@ -277,8 +277,8 @@ L2x:
li 9,0x10
lxvd2x VSR(S1),9,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -301,8 +301,8 @@ L2x_round_loop:
vncipherlast S0,S0,K
vncipherlast S1,S1,K
IF_LE(<vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
@@ -322,7 +322,7 @@ L1x:
lxvd2x VSR(S0),0,SRC
IF_LE(<vperm S0,S0,S0,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask')
vxor S0,S0,K
@@ -341,7 +341,7 @@ L1x_round_loop:
vperm K,K,K,swap_mask
vncipherlast S0,S0,K
IF_LE(<vperm S0,S0,S0,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask')
stxvd2x VSR(S0),0,DST
@@ -352,5 +352,5 @@ EPILOGUE(_nettle_aes_decrypt)
.data
.align 4
.swap_mask:
IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>)
IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>)
IF_LE(`.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7')
IF_BE(`.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12')
C powerpc64/p8/aes-encrypt-internal.asm
ifelse(<
ifelse(`
Copyright (C) 2020 Mamone Tarsha
This file is part of GNU Nettle.
@@ -27,30 +27,30 @@ ifelse(<
You should have received copies of the GNU General Public License and
the GNU Lesser General Public License along with this program. If
not, see http://www.gnu.org/licenses/.
>)
')
C Register usage:
define(<SP>, <1>)
define(<TOCP>, <2>)
define(`SP', `1')
define(`TOCP', `2')
define(<ROUNDS>, <3>)
define(<KEYS>, <4>)
define(<LENGTH>, <6>)
define(<DST>, <7>)
define(<SRC>, <8>)
define(`ROUNDS', `3')
define(`KEYS', `4')
define(`LENGTH', `6')
define(`DST', `7')
define(`SRC', `8')
define(<swap_mask>, <0>)
define(`swap_mask', `0')
define(<K>, <1>)
define(<S0>, <2>)
define(<S1>, <3>)
define(<S2>, <4>)
define(<S3>, <5>)
define(<S4>, <6>)
define(<S5>, <7>)
define(<S6>, <8>)
define(<S7>, <9>)
define(`K', `1')
define(`S0', `2')
define(`S1', `3')
define(`S2', `4')
define(`S3', `5')
define(`S4', `6')
define(`S5', `7')
define(`S6', `8')
define(`S7', `9')
.file "aes-encrypt-internal.asm"
@@ -61,7 +61,7 @@ define(<S7>, <9>)
C size_t length, uint8_t *dst,
C uint8_t *src)
define(<FUNC_ALIGN>, <5>)
define(`FUNC_ALIGN', `5')
PROLOGUE(_nettle_aes_encrypt)
DATA_LOAD_VEC(swap_mask,.swap_mask,5)
@@ -102,14 +102,14 @@ Lx8_loop:
lxvd2x VSR(S6),30,SRC
lxvd2x VSR(S7),31,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
vperm S7,S7,S7,swap_mask>)
vperm S7,S7,S7,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -148,14 +148,14 @@ L8x_round_loop:
vcipherlast S6,S6,K
vcipherlast S7,S7,K
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask
vperm S4,S4,S4,swap_mask
vperm S5,S5,S5,swap_mask
vperm S6,S6,S6,swap_mask
vperm S7,S7,S7,swap_mask>)
vperm S7,S7,S7,swap_mask')
stxvd2x VSR(S0),0,DST
stxvd2x VSR(S1),25,DST
@@ -197,10 +197,10 @@ L4x:
addi 9,9,0x10
lxvd2x VSR(S3),9,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask>)
vperm S3,S3,S3,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -227,10 +227,10 @@ L4x_round_loop:
vcipherlast S2,S2,K
vcipherlast S3,S3,K
IF_LE(<vperm S0,S0,S0,swap_mask
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask
vperm S2,S2,S2,swap_mask
vperm S3,S3,S3,swap_mask>)
vperm S3,S3,S3,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
@@ -257,8 +257,8 @@ L2x:
li 9,0x10
lxvd2x VSR(S1),9,SRC
IF_LE(<vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask')
vxor S0,S0,K
vxor S1,S1,K
@@ -279,8 +279,8 @@ L2x_round_loop:
vcipherlast S0,S0,K
vcipherlast S1,S1,K
IF_LE(<vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask
vperm S1,S1,S1,swap_mask')
stxvd2x VSR(S0),0,DST
li 9,0x10
@@ -300,7 +300,7 @@ L1x:
lxvd2x VSR(S0),0,SRC
IF_LE(<vperm S0,S0,S0,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask')
vxor S0,S0,K
@@ -318,7 +318,7 @@ L1x_round_loop:
vperm K,K,K,swap_mask
vcipherlast S0,S0,K
IF_LE(<vperm S0,S0,S0,swap_mask>)
IF_LE(`vperm S0,S0,S0,swap_mask')
stxvd2x VSR(S0),0,DST
@@ -329,5 +329,5 @@ EPILOGUE(_nettle_aes_encrypt)
.data
.align 4
.swap_mask:
IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>)
IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>)
IF_LE(`.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7')
IF_BE(`.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12')