C arm/memxor.asm

ifelse(<
   Copyright (C) 2013 Niels Möller

   This file is part of GNU Nettle.

   GNU Nettle is free software: you can redistribute it and/or
   modify it under the terms of either:

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at your
       option) any later version.

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at your
       option) any later version.

   or both in parallel, as here.

   GNU Nettle is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see http://www.gnu.org/licenses/.
>) 

C Possible speedups:
C
C The ldm instruction can load two registers per cycle,
C if the address is two-word aligned. Or three registers in two
C cycles, regardless of alignment.

C Register usage:

define(<DST>, <r0>)
define(<SRC>, <r1>)
define(<N>, <r2>)
define(<CNT>, <r6>)
define(<TNC>, <r12>)

	.syntax unified

	.file "memxor.asm"

	.text
	.arm

	C memxor(void *dst, const void *src, size_t n)
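	C Reference semantics, as a C sketch (illustrative only, not
	C part of the build; the function name is hypothetical):
	C
	C   void memxor_ref(void *dst, const void *src, size_t n)
	C   {
	C     unsigned char *d = dst;
	C     const unsigned char *s = src;
	C     while (n-- > 0)
	C       *d++ ^= *s++;
	C   }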
	.align 4
PROLOGUE(nettle_memxor)
	cmp	N, #0
	beq	.Lmemxor_done

	cmp	N, #7
	bcs	.Lmemxor_large

	C Simple byte loop
.Lmemxor_bytes:
	ldrb	r3, [SRC], #+1
	ldrb	r12, [DST]
	eor	r3, r12
	strb	r3, [DST], #+1
	subs	N, #1
	bne	.Lmemxor_bytes

.Lmemxor_done:
	bx	lr

.Lmemxor_align_loop:
	ldrb	r3, [SRC], #+1
	ldrb	r12, [DST]
	eor	r3, r12
	strb	r3, [DST], #+1
	sub	N, #1

.Lmemxor_large:
	tst	DST, #3
	bne	.Lmemxor_align_loop

	C We have at least 4 bytes left to do here.
	sub	N, #4

	ands	r3, SRC, #3
	beq	.Lmemxor_same

	C Different alignment case.
	C     v original SRC
	C +-------+------+
	C |SRC    |SRC+4 |
	C +---+---+------+
	C     |DST    |
	C     +-------+
	C
	C With little-endian, we need to do
	C DST[i] ^= (SRC[i] >> CNT) ^ (SRC[i+1] << TNC)
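	C
	C Roughly, in C (a sketch; s is SRC rounded down to a word
	C boundary, d a word pointer, and w carries the previous word
	C across iterations, which the loop below keeps in r4/r5 by
	C unrolling two words per iteration):
	C
	C   w = s[0];
	C   for (i = 0; i < nwords; i++) {
	C     next = s[i+1];
	C     d[i] ^= (w >> CNT) ^ (next << TNC);
	C     w = next;
	C   }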

	push	{r4,r5,r6}
	
	lsl	CNT, r3, #3
	bic	SRC, #3
	rsb	TNC, CNT, #32

	ldr	r4, [SRC], #+4

	tst	N, #4
	itet	eq
	moveq	r5, r4
	subne	N, #4
	beq	.Lmemxor_odd

.Lmemxor_word_loop:
	ldr	r5, [SRC], #+4
	ldr	r3, [DST]
	eor	r3, r3, r4, lsr CNT
	eor	r3, r3, r5, lsl TNC
	str	r3, [DST], #+4
.Lmemxor_odd:
	ldr	r4, [SRC], #+4
	ldr	r3, [DST]
	eor	r3, r3, r5, lsr CNT
	eor	r3, r3, r4, lsl TNC
	str	r3, [DST], #+4
	subs	N, #8
	bcs	.Lmemxor_word_loop
	adds	N, #8
	beq	.Lmemxor_odd_done

	C We have TNC/8 left-over bytes in r4, high end
	lsr	r4, CNT
	ldr	r3, [DST]
	eor	r3, r4

	pop	{r4,r5,r6}

	C Store bytes, one by one.
.Lmemxor_leftover:
	strb	r3, [DST], #+1
	subs	N, #1
	beq	.Lmemxor_done
	subs	TNC, #8
	lsr	r3, #8
	bne	.Lmemxor_leftover
	b	.Lmemxor_bytes
.Lmemxor_odd_done:
	pop	{r4,r5,r6}
	bx	lr

.Lmemxor_same:
	push	{r4,r5,r6,r7,r8,r10,r11,r14}	C lr (r14) is saved so it can be used as a scratch register

	subs	N, #8
	bcc	.Lmemxor_same_end

	ldmia	SRC!, {r3, r4, r5}
	C Keep address for loads in r14
	mov	r14, DST
	ldmia	r14!, {r6, r7, r8}
	subs	N, #12
	eor	r10, r3, r6
	eor	r11, r4, r7
	eor	r12, r5, r8
	bcc	.Lmemxor_same_final_store
	subs	N, #12
	ldmia	r14!, {r6, r7, r8}
	bcc	.Lmemxor_same_wind_down

	C 6 cycles per iteration, 0.50 cycles/byte. For this speed, the
	C loop starts at offset 0x11c in the object file.
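	C
	C The loop is software pipelined: the results for one block
	C (r10-r12) are stored while the inputs of the next block are
	C already loaded (DST is read ahead through r14). A one-word
	C C sketch of the pattern:
	C
	C   x = src[0] ^ dst[0];
	C   for (i = 0; i + 1 < nwords; i++) {
	C     y = src[i+1] ^ dst[i+1];	/* computed early */
	C     dst[i] = x;		/* stored late */
	C     x = y;
	C   }
	C   dst[nwords-1] = x;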

.Lmemxor_same_loop:
	C r10-r12 contains values to be stored at DST
	C r6-r8 contains values read from r14, in advance
	ldmia	SRC!, {r3, r4, r5}
	subs	N, #12
	stmia	DST!, {r10, r11, r12}
	eor	r10, r3, r6
	eor	r11, r4, r7
	eor	r12, r5, r8
	ldmia	r14!, {r6, r7, r8}
	bcs	.Lmemxor_same_loop

.Lmemxor_same_wind_down:
	C Wind down code
	ldmia	SRC!, {r3, r4, r5}
	stmia	DST!, {r10, r11, r12}
	eor	r10, r3, r6
	eor	r11, r4, r7
	eor	r12, r5, r8
.Lmemxor_same_final_store:
	stmia	DST!, {r10, r11, r12}
	
.Lmemxor_same_end:
	C We have 0-11 bytes left to do, and N holds the remaining byte count minus 12.
	adds	N, #4
	bcc	.Lmemxor_same_lt_8
	C Do 8 bytes more, leftover is in N
	ldmia	SRC!, {r3, r4}
	ldmia	DST, {r6, r7}
	eor	r3, r6
	eor	r4, r7
	stmia	DST!, {r3, r4}
	pop	{r4,r5,r6,r7,r8,r10,r11,r14}
	beq	.Lmemxor_done
	b	.Lmemxor_bytes

.Lmemxor_same_lt_8:
	pop	{r4,r5,r6,r7,r8,r10,r11,r14}
	adds	N, #4
	bcc	.Lmemxor_same_lt_4

	ldr	r3, [SRC], #+4
	ldr	r12, [DST]
	eor	r3, r12
	str	r3, [DST], #+4
	beq	.Lmemxor_done
	b	.Lmemxor_bytes

.Lmemxor_same_lt_4:
	adds	N, #4
	beq	.Lmemxor_done
	b	.Lmemxor_bytes
	
EPILOGUE(nettle_memxor)

define(<DST>, <r0>)
define(<AP>, <r1>)
define(<BP>, <r2>)
define(<N>, <r3>)
undefine(<CNT>)
undefine(<TNC>)

C Temporaries r4-r7
define(<ACNT>, <r8>)
define(<ATNC>, <r10>)
define(<BCNT>, <r11>)
define(<BTNC>, <r12>)

	C memxor3(void *dst, const void *a, const void *b, size_t n)
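	C Reference semantics, as a C sketch (illustrative only; the
	C function name is hypothetical). Note that the assembly below
	C adds N to all three pointers and works downwards:
	C
	C   void memxor3_ref(void *dst, const void *a, const void *b,
	C                    size_t n)
	C   {
	C     unsigned char *d = dst;
	C     const unsigned char *ap = a, *bp = b;
	C     while (n-- > 0)
	C       d[n] = ap[n] ^ bp[n];
	C   }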
	.align 2
PROLOGUE(nettle_memxor3)
	cmp	N, #0
	beq	.Lmemxor3_ret

	push	{r4,r5,r6,r7,r8,r10,r11}
	cmp	N, #7

	add	AP, N
	add	BP, N
	add	DST, N

	bcs	.Lmemxor3_large

	C Simple byte loop
.Lmemxor3_bytes:
	ldrb	r4, [AP, #-1]!
	ldrb	r5, [BP, #-1]!
	eor	r4, r5
	strb	r4, [DST, #-1]!
	subs	N, #1
	bne	.Lmemxor3_bytes

.Lmemxor3_done:
	pop	{r4,r5,r6,r7,r8,r10,r11}
.Lmemxor3_ret:
	bx	lr

.Lmemxor3_align_loop:
	ldrb	r4, [AP, #-1]!
	ldrb	r5, [BP, #-1]!
	eor	r5, r4
	strb	r5, [DST, #-1]!
	sub	N, #1

.Lmemxor3_large:
	tst	DST, #3
	bne	.Lmemxor3_align_loop

	C We have at least 4 bytes left to do here.
	sub	N, #4
	ands	ACNT, AP, #3
	lsl	ACNT, #3
	beq	.Lmemxor3_a_aligned

	ands	BCNT, BP, #3
	lsl	BCNT, #3
	bne	.Lmemxor3_uu

	C Swap
	mov	r4, AP
	mov	AP, BP
	mov	BP, r4

.Lmemxor3_au:
	C NOTE: We have the relevant shift count in ACNT, not BCNT

	C AP is aligned, BP is not
	C           v original SRC
	C +-------+------+
	C |SRC-4  |SRC   |
	C +---+---+------+
	C     |DST-4  |
	C     +-------+
	C
	C With little-endian, we need to do
	C DST[i-1] ^= (SRC[i-1] >> CNT) ^ (SRC[i] << TNC)
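	C
	C Downward C sketch (illustrative; b is BP rounded down to a
	C word boundary, w carries the word at the higher address):
	C
	C   w = b[nwords];
	C   while (nwords-- > 0)
	C     {
	C       v = b[nwords];
	C       d[nwords] = a[nwords] ^ (v >> ACNT) ^ (w << ATNC);
	C       w = v;
	C     }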
	rsb	ATNC, ACNT, #32
	bic	BP, #3

	ldr	r4, [BP]

	tst	N, #4
	itet	eq
	moveq	r5, r4
	subne	N, #4
	beq	.Lmemxor3_au_odd

.Lmemxor3_au_loop:
	ldr	r5, [BP, #-4]!
	ldr	r6, [AP, #-4]!
	eor	r6, r6, r4, lsl ATNC
	eor	r6, r6, r5, lsr ACNT
	str	r6, [DST, #-4]!
.Lmemxor3_au_odd:
	ldr	r4, [BP, #-4]!
	ldr	r6, [AP, #-4]!
	eor	r6, r6, r5, lsl ATNC
	eor	r6, r6, r4, lsr ACNT
	str	r6, [DST, #-4]!
	subs	N, #8
	bcs	.Lmemxor3_au_loop
	adds	N, #8
	beq	.Lmemxor3_done

	C Leftover bytes in r4, low end
	ldr	r5, [AP, #-4]
	eor	r4, r5, r4, lsl ATNC

.Lmemxor3_au_leftover:
	C Store a byte at a time
	ror	r4, #24
	strb	r4, [DST, #-1]!
	subs	N, #1
	beq	.Lmemxor3_done
	subs	ACNT, #8
	sub	AP, #1
	bne	.Lmemxor3_au_leftover
	b	.Lmemxor3_bytes

.Lmemxor3_a_aligned:
	ands	ACNT, BP, #3
	lsl	ACNT, #3
	bne	.Lmemxor3_au

	C a, b and dst all have the same alignment.
	subs	N, #8
	bcc	.Lmemxor3_aligned_word_end

	C This loop runs at 8 cycles per iteration. It has been
	C observed running at only 7 cycles; at that speed, the loop
	C started at offset 0x2ac in the object file.

	C FIXME: consider software pipelining, similarly to the memxor
	C loop.
	
.Lmemxor3_aligned_word_loop:
	ldmdb	AP!, {r4,r5,r6}
	ldmdb	BP!, {r7,r8,r10}
	subs	N, #12
	eor	r4, r7
	eor	r5, r8
	eor	r6, r10
	stmdb	DST!, {r4, r5,r6}
	bcs	.Lmemxor3_aligned_word_loop

.Lmemxor3_aligned_word_end:
	C We have 0-11 bytes left to do, and N holds the remaining byte count minus 12.
	adds	N, #4
	bcc	.Lmemxor3_aligned_lt_8
	C Do 8 bytes more, leftover is in N
	ldmdb	AP!, {r4, r5}
	ldmdb	BP!, {r6, r7}
	eor	r4, r6
	eor	r5, r7
	stmdb	DST!, {r4,r5}
	beq	.Lmemxor3_done
	b	.Lmemxor3_bytes

.Lmemxor3_aligned_lt_8:
	adds	N, #4
	bcc	.Lmemxor3_aligned_lt_4

	ldr	r4, [AP,#-4]!
	ldr	r5, [BP,#-4]!
	eor	r4, r5
	str	r4, [DST,#-4]!
	beq	.Lmemxor3_done
	b	.Lmemxor3_bytes

.Lmemxor3_aligned_lt_4:
	adds	N, #4	
	beq	.Lmemxor3_done
	b	.Lmemxor3_bytes

.Lmemxor3_uu:

	cmp	ACNT, BCNT
	bic	AP, #3
	bic	BP, #3
	rsb	ATNC, ACNT, #32

	bne	.Lmemxor3_uud

	C AP and BP are unaligned in the same way
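	C (The words are XOR'd together first; the combined stream is
	C then shifted into place as in the .Lmemxor3_au case above.)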

	ldr	r4, [AP]
	ldr	r6, [BP]
	eor	r4, r6

	tst	N, #4
	itet	eq
	moveq	r5, r4
	subne	N, #4
	beq	.Lmemxor3_uu_odd

.Lmemxor3_uu_loop:
	ldr	r5, [AP, #-4]!
	ldr	r6, [BP, #-4]!
	eor	r5, r6
	lsl	r4, ATNC
	eor	r4, r4, r5, lsr ACNT
	str	r4, [DST, #-4]!
.Lmemxor3_uu_odd:
	ldr	r4, [AP, #-4]!
	ldr	r6, [BP, #-4]!
	eor	r4, r6
	lsl	r5, ATNC
	eor	r5, r5, r4, lsr ACNT
	str	r5, [DST, #-4]!
	subs	N, #8
	bcs	.Lmemxor3_uu_loop
	adds	N, #8
	beq	.Lmemxor3_done

	C Leftover bytes in r4, low end
	ror	r4, ACNT
.Lmemxor3_uu_leftover:
	ror	r4, #24
	strb	r4, [DST, #-1]!
	subs	N, #1
	beq	.Lmemxor3_done
	subs	ACNT, #8
	bne	.Lmemxor3_uu_leftover
	b	.Lmemxor3_bytes

.Lmemxor3_uud:
	C Both AP and BP unaligned, and in different ways
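	C
	C Each output word combines two words from each source. As a
	C sketch, with a0/b0 the previously read (higher) words and
	C a1/b1 the newly read ones:
	C
	C   d = (a0 << ATNC) ^ (a1 >> ACNT) ^ (b0 << BTNC) ^ (b1 >> BCNT)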
	rsb	BTNC, BCNT, #32

	ldr	r4, [AP]
	ldr	r6, [BP]

	tst	N, #4
	ittet	eq
	moveq	r5, r4
	moveq	r7, r6
	subne	N, #4
	beq	.Lmemxor3_uud_odd

.Lmemxor3_uud_loop:
	ldr	r5, [AP, #-4]!
	ldr	r7, [BP, #-4]!
	lsl	r4, ATNC
	eor	r4, r4, r6, lsl BTNC
	eor	r4, r4, r5, lsr ACNT
	eor	r4, r4, r7, lsr BCNT
	str	r4, [DST, #-4]!
.Lmemxor3_uud_odd:
	ldr	r4, [AP, #-4]!
	ldr	r6, [BP, #-4]!
	lsl	r5, ATNC
	eor	r5, r5, r7, lsl BTNC
	eor	r5, r5, r4, lsr ACNT
	eor	r5, r5, r6, lsr BCNT
	str	r5, [DST, #-4]!
	subs	N, #8
	bcs	.Lmemxor3_uud_loop
	adds	N, #8
	beq	.Lmemxor3_done

	C FIXME: More clever left-over handling? For now, just adjust pointers.
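	C (ACNT and BCNT hold the misalignments in bits, so the right
	C shift by 3 converts them back to byte offsets.)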
	add	AP, AP,	ACNT, lsr #3
	add	BP, BP, BCNT, lsr #3
	b	.Lmemxor3_bytes
EPILOGUE(nettle_memxor3)