/* serpent-decrypt.c
 *
 * The serpent block cipher.
 *
 * For more details on this algorithm, see the Serpent website at
 * http://www.cl.cam.ac.uk/~rja14/serpent.html
 */

/* nettle, low-level cryptographics library
 *
 * Copyright (C) 2011  Niels Möller
 * Copyright (C) 2010, 2011  Simon Josefsson
 * Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
 *  
 * The nettle library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 * 
 * The nettle library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 * 
 * You should have received a copy of the GNU Lesser General Public License
 * along with the nettle library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 */

/* This file is derived from cipher/serpent.c in Libgcrypt v1.4.6.
   The adaption to Nettle was made by Simon Josefsson on 2010-12-07
   with final touches on 2011-05-30.  Changes include replacing
   libgcrypt with nettle in the license template, renaming
   serpent_context to serpent_ctx, renaming u32 to uint32_t, removing
   libgcrypt stubs and selftests, modifying entry function prototypes,
   using FOR_BLOCKS to iterate through data in encrypt/decrypt, using
   LE_READ_UINT32 and LE_WRITE_UINT32 to access data in
   encrypt/decrypt, and running indent on the code. */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <limits.h>

#include "serpent.h"

#include "macros.h"
#include "serpent-internal.h"

/* These are the S-Boxes of Serpent.  They are copied from Serpents
   reference implementation (the optimized one, contained in
   `floppy2') and are therefore:

     Copyright (C) 1998 Ross Anderson, Eli Biham, Lars Knudsen.

  To quote the Serpent homepage
  (http://www.cl.cam.ac.uk/~rja14/serpent.html):

  "Serpent is now completely in the public domain, and we impose no
   restrictions on its use.  This was announced on the 21st August at
   the First AES Candidate Conference. The optimised implementations
   in the submission package are now under the GNU PUBLIC LICENSE
   (GPL), although some comments in the code still say otherwise. You
   are welcome to use Serpent for any application."  */

/* Original single-assignment form:

     t01 = x2  ^ x3;
     t02 = x0  | x1;
     t03 = x1  | x2;
     t04 = x2  & t01;
     t05 = t02 ^ t01;
     t06 = x0  | t04;
     y2  =     ~ t05;
     t08 = x1  ^ x3;
     t09 = t03 & t08;
     t10 = x3  | y2;
     y1  = t09 ^ t06;
     t12 = x0  | t05;
     t13 = y1  ^ t12;
     t14 = t03 ^ t10;
     t15 = x0  ^ x2;
     y3  = t14 ^ t13;
     t17 = t05 & t13;
     t18 = t14 | t17;
     y0  = t15 ^ t18;
*/
/* Inverse of Serpent S-box 0, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   Unlike the other SBOX*_INVERSE macros, this one works in place and
   clobbers x0..x3 as scratch; the `type' argument is unused (no
   temporaries are declared) but kept so all the macros share one
   signature.  */
#define SBOX0_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3)	\
  do {								\
    y0  = x0 ^ x2;						\
    y2  = x0 | x1;						\
    y1  = x2 ^ x3;						\
    y2 ^= y1;							\
    y1 &= x2;							\
    x2 |= x1;							\
    x1 ^= x3;							\
    y1 |= x0;							\
    x1 &= x2;							\
    y1 ^= x1;							\
    x0 |= y2;							\
    x0 ^= y1;							\
    x1  = y2 & x0;						\
    y2  = ~ y2;							\
    x3 |= y2;							\
    x3 ^= x2;							\
    y3  = x3 ^ x0;						\
    x1 |= x3;							\
    y0 ^= x1;							\
  } while (0)

/* Inverse of Serpent S-box 1, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX1_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t05, t06, t07, t08; \
    type t09, t10, t11, t14, t15, t17, t01; \
    t01 = x0  ^ x1; \
    t02 = x1  | x3; \
    t03 = x0  & x2; \
    t04 = x2  ^ t02; \
    t05 = x0  | t04; \
    t06 = t01 & t05; \
    t07 = x3  | t03; \
    t08 = x1  ^ t06; \
    t09 = t07 ^ t06; \
    t10 = t04 | t03; \
    t11 = x3  & t08; \
    y2  =     ~ t09; \
    y1  = t10 ^ t11; \
    t14 = x0  | y2; \
    t15 = t06 ^ y1; \
    y3  = t01 ^ t04; \
    t17 = x2  ^ t15; \
    y0  = t14 ^ t17; \
  } while (0)

/* Inverse of Serpent S-box 2, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX2_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do {						\
    type t02, t03, t04, t06, t07, t08, t09; \
    type t10, t11, t12, t15, t16, t17, t01; \
    t01 = x0  ^ x3; \
    t02 = x2  ^ x3; \
    t03 = x0  & x2; \
    t04 = x1  | t02; \
    y0  = t01 ^ t04; \
    t06 = x0  | x2; \
    t07 = x3  | y0; \
    t08 =     ~ x3; \
    t09 = x1  & t06; \
    t10 = t08 | t03; \
    t11 = x1  & t07; \
    t12 = t06 & t02; \
    y3  = t09 ^ t10; \
    y1  = t12 ^ t11; \
    t15 = x2  & y3; \
    t16 = y0  ^ y1; \
    t17 = t10 ^ t15; \
    y2  = t16 ^ t17; \
  } while (0)

/* Inverse of Serpent S-box 3, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX3_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t05, t06, t07, t09; \
    type t11, t12, t13, t14, t16, t01; \
    t01 = x2  | x3; \
    t02 = x0  | x3; \
    t03 = x2  ^ t02; \
    t04 = x1  ^ t02; \
    t05 = x0  ^ x3; \
    t06 = t04 & t03; \
    t07 = x1  & t01; \
    y2  = t05 ^ t06; \
    t09 = x0  ^ t03; \
    y0  = t07 ^ t03; \
    t11 = y0  | t05; \
    t12 = t09 & t11; \
    t13 = x0  & y2; \
    t14 = t01 ^ t05; \
    y1  = x1  ^ t12; \
    t16 = x1  | t13; \
    y3  = t14 ^ t16; \
  } while (0)

/* Inverse of Serpent S-box 4, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX4_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t05, t06, t07, t09; \
    type t10, t11, t12, t13, t15, t01; \
    t01 = x1  | x3; \
    t02 = x2  | x3; \
    t03 = x0  & t01; \
    t04 = x1  ^ t02; \
    t05 = x2  ^ x3; \
    t06 =     ~ t03; \
    t07 = x0  & t04; \
    y1  = t05 ^ t07; \
    t09 = y1  | t06; \
    t10 = x0  ^ t07; \
    t11 = t01 ^ t09; \
    t12 = x3  ^ t04; \
    t13 = x2  | t10; \
    y3  = t03 ^ t12; \
    t15 = x0  ^ t04; \
    y2  = t11 ^ t13; \
    y0  = t15 ^ t09; \
  } while (0)

/* Inverse of Serpent S-box 5, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX5_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t05, t07, t08, t09; \
    type t10, t12, t13, t15, t16, t01; \
    t01 = x0  & x3; \
    t02 = x2  ^ t01; \
    t03 = x0  ^ x3; \
    t04 = x1  & t02; \
    t05 = x0  & x2; \
    y0  = t03 ^ t04; \
    t07 = x0  & y0; \
    t08 = t01 ^ y0; \
    t09 = x1  | t05; \
    t10 =     ~ x1; \
    y1  = t08 ^ t09; \
    t12 = t10 | t07; \
    t13 = y0  | y1; \
    y3  = t02 ^ t12; \
    t15 = t02 ^ t13; \
    t16 = x1  ^ x3; \
    y2  = t16 ^ t15; \
  } while (0)

/* Inverse of Serpent S-box 6, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  */
#define SBOX6_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t05, t06, t07, t08, t09; \
    type t12, t13, t14, t15, t16, t17, t01; \
    t01 = x0  ^ x2; \
    t02 =     ~ x2; \
    t03 = x1  & t01; \
    t04 = x1  | t02; \
    t05 = x3  | t03; \
    t06 = x1  ^ x3; \
    t07 = x0  & t04; \
    t08 = x0  | t02; \
    t09 = t07 ^ t05; \
    y1  = t06 ^ t08; \
    y0  =     ~ t09; \
    t12 = x1  & y0; \
    t13 = t01 & t05; \
    t14 = t01 ^ t12; \
    t15 = t07 ^ t13; \
    t16 = x3  | t02; \
    t17 = x0  ^ y1; \
    y3  = t17 ^ t15; \
    y2  = t16 ^ t14; \
  } while (0)

/* Inverse of Serpent S-box 7, bitsliced: bit i of x0 (low bit of the
   nibble) through x3 (high bit) selects the input nibble for lane i,
   and y0..y3 receive the inverse S-box output in the same layout.
   The inputs are read-only; intermediates live in temporaries of the
   given TYPE.  Also used directly (outside ROUND_INVERSE) for the
   inverse of the final "special" round in serpent_decrypt.  */
#define SBOX7_INVERSE(type, x0, x1, x2, x3, y0, y1, y2, y3) \
  do { \
    type t02, t03, t04, t06, t07, t08, t09; \
    type t10, t11, t13, t14, t15, t16, t01; \
    t01 = x0  & x1; \
    t02 = x0  | x1; \
    t03 = x2  | t01; \
    t04 = x3  & t02; \
    y3  = t03 ^ t04; \
    t06 = x1  ^ t04; \
    t07 = x3  ^ y3; \
    t08 =     ~ t07; \
    t09 = t06 | t08; \
    t10 = x1  ^ x3; \
    t11 = x0  | x3; \
    y1  = x0  ^ t09; \
    t13 = x2  ^ t06; \
    t14 = x2  & t11; \
    t15 = x3  | y1; \
    t16 = t01 | t10; \
    y0  = t13 ^ t15; \
    y2  = t14 ^ t16; \
  } while (0)

/* In-place inverse of Serpent's linear transformation, on 32-bit
   words (one block).  Each step of the forward transformation is
   undone in reverse order; every ROL32 count here is 32 minus the
   forward rotation (forward uses ROL by 22, 5, 7, 1, 3 and 13).  */
#define LINEAR_TRANSFORMATION_INVERSE(x0,x1,x2,x3)	 \
  do {                                                   \
    x2 = ROL32 (x2, 10);                    \
    x0 = ROL32 (x0, 27);                    \
    x2 = x2 ^ x3 ^ (x1 << 7); \
    x0 = x0 ^ x1 ^ x3;        \
    x3 = ROL32 (x3, 25);                     \
    x1 = ROL32 (x1, 31);                     \
    x3 = x3 ^ x2 ^ (x0 << 3); \
    x1 = x1 ^ x0 ^ x2;        \
    x2 = ROL32 (x2, 29);                     \
    x0 = ROL32 (x0, 19);                    \
  } while (0)

/* One full inverse Serpent round: undo the linear transformation,
   apply inverse S-box WHICH, then xor with SUBKEY.  Round inputs are
   x0,x1,x2,x3 (destroyed), and round outputs are y0,y1,y2,y3. */
#define ROUND_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do {							       \
    LINEAR_TRANSFORMATION_INVERSE (x0,x1,x2,x3);	       \
    SBOX##which##_INVERSE(uint32_t, x0,x1,x2,x3, y0,y1,y2,y3);	       \
    KEYXOR(y0,y1,y2,y3, subkey);			       \
  } while (0)

#if HAVE_NATIVE_64_BIT

/* In-place inverse linear transformation, two blocks at a time, with
   each 64-bit variable holding one 32-bit word from each block.  Same
   rotation counts as the 32-bit version; ROL64 and RSHIFT64 (from
   serpent-internal.h) presumably rotate/shift the two 32-bit halves
   independently — NOTE(review): confirm against serpent-internal.h.  */
#define LINEAR_TRANSFORMATION64_INVERSE(x0,x1,x2,x3)	 \
  do {                                                   \
    x2 = ROL64 (x2, 10);                    \
    x0 = ROL64 (x0, 27);                    \
    x2 = x2 ^ x3 ^ RSHIFT64(x1, 7); \
    x0 = x0 ^ x1 ^ x3;        \
    x3 = ROL64 (x3, 25);                     \
    x1 = ROL64 (x1, 31);                     \
    x3 = x3 ^ x2 ^ RSHIFT64(x0, 3); \
    x1 = x1 ^ x0 ^ x2;        \
    x2 = ROL64 (x2, 29);                     \
    x0 = ROL64 (x0, 19);                    \
  } while (0)

/* One full inverse round on two blocks packed into 64-bit words:
   inverse linear transformation, inverse S-box WHICH, then xor with
   SUBKEY.  Inputs x0..x3 are destroyed; outputs land in y0..y3. */
#define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
  do {							       \
    LINEAR_TRANSFORMATION64_INVERSE (x0,x1,x2,x3);	       \
    SBOX##which##_INVERSE(uint64_t, x0,x1,x2,x3, y0,y1,y2,y3);	       \
    KEYXOR64(y0,y1,y2,y3, subkey);			       \
  } while (0)

#endif /* HAVE_NATIVE_64_BIT */

/* Decrypt LENGTH bytes (must be a multiple of SERPENT_BLOCK_SIZE;
   asserted below) from SRC to DST, each 16-byte block independently
   (ECB).  Decryption first undoes the final "special" round (subkeys
   32 and 31 around inverse S-box 7), then runs the remaining inverse
   rounds consuming subkeys 30 down to 0.  On machines with native
   64-bit operations, two blocks are processed in parallel, packed
   into the halves of 64-bit words.  */
void
serpent_decrypt (const struct serpent_ctx *ctx,
		 unsigned length, uint8_t * dst, const uint8_t * src)
{
  assert( !(length % SERPENT_BLOCK_SIZE));

#if HAVE_NATIVE_64_BIT
  /* The 64-bit path below consumes blocks in pairs, so decrypt a
     single odd block here first; this then runs at most once.  */
  if (length & SERPENT_BLOCK_SIZE)
#else
  while (length >= SERPENT_BLOCK_SIZE)
#endif
    {
      uint32_t x0,x1,x2,x3, y0,y1,y2,y3;
      unsigned k;

      /* Serpent is little-endian throughout.  */
      x0 = LE_READ_UINT32 (src);
      x1 = LE_READ_UINT32 (src + 4);
      x2 = LE_READ_UINT32 (src + 8);
      x3 = LE_READ_UINT32 (src + 12);

      /* Inverse of special round */
      KEYXOR (x0,x1,x2,x3, ctx->keys[32]);
      SBOX7_INVERSE (uint32_t, x0,x1,x2,x3, y0,y1,y2,y3);
      KEYXOR (y0,y1,y2,y3, ctx->keys[31]);

      /* Each loop pass undoes eight rounds (subkeys k+7 .. k).  The
	 goto enters the loop mid-body: the round using subkey 31 was
	 already handled above, so the first pass does only the seven
	 rounds for subkeys 30..24 (k = 24).  */
      k = 24;
      goto start32;
      while (k > 0)
	{
	  k -= 8;
	  ROUND_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
	start32:
	  ROUND_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
	}
      
      LE_WRITE_UINT32 (dst, x0);
      LE_WRITE_UINT32 (dst + 4, x1);
      LE_WRITE_UINT32 (dst + 8, x2);
      LE_WRITE_UINT32 (dst + 12, x3);

      src += SERPENT_BLOCK_SIZE;
      dst += SERPENT_BLOCK_SIZE;
      length -= SERPENT_BLOCK_SIZE;
    }
#if HAVE_NATIVE_64_BIT
  /* Bulk loop: two blocks per iteration.  The first block's words go
     into the high 32-bit halves, the second block's into the low
     halves; the stores below unpack them in the same order.  */
  FOR_BLOCKS(length, dst, src, 2*SERPENT_BLOCK_SIZE)
    {
      uint64_t x0,x1,x2,x3, y0,y1,y2,y3;
      unsigned k;

      x0 = LE_READ_UINT32 (src);
      x1 = LE_READ_UINT32 (src + 4);
      x2 = LE_READ_UINT32 (src + 8);
      x3 = LE_READ_UINT32 (src + 12);

      x0 <<= 32; x0 |= LE_READ_UINT32 (src + 16);
      x1 <<= 32; x1 |= LE_READ_UINT32 (src + 20);
      x2 <<= 32; x2 |= LE_READ_UINT32 (src + 24);
      x3 <<= 32; x3 |= LE_READ_UINT32 (src + 28);

      /* Inverse of special round */
      KEYXOR64 (x0,x1,x2,x3, ctx->keys[32]);
      SBOX7_INVERSE (uint64_t, x0,x1,x2,x3, y0,y1,y2,y3);
      KEYXOR64 (y0,y1,y2,y3, ctx->keys[31]);

      /* Same mid-loop entry as the 32-bit code above.  */
      k = 24;
      goto start64;
      while (k > 0)
	{
	  k -= 8;
	  ROUND64_INVERSE (7, ctx->keys[k+7], x0,x1,x2,x3, y0,y1,y2,y3);
	start64:
	  ROUND64_INVERSE (6, ctx->keys[k+6], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND64_INVERSE (5, ctx->keys[k+5], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND64_INVERSE (4, ctx->keys[k+4], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND64_INVERSE (3, ctx->keys[k+3], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND64_INVERSE (2, ctx->keys[k+2], y0,y1,y2,y3, x0,x1,x2,x3);
	  ROUND64_INVERSE (1, ctx->keys[k+1], x0,x1,x2,x3, y0,y1,y2,y3);
	  ROUND64_INVERSE (0, ctx->keys[k], y0,y1,y2,y3, x0,x1,x2,x3);
	}
    
      LE_WRITE_UINT32 (dst + 16, x0);
      LE_WRITE_UINT32 (dst + 20, x1);
      LE_WRITE_UINT32 (dst + 24, x2);
      LE_WRITE_UINT32 (dst + 28, x3);
      x0 >>= 32; LE_WRITE_UINT32 (dst, x0);
      x1 >>= 32; LE_WRITE_UINT32 (dst + 4, x1);
      x2 >>= 32; LE_WRITE_UINT32 (dst + 8, x2);
      x3 >>= 32; LE_WRITE_UINT32 (dst + 12, x3);
    }
#endif /* HAVE_NATIVE_64_BIT */  
}