Commit 8a56233b authored by Niels Möller's avatar Niels Möller

Use ROTL32 in the serpent code.

parent e4a28f55
...@@ -2,9 +2,11 @@ ...@@ -2,9 +2,11 @@
* macros.h (ROTL32): New macro, to replace (almost) all other
rotation macros.
* aes-set-encrypt-key.c: Include macros.h.
(aes_set_encrypt_key): Use ROTL32.
* aes-internal.h (ROTBYTE, ROTRBYTE): Deleted macros.
* camellia-internal.h (ROL32): Deleted macro.
(ROTL128): Renamed for consistency, from...
(ROL128): ... old name.
...@@ -13,9 +15,19 @@ ...@@ -13,9 +15,19 @@
* cast128.c (ROL): Deleted macro.
(F1, F2, F3): Updated to use ROTL32 (reversed order of arguments).
Also added proper do { ... } while (0) wrappers.
* ripemd160-compress.c (ROL32): Deleted macro.
(R): Updated to use ROTL32 (reversed order of arguments).
* serpent-internal.h (ROL32): Deleted macro.
(ROTL64): Renamed (from ROL64) and reorderd arguments, for
consistency.
(RSHIFT64): Reordered arguments, for consistency.
* serpent-decrypt.c: Updated for renamed rotation macros, with
reversed argument order.
* serpent-encrypt.c: Likewise.
* serpent-set-key.c: Likewise.
2012-03-30 Niels Möller <nisse@lysator.liu.se>
* nettle-internal.c (nettle_salsa20): Cipher struct for
......
...@@ -412,16 +412,16 @@ ...@@ -412,16 +412,16 @@
/* In-place inverse of serpent's linear transformation on the four
   32-bit state words x0..x3.  Rotation counts (10, 27, 25, 31, 29,
   19) and shifts undo LINEAR_TRANSFORMATION in reverse order.
   Relies on ROTL32 (count, word) from macros.h. */
#define LINEAR_TRANSFORMATION_INVERSE(x0,x1,x2,x3)  \
  do {                                              \
    x2 = ROTL32 (10, x2);                           \
    x0 = ROTL32 (27, x0);                           \
    x2 = x2 ^ x3 ^ (x1 << 7);                       \
    x0 = x0 ^ x1 ^ x3;                              \
    x3 = ROTL32 (25, x3);                           \
    x1 = ROTL32 (31, x1);                           \
    x3 = x3 ^ x2 ^ (x0 << 3);                       \
    x1 = x1 ^ x0 ^ x2;                              \
    x2 = ROTL32 (29, x2);                           \
    x0 = ROTL32 (19, x0);                           \
  } while (0)
/* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
...@@ -438,16 +438,16 @@ ...@@ -438,16 +438,16 @@
/* In-place inverse linear transformation on 64-bit words, where each
   32-bit half is processed independently (presumably two cipher
   blocks in parallel — the half-wise semantics come from ROTL64 and
   RSHIFT64 in serpent-internal.h).  Same rotation/shift schedule as
   the 32-bit LINEAR_TRANSFORMATION_INVERSE. */
#define LINEAR_TRANSFORMATION64_INVERSE(x0,x1,x2,x3)  \
  do {                                                \
    x2 = ROTL64 (10, x2);                             \
    x0 = ROTL64 (27, x0);                             \
    x2 = x2 ^ x3 ^ RSHIFT64(7, x1);                   \
    x0 = x0 ^ x1 ^ x3;                                \
    x3 = ROTL64 (25, x3);                             \
    x1 = ROTL64 (31, x1);                             \
    x3 = x3 ^ x2 ^ RSHIFT64(3, x0);                   \
    x1 = x1 ^ x0 ^ x2;                                \
    x2 = ROTL64 (29, x2);                             \
    x0 = ROTL64 (19, x0);                             \
  } while (0)
#define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \ #define ROUND64_INVERSE(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
......
...@@ -386,16 +386,16 @@ ...@@ -386,16 +386,16 @@
/* In-place forward linear transformation of serpent on the four
   32-bit state words x0..x3.  Rotation counts are 13, 3, 1, 7, 5, 22;
   ROTL32 (count, word) comes from macros.h. */
#define LINEAR_TRANSFORMATION(x0,x1,x2,x3)  \
  do {                                      \
    x0 = ROTL32 (13, x0);                   \
    x2 = ROTL32 (3, x2);                    \
    x1 = x1 ^ x0 ^ x2;                      \
    x3 = x3 ^ x2 ^ (x0 << 3);               \
    x1 = ROTL32 (1, x1);                    \
    x3 = ROTL32 (7, x3);                    \
    x0 = x0 ^ x1 ^ x3;                      \
    x2 = x2 ^ x3 ^ (x1 << 7);               \
    x0 = ROTL32 (5, x0);                    \
    x2 = ROTL32 (22, x2);                   \
  } while (0)
/* Round inputs are x0,x1,x2,x3 (destroyed), and round outputs are
...@@ -411,16 +411,16 @@ ...@@ -411,16 +411,16 @@
/* Forward linear transformation on 64-bit words, each 32-bit half
   handled independently (presumably two cipher blocks at once — see
   ROTL64/RSHIFT64 in serpent-internal.h for the half-wise behavior).
   Mirrors the 32-bit LINEAR_TRANSFORMATION schedule. */
#define LINEAR_TRANSFORMATION64(x0,x1,x2,x3)  \
  do {                                        \
    x0 = ROTL64 (13, x0);                     \
    x2 = ROTL64 (3, x2);                      \
    x1 = x1 ^ x0 ^ x2;                        \
    x3 = x3 ^ x2 ^ RSHIFT64(3, x0);           \
    x1 = ROTL64 (1, x1);                      \
    x3 = ROTL64 (7, x3);                      \
    x0 = x0 ^ x1 ^ x3;                        \
    x2 = x2 ^ x3 ^ RSHIFT64(7, x1);           \
    x0 = ROTL64 (5, x0);                      \
    x2 = ROTL64 (22, x2);                     \
  } while (0)
#define ROUND64(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \ #define ROUND64(which, subkey, x0,x1,x2,x3, y0,y1,y2,y3) \
......
...@@ -41,9 +41,6 @@ ...@@ -41,9 +41,6 @@
#ifndef NETTLE_SERPENT_INTERNAL_H_INCLUDED #ifndef NETTLE_SERPENT_INTERNAL_H_INCLUDED
#define NETTLE_SERPENT_INTERNAL_H_INCLUDED #define NETTLE_SERPENT_INTERNAL_H_INCLUDED
/* FIXME: Unify ROL macros used here, in camellia.c and cast128.c. */
#define ROL32(x,n) ((((x))<<(n)) | (((x))>>(32-(n))))
#define KEYXOR(x0,x1,x2,x3, subkey) \ #define KEYXOR(x0,x1,x2,x3, subkey) \
do { \ do { \
(x0) ^= (subkey)[0]; \ (x0) ^= (subkey)[0]; \
...@@ -54,7 +51,7 @@ ...@@ -54,7 +51,7 @@
#if HAVE_NATIVE_64_BIT #if HAVE_NATIVE_64_BIT
/* Operate independently on both halves of a 64-bit word. */ /* Operate independently on both halves of a 64-bit word. */
/* Rotate each 32-bit half of the 64-bit word x left by n bits,
   independently — bits never cross between halves.  The masks clear
   the n bits that a plain 64-bit shift would carry into the other
   half.  Valid for 0 < n < 32. */
#define ROTL64(n,x)                                                 \
  (((x) << (n) & ~((((uint64_t) 1 << (n))-1) << 32))                \
   |(((x) >> (32-(n))) & ~((((uint64_t) 1 << (32-(n)))-1) << (n))))
...@@ -67,7 +64,7 @@ ...@@ -67,7 +64,7 @@
_sk = (subkey)[3]; _sk |= _sk << 32; (x3) ^= _sk; \ _sk = (subkey)[3]; _sk |= _sk << 32; (x3) ^= _sk; \
} while (0) } while (0)
/* Shift each 32-bit half of x left by n bits independently (despite
   the name, the visible operation is a left shift); the mask drops
   the bits that would cross from the low half into the high half.
   Valid for 0 < n < 32. */
#define RSHIFT64(n,x)                                        \
  ( ((x) << (n)) & ~((((uint64_t) 1 << (n)) - 1) << 32))
#endif /* HAVE_NATIVE_64_BIT */ #endif /* HAVE_NATIVE_64_BIT */
......
...@@ -270,7 +270,7 @@ ...@@ -270,7 +270,7 @@
do { \ do { \
uint32_t _wn = (w)[(i)] ^ (w)[((i)+3)&7] ^ w[((i)+5)&7] \ uint32_t _wn = (w)[(i)] ^ (w)[((i)+3)&7] ^ w[((i)+5)&7] \
^ w[((i)+7)&7] ^ PHI ^ (k)++; \ ^ w[((i)+7)&7] ^ PHI ^ (k)++; \
((w)[(i)] = ROL32(_wn, 11)); \ ((w)[(i)] = ROTL32(11, _wn)); \
} while (0) } while (0)
/* Note: Increments k four times and keys once */ /* Note: Increments k four times and keys once */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment