diff --git a/sha256.c b/sha256.c
index 539285cabbd89be8edf55d38678c6e9aba270aa9..c0500a286174ae0c6dc51b03769c5a1db6685824 100644
--- a/sha256.c
+++ b/sha256.c
@@ -62,7 +62,7 @@ K[64] =
   0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL,
 };
 
-#define COMPRESS(digest, data) (_nettle_sha256_compress((digest), (data), K))
+#define COMPRESS(ctx, data) (_nettle_sha256_compress((ctx)->state, (data), K))
 
 /* Initialize the SHA values */
 
@@ -89,7 +89,7 @@ void
 sha256_update(struct sha256_ctx *ctx,
 	      unsigned length, const uint8_t *data)
 {
-  MD_UPDATE (ctx, length, data, COMPRESS);
+  MD_UPDATE (ctx, length, data, COMPRESS, MD_INCR(ctx));
 }
 
 static void
@@ -97,9 +97,23 @@ sha256_write_digest(struct sha256_ctx *ctx,
 		    unsigned length,
 		    uint8_t *digest)
 {
+  uint32_t high, low;
+
   assert(length <= SHA256_DIGEST_SIZE);
 
-  MD_FINAL(ctx, 32, 9, COMPRESS, WRITE_UINT32);
+  MD_PAD(ctx, 8, COMPRESS);
+
+  /* There are 512 = 2^9 bits in one block */
+  high = (ctx->count_high << 9) | (ctx->count_low >> 23);
+  low = (ctx->count_low << 9) | (ctx->index << 3);
+
+  /* This is slightly inefficient, as the numbers are converted to
+     big-endian format, and will be converted back by the compression
+     function. It's probably not worth the effort to fix this. */
+  WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 8), high);
+  WRITE_UINT32(ctx->block + (SHA256_DATA_SIZE - 4), low);
+  COMPRESS(ctx, ctx->block);
+
   _nettle_write_be32(length, digest, ctx->state);
 }
 
diff --git a/sha512.c b/sha512.c
index 195fb670015409cd4472730df2147c3b67a3c4df..eb7a29e4ab21d3d895b1de8859cdb6ecabe77f5c 100644
--- a/sha512.c
+++ b/sha512.c
@@ -104,7 +104,7 @@ K[80] =
   0x5FCB6FAB3AD6FAECULL,0x6C44198C4A475817ULL,
 };
 
-#define COMPRESS(digest, data) (_nettle_sha512_compress((digest), (data), K))
+#define COMPRESS(ctx, data) (_nettle_sha512_compress((ctx)->state, (data), K))
 
 void
 sha512_init(struct sha512_ctx *ctx)
@@ -139,7 +139,7 @@ void
 sha512_update(struct sha512_ctx *ctx,
 	      unsigned length, const uint8_t *data)
 {
-  MD_UPDATE (ctx, length, data, COMPRESS);
+  MD_UPDATE (ctx, length, data, COMPRESS, MD_INCR(ctx));
 }
 
 static void
@@ -147,15 +147,27 @@ sha512_write_digest(struct sha512_ctx *ctx,
 		    unsigned length,
 		    uint8_t *digest)
 {
+  uint64_t high, low;
+
   unsigned i;
   unsigned words;
   unsigned leftover;
 
   assert(length <= SHA512_DIGEST_SIZE);
-
-  /* There are 1024 = 2^10 bits in one block */
-  MD_FINAL(ctx, 64, 10, COMPRESS, WRITE_UINT64);
-
+
+  MD_PAD(ctx, 16, COMPRESS);
+
+  /* There are 1024 = 2^10 bits in one block */
+  high = (ctx->count_high << 10) | (ctx->count_low >> 54);
+  low = (ctx->count_low << 10) | (ctx->index << 3);
+
+  /* This is slightly inefficient, as the numbers are converted to
+     big-endian format, and will be converted back by the compression
+     function. It's probably not worth the effort to fix this. */
+  WRITE_UINT64(ctx->block + (SHA512_DATA_SIZE - 16), high);
+  WRITE_UINT64(ctx->block + (SHA512_DATA_SIZE - 8), low);
+  COMPRESS(ctx, ctx->block);
+
   words = length / 8;
   leftover = length % 8;
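
Note on the explicit finalization above: MD_PAD pads the buffered block but leaves the last 8 bytes (SHA-256) or 16 bytes (SHA-512) free, and the stores that follow write the total message length in bits there, big-endian, before the final COMPRESS call. The stand-alone sketch below illustrates only that bit-count arithmetic for the SHA-256 case; the struct and helper names are invented for the example and are not Nettle API. It assumes count_high:count_low counts processed 64-byte blocks and index counts bytes buffered in the current block, which is how the patch uses those fields.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the bookkeeping fields used in the patch:
   count_high:count_low counts 64-byte blocks processed so far,
   index is the number of bytes buffered in the current block. */
struct bit_count_demo
{
  uint32_t count_high;
  uint32_t count_low;
  uint32_t index;
};

/* Store a 32-bit value big-endian, in the spirit of WRITE_UINT32. */
static void
write_be32(uint8_t *p, uint32_t v)
{
  p[0] = (v >> 24) & 0xff;
  p[1] = (v >> 16) & 0xff;
  p[2] = (v >> 8) & 0xff;
  p[3] = v & 0xff;
}

int
main(void)
{
  /* Example: 3 full blocks plus 10 buffered bytes = 202 bytes total. */
  struct bit_count_demo ctx = { 0, 3, 10 };
  uint8_t tail[8];

  /* There are 512 = 2^9 bits in one block, so the block count is
     shifted left by 9; the buffered bytes contribute index * 8 bits. */
  uint32_t high = (ctx.count_high << 9) | (ctx.count_low >> 23);
  uint32_t low = (ctx.count_low << 9) | (ctx.index << 3);

  /* These 8 bytes would occupy block[SHA256_DATA_SIZE - 8 .. - 1]. */
  write_be32(tail, high);
  write_be32(tail + 4, low);

  printf("bit count = %llu\n",
         ((unsigned long long) high << 32) | low);
  return 0;
}

Running the sketch prints bit count = 1616, i.e. 3 * 64 + 10 = 202 bytes hashed so far; SHA-512 does the same with 64-bit halves and a shift of 10, since its blocks are 1024 bits.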