Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
N
nettle
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Deploy
Releases
Container registry
Model registry
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Dmitry Baryshkov
nettle
Commits
0325eaf3
Commit
0325eaf3
authored
7 years ago
by
Niels Möller
Browse files
Options
Downloads
Patches
Plain Diff
CTR mode optimizations for 16-byte block size.
parent
e09ed92e
Branches
ctr-opt
Branches containing commit
Tags
Tags containing commit
No related merge requests found
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
ChangeLog
+7
-0
7 additions, 0 deletions
ChangeLog
ctr.c
+112
-20
112 additions, 20 deletions
ctr.c
with
119 additions
and
20 deletions
ChangeLog
+
7
−
0
View file @
0325eaf3
...
@@ -9,6 +9,13 @@
...
@@ -9,6 +9,13 @@
2018-01-08 Niels Möller <nisse@lysator.liu.se>
2018-01-08 Niels Möller <nisse@lysator.liu.se>
* ctr.c (ctr_crypt16): New function, with optimizations specific
to 16-byte block size.
(ctr_fill16): New helper function, definition depending on
WORDS_BIGENDIAN, and little endian version requiring
HAVE_BUILTIN_BSWAP64.
(ctr_crypt): Use ctr_crypt16, when appropriate.
* nettle-types.h (union nettle_block16): Add uint64_t field.
* nettle-types.h (union nettle_block16): Add uint64_t field.
* configure.ac: Check for __builtin_bswap64, define
* configure.ac: Check for __builtin_bswap64, define
...
...
This diff is collapsed.
Click to expand it.
ctr.c
+
112
−
20
View file @
0325eaf3
...
@@ -62,21 +62,114 @@ ctr_fill (size_t block_size, uint8_t *ctr, size_t length, uint8_t *buffer)
...
@@ -62,21 +62,114 @@ ctr_fill (size_t block_size, uint8_t *ctr, size_t length, uint8_t *buffer)
return
i
;
return
i
;
}
}
#if WORDS_BIGENDIAN
# define USE_CTR_CRYPT16 1
/* Expand the 16-byte big-endian counter CTR into BLOCKS consecutive
   counter values, stored as pairs of uint64_t in BUFFER, leaving CTR
   advanced past the last value produced.  On a big-endian machine the
   stored byte order matches native integer order, so plain 64-bit
   arithmetic on the two halves works directly. */
static void
ctr_fill16(uint8_t *ctr, size_t blocks, uint64_t *buffer)
{
  uint64_t c_hi, c_lo;

  c_hi = READ_UINT64(ctr);
  c_lo = READ_UINT64(ctr + 8);

  while (blocks-- > 0)
    {
      *buffer++ = c_hi;
      *buffer++ = c_lo;
      /* 128-bit increment: propagate the carry when the low half
	 wraps to zero. */
      if (++c_lo == 0)
	c_hi++;
    }

  WRITE_UINT64(ctr, c_hi);
  WRITE_UINT64(ctr + 8, c_lo);
}
#else /* !WORDS_BIGENDIAN */
# if HAVE_BUILTIN_BSWAP64
# define USE_CTR_CRYPT16 1
/* Little-endian variant: fill BUFFER with BLOCKS consecutive values
   of the 16-byte big-endian counter CTR, updating CTR in place.  The
   values written to BUFFER are the raw big-endian memory images, fed
   to the cipher elsewhere. */
static void
ctr_fill16(uint8_t *ctr, size_t blocks, uint64_t *buffer)
{
  uint64_t hi, lo;
  /* Read hi in native endianness: on this (little-endian) branch,
     LE_READ_UINT64 yields the raw byte image of the high half.  It is
     only copied to the buffer, never used for arithmetic except in
     the rare carry case below. */
  hi = LE_READ_UINT64(ctr);
  /* lo is byte-swapped into native order so that ++lo counts. */
  lo = READ_UINT64(ctr + 8);

  while (blocks-- > 0)
    {
      *buffer++ = hi;
      /* Swap lo back to its big-endian memory image for output. */
      *buffer++ = __builtin_bswap64(lo);
      if (!++lo)
	/* Carry out of the low half: convert hi to arithmetic
	   (big-endian) order, increment, and swap back to the raw
	   image. */
	hi = __builtin_bswap64(__builtin_bswap64(hi) + 1);
    }

  LE_WRITE_UINT64(ctr, hi);
  WRITE_UINT64(ctr + 8, lo);
}
# else /* ! HAVE_BUILTIN_BSWAP64 */
# define USE_CTR_CRYPT16 0
# endif
#endif /* !WORDS_BIGENDIAN */
#if USE_CTR_CRYPT16
/* CTR en/decryption specialized for ciphers with a 16-byte block
   size.  Returns the number of bytes processed: a multiple of 16 in
   the aligned fast path (any trailing partial block is left to the
   caller), otherwise all of LENGTH. */
static size_t
ctr_crypt16(const void *ctx, nettle_cipher_func *f,
	    uint8_t *ctr,
	    size_t length, uint8_t *dst,
	    const uint8_t *src)
{
  if (dst != src && !((uintptr_t) dst % sizeof(uint64_t)))
    {
      /* Fast path: DST is 8-byte aligned and distinct from SRC, so
	 the counter stream can be generated directly into DST,
	 encrypted in place, and xored with SRC. */
      size_t blocks = length / 16u;
      ctr_fill16 (ctr, blocks, (uint64_t *) dst);
      f(ctx, blocks * 16, dst, dst);
      memxor(dst, src, blocks * 16);
      return blocks * 16;
    }
  else
    {
      /* Construct an aligned buffer of consecutive counter values, of
	 size at most CTR_BUFFER_LIMIT. */
      TMP_DECL(buffer, union nettle_block16, CTR_BUFFER_LIMIT / 16);
      size_t blocks = (length + 15) / 16u;
      size_t i;
      TMP_ALLOC(buffer, MIN(blocks, CTR_BUFFER_LIMIT / 16));

      for (i = 0; blocks >= CTR_BUFFER_LIMIT / 16;
	   i += CTR_BUFFER_LIMIT, blocks -= CTR_BUFFER_LIMIT / 16)
	{
	  ctr_fill16 (ctr, CTR_BUFFER_LIMIT / 16, buffer->u64);
	  f(ctx, CTR_BUFFER_LIMIT, buffer->b, buffer->b);
	  if (length - i < CTR_BUFFER_LIMIT)
	    goto done;
	  /* BUG FIX: xor at the current offset i.  The original
	     called memxor3(dst, src, ...) here, xoring every full
	     chunk over the start of DST/SRC and clobbering earlier
	     output; the done: path below already used dst + i /
	     src + i. */
	  memxor3 (dst + i, src + i, buffer->b, CTR_BUFFER_LIMIT);
	}
      if (blocks > 0)
	{
	  /* Final, possibly partial, chunk. */
	  assert (length - i < CTR_BUFFER_LIMIT);
	  ctr_fill16 (ctr, blocks, buffer->u64);
	  f(ctx, blocks * 16, buffer->b, buffer->b);
	done:
	  memxor3 (dst + i, src + i, buffer->b, length - i);
	}
      return length;
    }
}
#endif /* USE_CTR_CRYPT16 */
void
void
ctr_crypt
(
const
void
*
ctx
,
nettle_cipher_func
*
f
,
ctr_crypt
(
const
void
*
ctx
,
nettle_cipher_func
*
f
,
size_t
block_size
,
uint8_t
*
ctr
,
size_t
block_size
,
uint8_t
*
ctr
,
size_t
length
,
uint8_t
*
dst
,
size_t
length
,
uint8_t
*
dst
,
const
uint8_t
*
src
)
const
uint8_t
*
src
)
{
{
if
(
src
!=
dst
)
#if USE_CTR_CRYPT16
{
if
(
block_size
==
16
)
if
(
length
==
block_size
)
{
{
f
(
ctx
,
block_size
,
dst
,
ctr
);
size_t
done
=
ctr_crypt16
(
ctx
,
f
,
ctr
,
length
,
dst
,
src
);
INCREMENT
(
block_size
,
ctr
);
length
-=
done
;
memxor
(
dst
,
src
,
block_size
);
src
+=
done
;
dst
+=
done
;
}
}
else
#endif
if
(
src
!=
dst
)
{
{
size_t
filled
=
ctr_fill
(
block_size
,
ctr
,
length
,
dst
);
size_t
filled
=
ctr_fill
(
block_size
,
ctr
,
length
,
dst
);
...
@@ -85,13 +178,12 @@ ctr_crypt(const void *ctx, nettle_cipher_func *f,
...
@@ -85,13 +178,12 @@ ctr_crypt(const void *ctx, nettle_cipher_func *f,
if
(
filled
<
length
)
if
(
filled
<
length
)
{
{
TMP_DECL
(
b
uffer
,
uint8_t
,
NETTLE_MAX_CIPHER_BLOCK_SIZE
);
TMP_DECL
(
b
lock
,
uint8_t
,
NETTLE_MAX_CIPHER_BLOCK_SIZE
);
TMP_ALLOC
(
b
uffer
,
block_size
);
TMP_ALLOC
(
b
lock
,
block_size
);
f
(
ctx
,
block_size
,
b
uffer
,
ctr
);
f
(
ctx
,
block_size
,
b
lock
,
ctr
);
INCREMENT
(
block_size
,
ctr
);
INCREMENT
(
block_size
,
ctr
);
memxor3
(
dst
+
filled
,
src
+
filled
,
buffer
,
length
-
filled
);
memxor3
(
dst
+
filled
,
src
+
filled
,
block
,
length
-
filled
);
}
}
}
}
}
else
else
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment