diff --git a/ChangeLog b/ChangeLog
index a1391ae6816e6a02358f171e6d0ec4656caa14c6..38d313e9281ab505821d09e23a966f6c10d2591d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,7 @@
 2013-04-15  Niels Möller  <nisse@lysator.liu.se>
 
+	* x86_64/umac-nh.asm: New file. 4.4 time speedup.
+
 	* armv7/umac-nh-n.asm: New file. 2.0-2.3 time speedup.
 
 	* testsuite/umac-test.c (test_align): Fixed memory leak.
diff --git a/x86_64/umac-nh.asm b/x86_64/umac-nh.asm
new file mode 100644
index 0000000000000000000000000000000000000000..f9230cd8d68fa497e5f5ada8ca915a245fca19f9
--- /dev/null
+++ b/x86_64/umac-nh.asm
@@ -0,0 +1,68 @@
+C nettle, low-level cryptographic library
+C 
+C Copyright (C) 2013 Niels Möller
+C  
+C The nettle library is free software; you can redistribute it and/or modify
+C it under the terms of the GNU Lesser General Public License as published by
+C the Free Software Foundation; either version 2.1 of the License, or (at your
+C option) any later version.
+C 
+C The nettle library is distributed in the hope that it will be useful, but
+C WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+C or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+C License for more details.
+C 
+C You should have received a copy of the GNU Lesser General Public License
+C along with the nettle library; see the file COPYING.LIB.  If not, write to
+C the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+C MA 02111-1301, USA.
+
+define(<KEY>, <%rdi>)
+define(<LENGTH>, <%rsi>)
+define(<MSG>, <%rdx>)
+
+define(<XA>, <%xmm0>)
+define(<XB>, <%xmm1>)
+define(<XK0>, <%xmm2>)
+define(<XK1>, <%xmm3>)
+define(<XY>, <%xmm4>)
+define(<XT0>, <%xmm5>)
+define(<XT1>, <%xmm6>)
+
+C FIXME: Would be nice if we could force the key array to be 16-byte
+C aligned.
+
+	.file "umac-nh.asm"
+	
+	C uint64_t umac_nh(const uint32_t *key, unsigned length, const uint8_t *msg)
+	C length must be a non-zero multiple of 32
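+	C
+	C The loop below computes, for each 32-byte chunk of the message,
+	C   y += (uint64_t) (m[j] + k[j]) * (m[j+4] + k[j+4]),  j = 0,1,2,3
+	C where m[] are the little-endian 32-bit message words, k[] the
+	C key words, and the inner additions wrap around mod 2^32.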
+	.text
+	ALIGN(4)
+PROLOGUE(_nettle_umac_nh)
+	W64_ENTRY(3, 7)
+	pxor	XY, XY
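+	C XY holds two 64-bit partial sums (low and high qword); they are
+	C folded into a single sum after the loop.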
+.Loop:
+	movups	(KEY), XK0
+	movups	16(KEY), XK1
+	movups	(MSG), XA
+	movups	16(MSG), XB
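+	C Load 32 bytes of key and 32 bytes of message; movups is used
+	C since neither pointer is guaranteed to be 16-byte aligned.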
+	paddd	XK0, XA
+	paddd	XK1, XB
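+	C 32-bit wrap-around additions: XA = m[0..3] + k[0..3],
+	C XB = m[4..7] + k[4..7].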
+	pshufd	$0x31, XA, XT0
+	pshufd	$0x31, XB, XT1
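+	C 0x31 moves dwords 1 and 3 into positions 0 and 2, the lanes
+	C that pmuludq multiplies.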
+	pmuludq	XT0, XT1
+	paddq	XT1, XY	
+	pmuludq	XA, XB
+	paddq	XB, XY
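+	C Each pmuludq forms two 32x32->64 products from dwords 0 and 2,
+	C so all four products of this chunk are accumulated into XY.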
+	C Length is only 32 bits
+	subl	$32, XREG(LENGTH)
+	lea	32(KEY), KEY
+	lea	32(MSG), MSG
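+	C lea does not change the flags, so ja tests the subl above:
+	C loop while the remaining length is still positive.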
+	ja	.Loop
+
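+	C Fold the high qword of XY onto the low qword to form the
+	C final 64-bit sum.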
+	pshufd	$0xe, XY, XT0
+	paddq	XT0, XY
+	C Really a movq, but write as movd to please Apple's assembler
+	movd	XY, %rax
+	W64_EXIT(3, 7)
+	ret
+EPILOGUE(_nettle_umac_nh)