path: root/arch/x86/crypto/ghash-clmulni-intel_asm.S
/*
 * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
 * instructions. This file contains the accelerated part of the GHASH
 * implementation. More information about PCLMULQDQ can be found at:
 *
 * http://software.intel.com/en-us/articles/carry-less-multiplication-and-its-usage-for-computing-the-gcm-mode/
 *
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *	     Vinodh Gopal
 *	     Erdinc Ozturk
 *	     Deniz Karakoyunlu
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
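
/*
 * GHASH is multiplication in GF(2^128) with the reduction polynomial
 * g(x) = x^128 + x^7 + x^2 + x + 1.  PCLMULQDQ computes the carry-less
 * (GF(2) polynomial) product of two 64-bit halves of its operands,
 * selected by the immediate byte (0x00 = low*low, 0x11 = high*high),
 * giving a 128-bit result.  "Carry-less" means the partial products are
 * combined with XOR instead of addition; e.g. (x + 1) * (x + 1) =
 * x^2 + 1, because the two middle x terms cancel.
 */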

#include <linux/linkage.h>
#include <asm/inst.h>

.data

.align 16
.Lbswap_mask:
	.octa 0x000102030405060708090a0b0c0d0e0f

#define DATA	%xmm0
#define SHASH	%xmm1
#define T1	%xmm2
#define T2	%xmm3
#define T3	%xmm4
#define BSWAP	%xmm5
#define IN1	%xmm6

.text

/*
 * __clmul_gf128mul_ble:	internal ABI
 * input:
 *	DATA:			operand1
 *	SHASH:			operand2, hash_key << 1 mod poly
 * output:
 *	DATA:			operand1 * operand2 mod poly
 * changed:
 *	T1
 *	T2
 *	T3
 */
__clmul_gf128mul_ble:
	movaps DATA, T1
	pshufd $0b01001110, DATA, T2
	pshufd $0b01001110, SHASH, T3
	pxor DATA, T2
	pxor SHASH, T3
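	# T2 = <a1+a0 : a1+a0>, T3 = <b1+b0 : b1+b0> ('+' is XOR); these
	# feed the Karatsuba middle product, so three PCLMULQDQs suffice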

	PCLMULQDQ 0x00 SHASH DATA	# DATA = a0 * b0
	PCLMULQDQ 0x11 SHASH T1		# T1 = a1 * b1
	PCLMULQDQ 0x00 T3 T2		# T2 = (a1 + a0) * (b1 + b0)
	pxor DATA, T2
	pxor T1, T2			# T2 = a0 * b1 + a1 * b0

	movaps T2, T3
	pslldq $8, T3
	psrldq $8, T2
	pxor T3, DATA
	pxor T2, T1			# <T1:DATA> is result of
					# carry-less multiplication

	# first phase of the reduction
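	# (T3 = DATA * (x^63 + x^62 + x^57), computed per 64-bit qword
	#  with shift-and-xor steps and then realigned into <T1:DATA>)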
	movaps DATA, T3
	psllq $1, T3
	pxor DATA, T3
	psllq $5, T3
	pxor DATA, T3
	psllq $57, T3
	movaps T3, T2
	pslldq $8, T2
	psrldq $8, T3
	pxor T2, DATA
	pxor T3, T1

	# second phase of the reduction
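	# (T2 becomes DATA>>1 ^ DATA>>2 ^ DATA>>7 per 64-bit qword,
	#  completing the reduction mod x^128 + x^7 + x^2 + x + 1 for
	#  the bit-reflected ("ble") operand order used here)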
	movaps DATA, T2
	psrlq $5, T2
	pxor DATA, T2
	psrlq $1, T2
	pxor DATA, T2
	psrlq $1, T2
	pxor T2, T1
	pxor T1, DATA
	ret
ENDPROC(__clmul_gf128mul_ble)
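
/*
 * For reference only: a rough userspace C-intrinsics sketch of the
 * Karatsuba multiply performed above (the two reduction phases are
 * omitted).  It is not part of the kernel build; names are illustrative
 * and it assumes a compiler invoked with -mpclmul.
 *
 *	#include <immintrin.h>
 *
 *	// a = <a1:a0>, b = <b1:b0>; '+' in the comments means XOR
 *	static void clmul_karatsuba(__m128i a, __m128i b,
 *				    __m128i *lo, __m128i *hi)
 *	{
 *		__m128i a0b0 = _mm_clmulepi64_si128(a, b, 0x00); // a0*b0
 *		__m128i a1b1 = _mm_clmulepi64_si128(a, b, 0x11); // a1*b1
 *		// 0x4e swaps the 64-bit halves, like pshufd $0b01001110
 *		__m128i am = _mm_xor_si128(a, _mm_shuffle_epi32(a, 0x4e));
 *		__m128i bm = _mm_xor_si128(b, _mm_shuffle_epi32(b, 0x4e));
 *		__m128i mid = _mm_clmulepi64_si128(am, bm, 0x00);
 *		// (a1+a0)*(b1+b0) + a1*b1 + a0*b0 = a0*b1 + a1*b0
 *		mid = _mm_xor_si128(mid, _mm_xor_si128(a0b0, a1b1));
 *		// spread the middle term across the 256-bit product <hi:lo>
 *		*lo = _mm_xor_si128(a0b0, _mm_slli_si128(mid, 8));
 *		*hi = _mm_xor_si128(a1b1, _mm_srli_si128(mid, 8));
 *	}
 */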

/* void clmul_ghash_mul(char *dst, const be128 *shash) */
ENTRY(clmul_ghash_mul)
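	# SysV args: %rdi = dst (16-byte GHASH state), %rsi = shash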
	movups (%rdi), DATA
	movups (%rsi), SHASH
	movaps .Lbswap_mask, BSWAP
	PSHUFB_XMM BSWAP DATA
	call __clmul_gf128mul_ble
	PSHUFB_XMM BSWAP DATA
	movups DATA, (%rdi)
	ret
ENDPROC(clmul_ghash_mul)
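
/*
 * Both entry points clobber %xmm0-%xmm6 without saving them, so the C
 * glue code is expected to bracket calls with kernel_fpu_begin()/
 * kernel_fpu_end().  A minimal calling sketch (the context layout is
 * illustrative, not the actual glue code):
 *
 *	kernel_fpu_begin();
 *	clmul_ghash_mul(dst, &ctx->shash);
 *	kernel_fpu_end();
 */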

/*
 * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
 *			   const be128 *shash);
 */
ENTRY(clmul_ghash_update)
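	# SysV args: %rdi = dst, %rsi = src, %rdx = srclen, %rcx = shash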
	cmp $16, %rdx
	jb .Lupdate_just_ret	# check length
	movaps .Lbswap_mask, BSWAP
	movups (%rdi), DATA
	movups (%rcx), SHASH
	PSHUFB_XMM BSWAP DATA
.align 4
.Lupdate_loop:
	movups (%rsi), IN1
	PSHUFB_XMM BSWAP IN1
	pxor IN1, DATA
	call __clmul_gf128mul_ble
	sub $16, %rdx
	add $16, %rsi
	cmp $16, %rdx
	jge .Lupdate_loop
	PSHUFB_XMM BSWAP DATA
	movups DATA, (%rdi)
.Lupdate_just_ret:
	ret
ENDPROC(clmul_ghash_update)
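
/*
 * Note that clmul_ghash_update consumes only whole 16-byte blocks: the
 * loop runs while srclen >= 16 and any trailing partial block is left
 * for the caller to buffer.  A rough caller sketch (names hypothetical),
 * again inside kernel_fpu_begin()/kernel_fpu_end():
 *
 *	clmul_ghash_update(state, src, srclen & ~15u, &key->shash);
 *
 * and the last (srclen & 15) bytes are carried over by the caller.
 */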