/* GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see http://www.gnu.org/licenses
 *
 * Please visit http://www.xyratex.com/contact if you need additional
 * information or have any questions.
 *
 * GPL HEADER END
 */

/*
 * Copyright 2012 Xyratex Technology Limited
 *
 * Using the hardware-provided PCLMULQDQ instruction to accelerate the CRC32
 * calculation.
 * CRC32 polynomial: 0x04c11db7 (BE) / 0xEDB88320 (LE)
 * PCLMULQDQ is Intel's carry-less multiplication instruction (the CLMUL
 * extension); the reference can be found at:
 * http://www.intel.com/products/processor/manuals/
 * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
 * Volume 2B: Instruction Set Reference, N-Z
 *
 * Authors:   Gregory Prestas <Gregory_Prestas@us.xyratex.com>
 *            Alexander Boyko <Alexander_Boyko@xyratex.com>
 */
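/*
 * For reference, the reflected CRC32 that this routine accelerates can be
 * written in plain C as the bit-serial loop below (polynomial 0xEDB88320,
 * no pre/post inversion, same convention as the kernel's crc32_le()).
 * This is only an illustrative sketch, not part of the build; the name
 * crc32_le_ref is made up for the example.  Callers of the real interface
 * typically seed with ~0 and invert the final result.
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t crc32_le_ref(uint32_t crc, const unsigned char *p,
 *				     size_t len)
 *	{
 *		while (len--) {
 *			crc ^= *p++;			// inject next byte
 *			for (int i = 0; i < 8; i++)	// one bit per step
 *				crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
 *		}
 *		return crc;
 *	}
 */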

#include <linux/linkage.h>
#include <asm/inst.h>


.align 16
/*
 * [(x^(4*128+32) mod P(x) << 32)]' << 1   = 0x154442bd4
 * #define CONSTANT_R1  0x154442bd4LL
 *
 * [(x^(4*128-32) mod P(x) << 32)]' << 1   = 0x1c6e41596
 * #define CONSTANT_R2  0x1c6e41596LL
 */
.Lconstant_R2R1:
	.octa 0x00000001c6e415960000000154442bd4
/*
 * [(x^(128+32) mod P(x) << 32)]'   << 1   = 0x1751997d0
 * #define CONSTANT_R3  0x1751997d0LL
 *
 * [(x^(128-32) mod P(x) << 32)]'   << 1   = 0x0ccaa009e
 * #define CONSTANT_R4  0x0ccaa009eLL
 */
.Lconstant_R4R3:
	.octa 0x00000000ccaa009e00000001751997d0
/*
 * [(x^64 mod P(x) << 32)]'         << 1   = 0x163cd6124
 * #define CONSTANT_R5  0x163cd6124LL
 */
.Lconstant_R5:
	.octa 0x00000000000000000000000163cd6124
.Lconstant_mask32:
	.octa 0x000000000000000000000000FFFFFFFF
/*
 * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
 *
 * Barrett Reduction constant (u64') = u' = (x^64 / P(x))' = 0x1F7011641LL
 * #define CONSTANT_RU  0x1F7011641LL
 */
.Lconstant_RUpoly:
	.octa 0x00000001F701164100000001DB710641
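/*
 * The folding constants above follow the formula documented next to each
 * value: take x^n mod P(x) over GF(2) for the full polynomial
 * P(x) = 0x104C11DB7, bit-reflect the result, and shift left by one.  A
 * sketch of a generator is shown below; it is not part of the build, and
 * the helper names (xtimes_mod_p, xpow_mod_p, bitrev32, fold_constant) are
 * made up for the illustration.  The Barrett constant RU = (x^64 / P(x))'
 * and the reflected polynomial are derived differently and not covered here.
 *
 *	#include <stdint.h>
 *
 *	// Multiply by x and reduce mod P(x); bit i holds the x^i coefficient.
 *	static uint32_t xtimes_mod_p(uint32_t r)
 *	{
 *		uint32_t hibit = r & 0x80000000u;
 *
 *		r <<= 1;
 *		if (hibit)
 *			r ^= 0x04C11DB7u;	// fold the x^32 term back in
 *		return r;
 *	}
 *
 *	// x^n mod P(x) over GF(2)
 *	static uint32_t xpow_mod_p(unsigned int n)
 *	{
 *		uint32_t r = 1;			// start from x^0
 *
 *		while (n--)
 *			r = xtimes_mod_p(r);
 *		return r;
 *	}
 *
 *	// bit reversal, the "'" operation used in the comments above
 *	static uint32_t bitrev32(uint32_t v)
 *	{
 *		uint32_t r = 0;
 *
 *		for (int i = 0; i < 32; i++)
 *			r |= ((v >> i) & 1u) << (31 - i);
 *		return r;
 *	}
 *
 *	// [(x^n mod P(x) << 32)]' << 1
 *	static uint64_t fold_constant(unsigned int n)
 *	{
 *		return (uint64_t)bitrev32(xpow_mod_p(n)) << 1;
 *	}
 *
 *	// fold_constant(4*128+32) -> R1    fold_constant(4*128-32) -> R2
 *	// fold_constant(128+32)   -> R3    fold_constant(128-32)   -> R4
 *	// fold_constant(64)       -> R5
 */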

#define CONSTANT %xmm0

#ifdef __x86_64__
#define BUF     %rdi
#define LEN     %rsi
#define CRC     %edx
#else
#define BUF     %eax
#define LEN     %edx
#define CRC     %ecx
#endif


.text
/**
 * Calculate crc32
 * BUF - buffer (16-byte aligned)
 * LEN - length of the buffer in bytes (a multiple of 16, greater than 63)
 * CRC - initial crc32 value
 * return %eax - resulting crc32
 * uint crc32_pclmul_le_16(unsigned char const *buffer,
 *			   size_t len, uint crc32)
 */
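/*
 * A minimal usage sketch (not the glue code shipped with this file),
 * showing how a caller could honour the contract above: a 16-byte aligned
 * buffer, a length that is a multiple of 16 and at least 64, and SIMD
 * registers guarded by kernel_fpu_begin()/kernel_fpu_end().  The wrapper
 * name crc32_pclmul is made up for the example; crc32_le() is the kernel's
 * generic byte-wise fallback.
 *
 *	u32 crc32_pclmul(u32 crc, const u8 *p, size_t len)
 *	{
 *		size_t chunk = len & ~(size_t)15;	// whole 16-byte blocks
 *
 *		if (chunk >= 64 && !((unsigned long)p & 15)) {
 *			kernel_fpu_begin();
 *			crc = crc32_pclmul_le_16(p, chunk, crc);
 *			kernel_fpu_end();
 *			p += chunk;
 *			len -= chunk;
 *		}
 *		return crc32_le(crc, p, len);		// tail and small buffers
 *	}
 */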
.globl crc32_pclmul_le_16
.align 4, 0x90
crc32_pclmul_le_16:/* buffer and buffer size are 16 bytes aligned */
	movdqa	(BUF), %xmm1
	movdqa	0x10(BUF), %xmm2
	movdqa	0x20(BUF), %xmm3
	movdqa	0x30(BUF), %xmm4
	movd	CRC, CONSTANT
	pxor	CONSTANT, %xmm1
	sub	$0x40, LEN
	add	$0x40, BUF
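	/* %xmm1-%xmm4 now hold the first 64 input bytes, with the initial
	 * CRC XORed into the low dword of %xmm1 */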
#ifndef __x86_64__
	/* Load the current address into %ecx so 32-bit position-independent
	 * code (-fPIC) can address the constants relative to the delta label */
	call	delta
delta:
	pop	%ecx
#endif
	cmp	$0x40, LEN
	jb	less_64

#ifdef __x86_64__
	movdqa	.Lconstant_R2R1(%rip), CONSTANT
#else
	movdqa	.Lconstant_R2R1 - delta(%ecx), CONSTANT
#endif

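/*
 * Main loop: each iteration folds the 64 bytes held in %xmm1-%xmm4 into
 * the next 64 bytes of the buffer.  For every 128-bit lane, PCLMULQDQ 0x00
 * multiplies the low qword by R1 and PCLMULQDQ 0x11 multiplies the high
 * qword by R2 (CONSTANT = R2:R1); the two products and the new data are
 * combined with XOR.
 */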
loop_64:/* 64 bytes Full cache line folding */
	prefetchnta	0x40(BUF)
	movdqa	%xmm1, %xmm5
	movdqa	%xmm2, %xmm6
	movdqa	%xmm3, %xmm7
#ifdef __x86_64__
	movdqa	%xmm4, %xmm8
#endif
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm2
	PCLMULQDQ 0x00, CONSTANT, %xmm3
#ifdef __x86_64__
	PCLMULQDQ 0x00, CONSTANT, %xmm4
#endif
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	PCLMULQDQ 0x11, CONSTANT, %xmm6
	PCLMULQDQ 0x11, CONSTANT, %xmm7
#ifdef __x86_64__
	PCLMULQDQ 0x11, CONSTANT, %xmm8
#endif
	pxor	%xmm5, %xmm1
	pxor	%xmm6, %xmm2
	pxor	%xmm7, %xmm3
#ifdef __x86_64__
	pxor	%xmm8, %xmm4
#else
	/* %xmm8 is not available on 32-bit x86, fold %xmm4 on its own */
	movdqa	%xmm4, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm4
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor	%xmm5, %xmm4
#endif

	pxor	(BUF), %xmm1
	pxor	0x10(BUF), %xmm2
	pxor	0x20(BUF), %xmm3
	pxor	0x30(BUF), %xmm4

	sub	$0x40, LEN
	add	$0x40, BUF
	cmp	$0x40, LEN
	jge	loop_64
less_64:/* Fold the four 128-bit lanes down to a single 128-bit value */
#ifdef __x86_64__
	movdqa	.Lconstant_R4R3(%rip), CONSTANT
#else
	movdqa	.Lconstant_R4R3 - delta(%ecx), CONSTANT
#endif
	prefetchnta	(BUF)

	movdqa	%xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor	%xmm5, %xmm1
	pxor	%xmm2, %xmm1

	movdqa	%xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor	%xmm5, %xmm1
	pxor	%xmm3, %xmm1

	movdqa	%xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor	%xmm5, %xmm1
	pxor	%xmm4, %xmm1

	cmp	$0x10, LEN
	jb	fold_64
loop_16:/* Fold the remaining 16-byte blocks into the 128-bit value */
	movdqa	%xmm1, %xmm5
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	PCLMULQDQ 0x11, CONSTANT, %xmm5
	pxor	%xmm5, %xmm1
	pxor	(BUF), %xmm1
	sub	$0x10, LEN
	add	$0x10, BUF
	cmp	$0x10, LEN
	jge	loop_16

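/*
 * %xmm1 now holds the single folded 128-bit value.  What remains is the
 * last 64-bit fold (using R4), the final 32-bit fold (using R5), and a
 * Barrett reduction from 64 to 32 bits using RU and the reflected
 * polynomial.
 */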
fold_64:
	/* perform the last 64-bit fold; this also appends 32 zero bits
	 * to the input stream */
	PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
	psrldq	$0x08, %xmm1
	pxor	CONSTANT, %xmm1

	/* final 32-bit fold */
	movdqa	%xmm1, %xmm2
#ifdef __x86_64__
	movdqa	.Lconstant_R5(%rip), CONSTANT
	movdqa	.Lconstant_mask32(%rip), %xmm3
#else
	movdqa	.Lconstant_R5 - delta(%ecx), CONSTANT
	movdqa	.Lconstant_mask32 - delta(%ecx), %xmm3
#endif
	psrldq	$0x04, %xmm2
	pand	%xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor	%xmm2, %xmm1

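/*
 * Barrett reduction with the constants documented above: with R the 64-bit
 * value in %xmm1, compute T1 = (R mod x^32) * RU, then
 * T2 = (T1 mod x^32) * P'; the CRC is read from bits 63:32 of R xor T2.
 * The PCLMULQDQ immediates below select RU (high qword of CONSTANT) and
 * P' (low qword), and pextrd $0x01 extracts the final value.
 */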
	/* Finish up with the bit-reversed Barrett reduction 64 ==> 32 bits */
#ifdef __x86_64__
	movdqa	.Lconstant_RUpoly(%rip), CONSTANT
#else
	movdqa	.Lconstant_RUpoly - delta(%ecx), CONSTANT
#endif
	movdqa	%xmm1, %xmm2
	pand	%xmm3, %xmm1
	PCLMULQDQ 0x10, CONSTANT, %xmm1
	pand	%xmm3, %xmm1
	PCLMULQDQ 0x00, CONSTANT, %xmm1
	pxor	%xmm2, %xmm1
	pextrd	$0x01, %xmm1, %eax

	ret