rte_memcpy_32.h revision 3d9b7210
/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 RehiveTech. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of RehiveTech nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MEMCPY_ARM32_H_
#define _RTE_MEMCPY_ARM32_H_

#include <stdint.h>
#include <string.h>

#ifdef __cplusplus
extern "C" {
#endif

#include "generic/rte_memcpy.h"

#ifdef RTE_ARCH_ARM_NEON_MEMCPY

#ifndef RTE_MACHINE_CPUFLAG_NEON
#error "Cannot optimize memcpy with NEON as the CPU does not seem to support it"
#endif

/* ARM NEON intrinsics are used to copy data */
#include <arm_neon.h>

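/*
 * The rte_movNN() helpers below copy fixed-size blocks. The 16-byte
 * variant uses NEON intrinsics, while the larger variants use inline
 * assembly so that multi-register vld1/vst1 forms can move several
 * D registers per instruction.
 */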
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	vst1q_u8(dst, vld1q_u8(src));
}

static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	asm volatile (
		"vld1.8 {d0-d3}, [%0]\n\t"
		"vst1.8 {d0-d3}, [%1]\n\t"
		: "+r" (src), "+r" (dst)
		: : "memory", "d0", "d1", "d2", "d3");
}

static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
	asm volatile (
		"vld1.8 {d0-d3}, [%0]!\n\t"
		"vld1.8 {d4-d5}, [%0]\n\t"
		"vst1.8 {d0-d3}, [%1]!\n\t"
		"vst1.8 {d4-d5}, [%1]\n\t"
		: "+r" (src), "+r" (dst)
		:
		: "memory", "d0", "d1", "d2", "d3", "d4", "d5");
}

static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	asm volatile (
		"vld1.8 {d0-d3}, [%0]!\n\t"
		"vld1.8 {d4-d7}, [%0]\n\t"
		"vst1.8 {d0-d3}, [%1]!\n\t"
		"vst1.8 {d4-d7}, [%1]\n\t"
		: "+r" (src), "+r" (dst)
		:
		: "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7");
}

static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
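	/* Prefetch the second 64-byte half of the source before copying. */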
	asm volatile ("pld [%0, #64]" : : "r" (src));
	asm volatile (
		"vld1.8 {d0-d3},   [%0]!\n\t"
		"vld1.8 {d4-d7},   [%0]!\n\t"
		"vld1.8 {d8-d11},  [%0]!\n\t"
		"vld1.8 {d12-d15}, [%0]\n\t"
		"vst1.8 {d0-d3},   [%1]!\n\t"
		"vst1.8 {d4-d7},   [%1]!\n\t"
		"vst1.8 {d8-d11},  [%1]!\n\t"
		"vst1.8 {d12-d15}, [%1]\n\t"
		: "+r" (src), "+r" (dst)
		:
		: "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
		"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15");
}

static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
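	/*
	 * Prefetch the rest of this 256-byte block and the start of the
	 * next one (offsets 64-448), so the loads below and the next
	 * iteration of the caller's copy loop hit warm cache lines.
	 */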
	asm volatile ("pld [%0,  #64]" : : "r" (src));
	asm volatile ("pld [%0, #128]" : : "r" (src));
	asm volatile ("pld [%0, #192]" : : "r" (src));
	asm volatile ("pld [%0, #256]" : : "r" (src));
	asm volatile ("pld [%0, #320]" : : "r" (src));
	asm volatile ("pld [%0, #384]" : : "r" (src));
	asm volatile ("pld [%0, #448]" : : "r" (src));
	asm volatile (
		"vld1.8 {d0-d3},   [%0]!\n\t"
		"vld1.8 {d4-d7},   [%0]!\n\t"
		"vld1.8 {d8-d11},  [%0]!\n\t"
		"vld1.8 {d12-d15}, [%0]!\n\t"
		"vld1.8 {d16-d19}, [%0]!\n\t"
		"vld1.8 {d20-d23}, [%0]!\n\t"
		"vld1.8 {d24-d27}, [%0]!\n\t"
		"vld1.8 {d28-d31}, [%0]\n\t"
		"vst1.8 {d0-d3},   [%1]!\n\t"
		"vst1.8 {d4-d7},   [%1]!\n\t"
		"vst1.8 {d8-d11},  [%1]!\n\t"
		"vst1.8 {d12-d15}, [%1]!\n\t"
		"vst1.8 {d16-d19}, [%1]!\n\t"
		"vst1.8 {d20-d23}, [%1]!\n\t"
		"vst1.8 {d24-d27}, [%1]!\n\t"
		"vst1.8 {d28-d31}, [%1]\n\t"
		: "+r" (src), "+r" (dst)
		:
		: "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
		"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
		"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
		"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31");
}


#define rte_memcpy(dst, src, n)              \
	__extension__ ({                     \
	(__builtin_constant_p(n)) ?          \
	memcpy((dst), (src), (n)) :          \
	rte_memcpy_func((dst), (src), (n)); })
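
/*
 * When n is a compile-time constant the compiler can expand memcpy()
 * inline for that exact size; otherwise the copy is dispatched to
 * rte_memcpy_func() below. A hypothetical caller, for illustration
 * (pkt_len is an assumed runtime variable):
 *
 *	uint8_t out[256], in[256];
 *	rte_memcpy(out, in, sizeof(in));   // constant n: becomes memcpy()
 *	rte_memcpy(out, in, pkt_len);      // runtime n: rte_memcpy_func()
 */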

static inline void *
rte_memcpy_func(void *dst, const void *src, size_t n)
{
	void *ret = dst;

	/* We can't copy < 16 bytes using NEON registers, so do it manually. */
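	/*
	 * The low four bits of n select which scalar copies run, e.g.
	 * n = 13 (0b1101) copies 1 + 4 + 8 bytes via the 0x01, 0x04 and
	 * 0x08 branches below.
	 */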
	if (n < 16) {
		if (n & 0x01) {
			*(uint8_t *)dst = *(const uint8_t *)src;
			dst = (uint8_t *)dst + 1;
			src = (const uint8_t *)src + 1;
		}
		if (n & 0x02) {
			*(uint16_t *)dst = *(const uint16_t *)src;
			dst = (uint16_t *)dst + 1;
			src = (const uint16_t *)src + 1;
		}
		if (n & 0x04) {
			*(uint32_t *)dst = *(const uint32_t *)src;
			dst = (uint32_t *)dst + 1;
			src = (const uint32_t *)src + 1;
		}
		if (n & 0x08) {
			/* ARMv7 cannot handle unaligned access to long long
			 * (uint64_t), so two uint32_t operations are used
			 * instead.
			 */
			*(uint32_t *)dst = *(const uint32_t *)src;
			dst = (uint32_t *)dst + 1;
			src = (const uint32_t *)src + 1;
			*(uint32_t *)dst = *(const uint32_t *)src;
		}
		return ret;
	}

	/* Special fast cases for <= 128 bytes */
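	/*
	 * Each case copies the first and the last half-block; for sizes
	 * that are not an exact power of two the two copies overlap in
	 * the middle, which is harmless since the overlapping bytes are
	 * simply written twice with the same data. E.g. for n = 20 the
	 * two rte_mov16() calls cover bytes 0-15 and 4-19.
	 */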
	if (n <= 32) {
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
		return ret;
	}

	if (n <= 64) {
		rte_mov32((uint8_t *)dst, (const uint8_t *)src);
		rte_mov32((uint8_t *)dst - 32 + n,
			(const uint8_t *)src - 32 + n);
		return ret;
	}

	if (n <= 128) {
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		rte_mov64((uint8_t *)dst - 64 + n,
			(const uint8_t *)src - 64 + n);
		return ret;
	}

	/*
	 * For large copies > 128 bytes. This combination of 256, 64 and
	 * 16 byte copies was found to be faster than also adding 128 and
	 * 32 byte copies to the mix.
	 */
	for ( ; n >= 256; n -= 256) {
		rte_mov256((uint8_t *)dst, (const uint8_t *)src);
		dst = (uint8_t *)dst + 256;
		src = (const uint8_t *)src + 256;
	}

	/*
	 * We split the remaining bytes (which will be less than 256) into
	 * 64-byte (2^6) chunks.
	 * Using incrementing integers in the case labels of a switch
	 * statement encourages the compiler to use a jump table. To get
	 * incrementing integers, we shift the two relevant bits to the LSB
	 * position (n >> 6), which yields decrementing integers, and then
	 * subtract the result from 3.
	 */
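	/*
	 * After the loop above n < 256, so n >> 6 is 0..3 and the selector
	 * 3 - (n >> 6) maps as follows: 0 (n >= 192) falls through all
	 * three 64-byte copies, 1 (128 <= n < 192) through two,
	 * 2 (64 <= n < 128) through one, and the default (n < 64) copies
	 * nothing here.
	 */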
	switch (3 - (n >> 6)) {
	case 0x00:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x01:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	case 0x02:
		rte_mov64((uint8_t *)dst, (const uint8_t *)src);
		n -= 64;
		dst = (uint8_t *)dst + 64;
		src = (const uint8_t *)src + 64;      /* fallthrough */
	default:
		break;
	}

	/*
	 * We split the remaining bytes (which will be less than 64) into
	 * 16-byte (2^4) chunks, using the same switch structure as above.
	 */
	switch (3 - (n >> 4)) {
	case 0x00:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x01:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	case 0x02:
		rte_mov16((uint8_t *)dst, (const uint8_t *)src);
		n -= 16;
		dst = (uint8_t *)dst + 16;
		src = (const uint8_t *)src + 16;      /* fallthrough */
	default:
		break;
	}

	/*
	 * Copy any remaining bytes without running past the end of the
	 * buffers: the last 16 bytes are copied as one block, overlapping
	 * bytes that were already written above.
	 */
	if (n != 0)
		rte_mov16((uint8_t *)dst - 16 + n,
			(const uint8_t *)src - 16 + n);
	return ret;
}

#else

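/* Fallback when the NEON-optimized memcpy is not enabled: defer to libc. */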
static inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 16);
}

static inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 32);
}

static inline void
rte_mov48(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 48);
}

static inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 64);
}

static inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 128);
}

static inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
	memcpy(dst, src, 256);
}

static inline void *
rte_memcpy(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);
}

#endif /* RTE_ARCH_ARM_NEON_MEMCPY */

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMCPY_ARM32_H_ */