/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Cavium Networks. All rights reserved.
 *   All rights reserved.
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Derived rte_lpm_lookupx4 implementation from lib/librte_lpm/rte_lpm_sse.h
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium Networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_LPM_NEON_H_
#define _RTE_LPM_NEON_H_

#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_vect.h>
#include <rte_lpm.h>

#ifdef __cplusplus
extern "C" {
#endif

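/*
 * Look up four IPv4 addresses in parallel. Each address packed in 'ip' is
 * resolved through tbl24[] and, where the entry is extended, through tbl8[];
 * hop[i] receives the next hop for lane i, or 'defv' if no route matches.
 */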
static inline void
rte_lpm_lookupx4(const struct rte_lpm *lpm, xmm_t ip, uint32_t hop[4],
	uint32_t defv)
{
	uint32x4_t i24;
	rte_xmm_t i8;
	uint32_t tbl[4];
	uint64_t idx, pt, pt2;
	const uint32_t *ptbl;

	const uint32_t mask = UINT8_MAX;
	const int32x4_t mask8 = vdupq_n_s32(mask);

	/*
	 * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
	 * as one 64-bit value (0x0300000003000000).
	 */
	const uint64_t mask_xv =
		((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
		(uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);

	/*
	 * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
	 * as one 64-bit value (0x0100000001000000).
	 */
	const uint64_t mask_v =
		((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
		(uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);

	/* get 4 indexes for tbl24[]. */
	i24 = vshrq_n_u32((uint32x4_t)ip, CHAR_BIT);

	/* extract values from tbl24[] */
	idx = vgetq_lane_u64((uint64x2_t)i24, 0);

	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
	tbl[0] = *ptbl;
	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
	tbl[1] = *ptbl;

	idx = vgetq_lane_u64((uint64x2_t)i24, 1);

	ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
	tbl[2] = *ptbl;
	ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
	tbl[3] = *ptbl;

	/* get 4 indexes for tbl8[]. */
	i8.x = vandq_s32(ip, mask8);

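	/* pack 2 tbl24 entries per 64-bit word to test their flags together. */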
	pt = (uint64_t)tbl[0] |
		(uint64_t)tbl[1] << 32;
	pt2 = (uint64_t)tbl[2] |
		(uint64_t)tbl[3] << 32;

	/* search successfully finished for all 4 IP addresses. */
	if (likely((pt & mask_xv) == mask_v) &&
			likely((pt2 & mask_xv) == mask_v)) {
		*(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
		*(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
		return;
	}

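	/* slow path: at least one lane is invalid or points into tbl8[]. */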
	if (unlikely((pt & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[0] = i8.u32[0] +
			(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
		tbl[0] = *ptbl;
	}
	if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[1] = i8.u32[1] +
			(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
		tbl[1] = *ptbl;
	}
	if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[2] = i8.u32[2] +
			(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
		tbl[2] = *ptbl;
	}
	if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
			RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
		i8.u32[3] = i8.u32[3] +
			(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
		tbl[3] = *ptbl;
	}

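	/* write next hops, falling back to defv for lanes without a route. */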
	hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
	hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
	hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
	hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
}
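
/*
 * Minimal usage sketch; the names "my_lpm", "ips" and "next_hops" below are
 * illustrative only and not part of this header:
 *
 *	uint32_t ips[4];	// four IPv4 addresses, same byte order as
 *				// used when populating the table
 *	uint32_t next_hops[4];
 *	xmm_t ipx = vld1q_s32((const int32_t *)ips);
 *
 *	rte_lpm_lookupx4(my_lpm, ipx, next_hops, UINT32_MAX);
 *	// next_hops[i] now holds the matched next hop, or UINT32_MAX on miss.
 */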

#ifdef __cplusplus
}
#endif

#endif /* _RTE_LPM_NEON_H_ */