malloc_heap.c revision 6b3e017e
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33#include <stdint.h>
34#include <stddef.h>
35#include <stdlib.h>
36#include <stdio.h>
37#include <stdarg.h>
38#include <errno.h>
39#include <sys/queue.h>
40
41#include <rte_memory.h>
42#include <rte_eal.h>
43#include <rte_eal_memconfig.h>
44#include <rte_launch.h>
45#include <rte_per_lcore.h>
46#include <rte_lcore.h>
47#include <rte_common.h>
48#include <rte_string_fns.h>
49#include <rte_spinlock.h>
50#include <rte_memcpy.h>
51#include <rte_atomic.h>
52
53#include "malloc_elem.h"
54#include "malloc_heap.h"
55
56static unsigned
57check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
58{
59	unsigned check_flag = 0;
60
61	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
62		return 1;
63
64	switch (hugepage_sz) {
65	case RTE_PGSIZE_256K:
66		check_flag = RTE_MEMZONE_256KB;
67		break;
68	case RTE_PGSIZE_2M:
69		check_flag = RTE_MEMZONE_2MB;
70		break;
71	case RTE_PGSIZE_16M:
72		check_flag = RTE_MEMZONE_16MB;
73		break;
74	case RTE_PGSIZE_256M:
75		check_flag = RTE_MEMZONE_256MB;
76		break;
77	case RTE_PGSIZE_512M:
78		check_flag = RTE_MEMZONE_512MB;
79		break;
80	case RTE_PGSIZE_1G:
81		check_flag = RTE_MEMZONE_1GB;
82		break;
83	case RTE_PGSIZE_4G:
84		check_flag = RTE_MEMZONE_4GB;
85		break;
86	case RTE_PGSIZE_16G:
87		check_flag = RTE_MEMZONE_16GB;
88	}
89
90	return check_flag & flags;
91}
92
93/*
94 * Expand the heap with a memseg.
95 * This reserves the zone and sets a dummy malloc_elem header at the end
96 * to prevent overflow. The rest of the zone is added to free list as a single
97 * large free block
98 */
99static void
100malloc_heap_add_memseg(struct malloc_heap *heap, struct rte_memseg *ms)
101{
102	/* allocate the memory block headers, one at end, one at start */
103	struct malloc_elem *start_elem = (struct malloc_elem *)ms->addr;
104	struct malloc_elem *end_elem = RTE_PTR_ADD(ms->addr,
105			ms->len - MALLOC_ELEM_OVERHEAD);
106	end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);
107	const size_t elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
108
109	malloc_elem_init(start_elem, heap, ms, elem_size);
110	malloc_elem_mkend(end_elem, start_elem);
111	malloc_elem_free_list_insert(start_elem);
112
113	heap->total_size += elem_size;
114}
115
116/*
117 * Iterates through the freelist for a heap to find a free element
118 * which can store data of the required size and with the requested alignment.
119 * If size is 0, find the biggest available elem.
120 * Returns null on failure, or pointer to element on success.
121 */
122static struct malloc_elem *
123find_suitable_element(struct malloc_heap *heap, size_t size,
124		unsigned flags, size_t align, size_t bound)
125{
126	size_t idx;
127	struct malloc_elem *elem, *alt_elem = NULL;
128
129	for (idx = malloc_elem_free_list_index(size);
130			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
131		for (elem = LIST_FIRST(&heap->free_head[idx]);
132				!!elem; elem = LIST_NEXT(elem, free_list)) {
133			if (malloc_elem_can_hold(elem, size, align, bound)) {
134				if (check_hugepage_sz(flags, elem->ms->hugepage_sz))
135					return elem;
136				if (alt_elem == NULL)
137					alt_elem = elem;
138			}
139		}
140	}
141
142	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
143		return alt_elem;
144
145	return NULL;
146}
147
148/*
149 * Main function to allocate a block of memory from the heap.
150 * It locks the free list, scans it, and adds a new memseg if the
151 * scan fails. Once the new memseg is added, it re-scans and should return
152 * the new element after releasing the lock.
153 */
154void *
155malloc_heap_alloc(struct malloc_heap *heap,
156		const char *type __attribute__((unused)), size_t size, unsigned flags,
157		size_t align, size_t bound)
158{
159	struct malloc_elem *elem;
160
161	size = RTE_CACHE_LINE_ROUNDUP(size);
162	align = RTE_CACHE_LINE_ROUNDUP(align);
163
164	rte_spinlock_lock(&heap->lock);
165
166	elem = find_suitable_element(heap, size, flags, align, bound);
167	if (elem != NULL) {
168		elem = malloc_elem_alloc(elem, size, align, bound);
169		/* increase heap's count of allocated elements */
170		heap->alloc_count++;
171	}
172	rte_spinlock_unlock(&heap->lock);
173
174	return elem == NULL ? NULL : (void *)(&elem[1]);
175}
176
177/*
178 * Function to retrieve data for heap on given socket
179 */
180int
181malloc_heap_get_stats(const struct malloc_heap *heap,
182		struct rte_malloc_socket_stats *socket_stats)
183{
184	size_t idx;
185	struct malloc_elem *elem;
186
187	/* Initialise variables for heap */
188	socket_stats->free_count = 0;
189	socket_stats->heap_freesz_bytes = 0;
190	socket_stats->greatest_free_size = 0;
191
192	/* Iterate through free list */
193	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
194		for (elem = LIST_FIRST(&heap->free_head[idx]);
195			!!elem; elem = LIST_NEXT(elem, free_list))
196		{
197			socket_stats->free_count++;
198			socket_stats->heap_freesz_bytes += elem->size;
199			if (elem->size > socket_stats->greatest_free_size)
200				socket_stats->greatest_free_size = elem->size;
201		}
202	}
203	/* Get stats on overall heap and allocated memory on this heap */
204	socket_stats->heap_totalsz_bytes = heap->total_size;
205	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
206			socket_stats->heap_freesz_bytes);
207	socket_stats->alloc_count = heap->alloc_count;
208	return 0;
209}
210
211int
212rte_eal_malloc_heap_init(void)
213{
214	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
215	unsigned ms_cnt;
216	struct rte_memseg *ms;
217
218	if (mcfg == NULL)
219		return -1;
220
221	for (ms = &mcfg->memseg[0], ms_cnt = 0;
222			(ms_cnt < RTE_MAX_MEMSEG) && (ms->len > 0);
223			ms_cnt++, ms++) {
224		malloc_heap_add_memseg(&mcfg->malloc_heaps[ms->socket_id], ms);
225	}
226
227	return 0;
228}
229