/* mem.h, revision 78af0a8c */
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2001, 2002, 2003 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#include <stdarg.h>
#include <unistd.h>
#include <sys/mman.h>

#include <vppinfra/clib.h>	/* uword, etc */
#include <vppinfra/clib_error.h>

#if USE_DLMALLOC == 0
#include <vppinfra/mheap_bootstrap.h>
#else
#include <vppinfra/dlmalloc.h>
#endif

#include <vppinfra/os.h>
#include <vppinfra/string.h>	/* memcpy, clib_memset */
#include <vppinfra/valgrind.h>

#define CLIB_MAX_MHEAPS 256

/* Per CPU heaps. */
extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];

always_inline void
clib_mem_set_thread_index (void)
{
  /*
   * Find an unused slot in the per-cpu-mheaps array,
   * and grab it for this thread. We need to be able to
   * push/pop the thread heap without affecting other thread(s).
   */
  int i;
  if (__os_thread_index != 0)
    return;
  for (i = 0; i < ARRAY_LEN (clib_per_cpu_mheaps); i++)
    if (clib_atomic_bool_cmp_and_swap (&clib_per_cpu_mheaps[i],
				       0, clib_per_cpu_mheaps[0]))
      {
	os_set_thread_index (i);
	break;
      }
  ASSERT (__os_thread_index > 0);
}
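
/* Usage sketch (illustrative, not part of the original header): each
   worker thread claims a heap slot once, before its first allocation.
   "worker_fn" is a hypothetical pthread entry point.

     static void *
     worker_fn (void *arg)
     {
       clib_mem_set_thread_index ();
       // per-thread allocations now use this thread's heap slot
       return 0;
     }
*/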

always_inline void *
clib_mem_get_per_cpu_heap (void)
{
  int cpu = os_get_thread_index ();
  return clib_per_cpu_mheaps[cpu];
}

always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
  int cpu = os_get_thread_index ();
  void *old = clib_per_cpu_mheaps[cpu];
  clib_per_cpu_mheaps[cpu] = new_heap;
  return old;
}

/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
				  int os_out_of_memory_on_failure)
{
  void *heap, *p;
  uword cpu;

  if (align_offset > align)
    {
      if (align > 0)
	align_offset %= align;
      else
	align_offset = align;
    }

  cpu = os_get_thread_index ();
  heap = clib_per_cpu_mheaps[cpu];

#if USE_DLMALLOC == 0
  uword offset;
  heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
  clib_per_cpu_mheaps[cpu] = heap;

  if (offset != ~0)
    {
      p = heap + offset;
#if CLIB_DEBUG > 0
      VALGRIND_MALLOCLIKE_BLOCK (p, mheap_data_bytes (heap, offset), 0, 0);
#endif
      return p;
    }
  else
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }
#else
  p = mspace_get_aligned (heap, size, align, align_offset);
  if (PREDICT_FALSE (p == 0))
    {
      if (os_out_of_memory_on_failure)
	os_out_of_memory ();
      return 0;
    }

  return p;
#endif /* USE_DLMALLOC */
}
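
/* Example (illustrative, not from the original header): allocate 1024
   bytes such that the byte at offset 16 from the returned pointer is
   64-byte aligned, e.g. a payload that must start on an assumed
   64-byte cache line after a 16-byte header.  Vectors use this same
   mechanism to align data that follows a vector header.

     void *p = clib_mem_alloc_aligned_at_offset (1024, 64, 16, 1);
     ASSERT ((((uword) p + 16) & 63) == 0);
*/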

/* Memory allocator which calls os_out_of_memory() when it fails */
always_inline void *
clib_mem_alloc (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 1);
}

/* Memory allocator which returns null when it fails */
always_inline void *
clib_mem_alloc_or_null (uword size)
{
  return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
					   /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}

always_inline void *
clib_mem_alloc_aligned_or_null (uword size, uword align)
{
  return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0,
					   /* os_out_of_memory */ 0);
}
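
/* Example (illustrative): the _or_null variants return 0 instead of
   calling os_out_of_memory (), so the caller must check for failure.

     u8 *buf = clib_mem_alloc_or_null (1 << 20);
     if (buf == 0)
       return clib_error_return (0, "out of memory");
*/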

/* Memory allocator which panics when it fails.
   Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
#define clib_mem_alloc_aligned_no_fail(size,align)				\
({										\
  uword _clib_mem_alloc_size = (size);						\
  void * _clib_mem_alloc_p;							\
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));	\
  if (! _clib_mem_alloc_p)							\
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);		\
  _clib_mem_alloc_p;								\
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)
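
/* Example (illustrative): the _no_fail variants panic with function
   and line information on failure, suitable for allocations that must
   never fail.  "state_table" and its size are hypothetical.

     state_table = clib_mem_alloc_aligned_no_fail (n_entries * sizeof (u64),
						   CLIB_CACHE_LINE_BYTES);
*/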

/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)

always_inline uword
clib_mem_is_heap_object (void *p)
{
#if USE_DLMALLOC == 0
  void *heap = clib_mem_get_per_cpu_heap ();
  uword offset = (uword) p - (uword) heap;
  mheap_elt_t *e, *n;

  if (offset >= vec_len (heap))
    return 0;

  e = mheap_elt_at_uoffset (heap, offset);
  n = mheap_next_elt (e);

  /* Check that heap forward and reverse pointers agree. */
  return e->n_user_data == n->prev_n_user_data;
#else
  void *heap = clib_mem_get_per_cpu_heap ();

  return mspace_is_heap_object (heap, p);
#endif /* USE_DLMALLOC */
}

always_inline void
clib_mem_free (void *p)
{
  u8 *heap = clib_mem_get_per_cpu_heap ();

  /* Make sure object is in the correct heap. */
  ASSERT (clib_mem_is_heap_object (p));

#if USE_DLMALLOC == 0
  mheap_put (heap, (u8 *) p - heap);
#else
  mspace_put (heap, p);
#endif

#if CLIB_DEBUG > 0
  VALGRIND_FREELIKE_BLOCK (p, 0);
#endif
}

always_inline void *
clib_mem_realloc (void *p, uword new_size, uword old_size)
{
  /* By default use alloc, copy and free to emulate realloc. */
  void *q = clib_mem_alloc (new_size);
  if (q)
    {
      uword copy_size;
      if (old_size < new_size)
	copy_size = old_size;
      else
	copy_size = new_size;
      clib_memcpy_fast (q, p, copy_size);
      clib_mem_free (p);
    }
  return q;
}
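
/* Example (illustrative): unlike realloc(3), the caller supplies the
   old size; min (old_size, new_size) bytes are copied and the old
   block is freed only if the new allocation succeeds.

     u8 *data = clib_mem_alloc (64);
     ...
     data = clib_mem_realloc (data, 128, 64);	// grow 64 -> 128 bytes
*/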

always_inline uword
clib_mem_size (void *p)
{
#if USE_DLMALLOC == 0
  mheap_elt_t *e = mheap_user_pointer_to_elt (p);
  ASSERT (clib_mem_is_heap_object (p));
  return mheap_elt_data_bytes (e);
#else
  ASSERT (clib_mem_is_heap_object (p));
  return mspace_usable_size_with_delta (p);
#endif
}

always_inline void
clib_mem_free_s (void *p)
{
  uword size = clib_mem_size (p);
  memset_s_inline (p, size, 0, size);
  clib_mem_free (p);
}
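
/* Example (illustrative): use clib_mem_free_s for buffers that hold
   secrets; the entire usable size is zeroed before the block is
   returned to the heap.

     u8 *key = clib_mem_alloc (32);
     ...
     clib_mem_free_s (key);	// zeroes, then frees
*/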

always_inline void *
clib_mem_get_heap (void)
{
  return clib_mem_get_per_cpu_heap ();
}

always_inline void *
clib_mem_set_heap (void *heap)
{
  return clib_mem_set_per_cpu_heap (heap);
}
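
/* Usage sketch (the common push/pop pattern): temporarily switch to
   another heap, allocate, then restore the previous heap.
   "other_heap" is a hypothetical heap, e.g. one returned by
   clib_mem_init.

     void *oldheap = clib_mem_set_heap (other_heap);
     void *p = clib_mem_alloc (128);
     clib_mem_set_heap (oldheap);
*/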

void *clib_mem_init (void *heap, uword size);
void *clib_mem_init_thread_safe (void *memory, uword memory_size);

void clib_mem_exit (void);

uword clib_mem_get_page_size (void);

void clib_mem_validate (void);

void clib_mem_trace (int enable);

int clib_mem_is_traced (void);

typedef struct
{
  /* Total number of objects allocated. */
  uword object_count;

  /* Total allocated bytes.  Bytes used and free.
     used + free = total */
  uword bytes_total, bytes_used, bytes_free;

  /* Number of bytes used by mheap data structure overhead
     (e.g. free lists, mheap header). */
  uword bytes_overhead;

  /* Amount of free space returned to operating system. */
  uword bytes_free_reclaimed;

  /* For malloc which puts small objects in sbrk region and
     large objects in mmap'ed regions. */
  uword bytes_used_sbrk;
  uword bytes_used_mmap;

  /* Max. number of bytes in this heap. */
  uword bytes_max;
} clib_mem_usage_t;

void clib_mem_usage (clib_mem_usage_t * usage);
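
/* Example (illustrative): snapshot heap statistics for the current
   per-thread heap.  clib_warning comes from vppinfra/error.h, which
   is included at the end of this header.

     clib_mem_usage_t usage;
     clib_mem_usage (&usage);
     clib_warning ("%wd objects, %wd of %wd bytes used",
		   usage.object_count, usage.bytes_used, usage.bytes_total);
*/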

u8 *format_clib_mem_usage (u8 * s, va_list * args);

/* Allocate virtual address space. */
always_inline void *
clib_mem_vm_alloc (uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE;

#ifdef MAP_ANONYMOUS
  flags |= MAP_ANONYMOUS;
#endif

  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void
clib_mem_vm_free (void *addr, uword size)
{
  munmap (addr, size);
}

always_inline void *
clib_mem_vm_unmap (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED;

  /* To unmap we "map" with no protection.  If we actually called
     munmap then other callers could steal the address space.  By
     changing to PROT_NONE the kernel can free up the pages which is
     really what we want "unmap" to mean. */
  mmap_addr = mmap (addr, size, PROT_NONE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}

always_inline void *
clib_mem_vm_map (void *addr, uword size)
{
  void *mmap_addr;
  uword flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;

  mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
  if (mmap_addr == (void *) -1)
    mmap_addr = 0;

  return mmap_addr;
}
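
/* Usage sketch (illustrative): reserve an address range, release the
   backing pages without giving up the range (clib_mem_vm_unmap remaps
   it PROT_NONE rather than calling munmap), then commit pages again
   at the same address.

     void *base = clib_mem_vm_alloc (1 << 20);	// reserve 1 MiB
     ...
     clib_mem_vm_unmap (base, 1 << 20);	// drop pages, keep the range
     base = clib_mem_vm_map (base, 1 << 20);	// commit again in place
*/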

typedef struct
{
#define CLIB_MEM_VM_F_SHARED (1 << 0)
#define CLIB_MEM_VM_F_HUGETLB (1 << 1)
#define CLIB_MEM_VM_F_NUMA_PREFER (1 << 2)
#define CLIB_MEM_VM_F_NUMA_FORCE (1 << 3)
#define CLIB_MEM_VM_F_HUGETLB_PREALLOC (1 << 4)
#define CLIB_MEM_VM_F_LOCKED (1 << 5)
  u32 flags; /**< vm allocation flags:
                <br> CLIB_MEM_VM_F_SHARED: request shared memory, file
		descriptor will be provided on successful allocation.
                <br> CLIB_MEM_VM_F_HUGETLB: request hugepages.
		<br> CLIB_MEM_VM_F_NUMA_PREFER: numa_node field contains valid
		numa node preference.
		<br> CLIB_MEM_VM_F_NUMA_FORCE: fail if setting numa policy fails.
		<br> CLIB_MEM_VM_F_HUGETLB_PREALLOC: pre-allocate hugepages if
		number of available pages is not sufficient.
		<br> CLIB_MEM_VM_F_LOCKED: request locked memory.
             */
  char *name; /**< Name for memory allocation, set by caller. */
  uword size; /**< Allocation size, set by caller. */
  int numa_node; /**< numa node preference. Valid if CLIB_MEM_VM_F_NUMA_PREFER set. */
  void *addr; /**< Pointer to allocated memory, set on successful allocation. */
  int fd; /**< File descriptor, set on successful allocation if CLIB_MEM_VM_F_SHARED is set. */
  int log2_page_size;		/**< Page size in log2 format, set on successful allocation. */
  int n_pages;			/**< Number of pages. */
  uword requested_va;		/**< Request fixed position mapping */
} clib_mem_vm_alloc_t;

clib_error_t *clib_mem_create_fd (char *name, int *fdp);
clib_error_t *clib_mem_create_hugetlb_fd (char *name, int *fdp);
clib_error_t *clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a);
void clib_mem_vm_ext_free (clib_mem_vm_alloc_t * a);
u64 clib_mem_get_fd_page_size (int fd);
uword clib_mem_get_default_hugepage_size (void);
int clib_mem_get_fd_log2_page_size (int fd);
u64 *clib_mem_vm_get_paddr (void *mem, int log2_page_size, int n_pages);
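
/* Example (illustrative): request a named, shared hugepage region.
   The name and size are arbitrary; flags come from the struct above.

     clib_mem_vm_alloc_t alloc = { 0 };
     clib_error_t *err;

     alloc.name = "example-region";
     alloc.size = 64 << 20;
     alloc.flags = CLIB_MEM_VM_F_SHARED | CLIB_MEM_VM_F_HUGETLB;
     if ((err = clib_mem_vm_ext_alloc (&alloc)))
       clib_error_report (err);
     // on success, alloc.addr, alloc.fd, alloc.log2_page_size and
     // alloc.n_pages are filled in
*/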

typedef struct
{
  uword size;		/**< Map size */
  int fd;		/**< File descriptor to be mapped */
  uword requested_va;	/**< Request fixed position mapping */
  void *addr;		/**< Pointer to mapped memory, if successful */
} clib_mem_vm_map_t;

clib_error_t *clib_mem_vm_ext_map (clib_mem_vm_map_t * a);
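
/* Example (illustrative): map a shared-memory descriptor obtained
   elsewhere, e.g. the fd produced by clib_mem_vm_ext_alloc in another
   process.  "shared_fd" is hypothetical.

     clib_mem_vm_map_t map = { 0 };
     map.fd = shared_fd;
     map.size = 64 << 20;
     if (clib_mem_vm_ext_map (&map) == 0)
       ...	// map.addr points at the mapping
*/
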
void clib_mem_vm_randomize_va (uword * requested_va, u32 log2_page_size);
void mheap_trace (void *v, int enable);
uword clib_mem_trace_enable_disable (uword enable);

#include <vppinfra/error.h>	/* clib_panic */

#endif /* _included_clib_mem_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */