/*
 * Electric Fence - Red-Zone memory allocator.
 * Bruce Perens, 1988, 1993
 *
 * This is a special version of malloc() and company for debugging software
 * that is suspected of overrunning or underrunning the boundaries of a
 * malloc buffer, or touching free memory.
 *
 * It arranges for each malloc buffer to be followed (or preceded)
 * in the address space by an inaccessible virtual memory page,
 * and for free memory to be inaccessible. If software touches the
 * inaccessible page, it will get an immediate segmentation
 * fault. It is then trivial to uncover the offending code using a debugger.
 *
 * An advantage of this product over most malloc debuggers is that this one
 * detects reading out of bounds as well as writing, and this one stops on
 * the exact instruction that causes the error, rather than waiting until the
 * next boundary check.
 *
 * There is one product that debugs malloc buffer overruns
 * better than Electric Fence: "Purify" from Purify Systems, and that's only
 * a small part of what Purify does. I'm not affiliated with Purify, I just
 * respect a job well done.
 *
 * This version of malloc() should not be linked into production software,
 * since it tremendously increases the time and memory overhead of malloc().
 * Each malloc buffer will consume a minimum of two virtual memory pages,
 * which is 16 kilobytes on many systems. On some systems it will be
 * necessary to increase the amount of swap space in order to debug large
 * programs that perform lots of allocation, because of the per-buffer
 * overhead.
 */
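
/*
 * A minimal sketch of the class of bug this allocator catches. It is a
 * hypothetical test program, not part of the library, and is kept inside
 * "#if 0" so it is never compiled. With the default EF_ALIGNMENT of 8, an
 * 8-byte request ends exactly at the dead page, so the store one byte past
 * the end faults on the offending instruction.
 */
#if 0
#include <stdlib.h>

int
main(void)
{
	char *	buffer = malloc(8);

	buffer[8] = 'x';	/* One byte past the end: immediate SIGSEGV. */
	free(buffer);
	return 0;
}
#endif
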
#include "efence.h"
#include <stdlib.h>
#include <unistd.h>
#include <memory.h>
#include <string.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>


extern C_LINKAGE void * ef_malloc(size_t size);
extern C_LINKAGE void ef_free(void * address);
extern C_LINKAGE void * ef_memalign(size_t alignment, size_t userSize);
extern C_LINKAGE void * ef_calloc(size_t nelem, size_t elsize);
extern C_LINKAGE void * ef_valloc(size_t size);
extern C_LINKAGE void * ef_realloc(void * oldBuffer, size_t newSize);
extern C_LINKAGE void ef_init(void);

#ifdef	malloc
#undef	malloc
#endif

#ifdef	calloc
#undef	calloc
#endif

static const char	version[] = "\n  Electric Fence 2.1"
 " Copyright (C) 1987-1998 Bruce Perens.\n";

/*
 * MEMORY_CREATION_SIZE is the amount of memory to get from the operating
 * system at one time. We'll break that memory down into smaller pieces for
 * malloc buffers. Ten megabytes are used here.
 */
#define			MEMORY_CREATION_SIZE	10 * 1024 * 1024

/*
 * Enum Mode indicates the status of a malloc buffer.
 */
enum _Mode {
	NOT_IN_USE = 0,	/* Available to represent a malloc buffer. */
	FREE,		/* A free buffer. */
	ALLOCATED,	/* A buffer that is in use. */
	PROTECTED,	/* A freed buffer that can not be allocated again. */
	INTERNAL_USE	/* A buffer used internally by malloc(). */
};
typedef enum _Mode	Mode;

/*
 * Struct Slot contains all of the information about a malloc buffer except
 * for the contents of its memory.
 */
struct _Slot {
	void *		userAddress;
	void *		internalAddress;
	size_t		userSize;
	size_t		internalSize;
	Mode		mode;
};
typedef struct _Slot	Slot;

/*
 * EF_DISABLE_BANNER is a global variable used to control whether
 * Electric Fence prints its usual startup message. If the value is
 * -1, it will be set from the environment, or to 0, at run time.
 * The default here is 1, so the banner is disabled and the environment
 * is not consulted.
 */
int		EF_DISABLE_BANNER = 1;


/*
 * EF_ALIGNMENT is a global variable used to control the default alignment
 * of buffers returned by malloc(), calloc(), and realloc(). It is all-caps
 * so that its name matches the name of the environment variable that is used
 * to set it. This gives the programmer one less name to remember.
 * If the value is -1, it will be set from the environment, or to sizeof(int),
 * at run time.
 */
int		EF_ALIGNMENT = 8;

/*
 * EF_PROTECT_FREE is a global variable used to control the disposition of
 * memory that is released using free(). It is all-caps so that its name
 * matches the name of the environment variable that is used to set it.
 * If its value is non-zero, memory released by free is made
 * inaccessible and never allocated again. Any software that touches free
 * memory will then get a segmentation fault. If its value is zero, freed
 * memory will be available for reallocation, but will still be inaccessible
 * until it is reallocated.
 * If the value is -1, it will be set from the environment or to 0 at run-time.
 */
int		EF_PROTECT_FREE = -1;

/*
 * EF_PROTECT_BELOW is used to modify the behavior of the allocator. When
 * its value is non-zero, the allocator will place an inaccessible page
 * immediately _before_ the malloc buffer in the address space, instead
 * of _after_ it. Use this to detect malloc buffer under-runs, rather than
 * over-runs. It won't detect both at the same time, so you should test your
 * software twice, once with this value clear, and once with it set.
 * If the value is -1, it will be set from the environment or to zero at
 * run-time.
 */
int		EF_PROTECT_BELOW = -1;

/*
 * EF_ALLOW_MALLOC_0 is set if Electric Fence is to allow malloc(0). I
 * trap malloc(0) by default because it is a common source of bugs.
 */
int		EF_ALLOW_MALLOC_0 = 0;

/*
 * EF_FREE_WIPES is set if Electric Fence is to wipe the memory content
 * of freed blocks. This makes it easier to check whether memory has been
 * freed.
 */
int		EF_FREE_WIPES = 1;
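
/*
 * Example of how the variables above are typically set from the shell at
 * run time (an illustrative sketch; the program name is hypothetical):
 *
 *	EF_PROTECT_BELOW=1 EF_PROTECT_FREE=1 ./myprogram
 *
 * Each variable is consulted once, in initialize(), and only when its
 * compiled-in default is -1. Variables given another default above (for
 * example EF_ALIGNMENT or EF_FREE_WIPES) are not read from the environment
 * in this version; change them in this file or from a debugger instead.
 */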

/*
 * malloc_init is set non-zero once ef_init() has created the mutex and
 * initialized the arena.
 */
static int	malloc_init = 0;

/*
 * allocationList points to the array of slot structures used to manage the
 * malloc arena.
 */
static Slot *		allocationList = 0;

/*
 * allocationListSize is the size of the allocation list. This will always
 * be a multiple of the page size.
 */
static size_t		allocationListSize = 0;

/*
 * slotCount is the number of Slot structures in allocationList.
 */
static size_t		slotCount = 0;

/*
 * unUsedSlots is the number of Slot structures that are currently available
 * to represent new malloc buffers. When this number gets too low, we will
 * create new slots.
 */
static size_t		unUsedSlots = 0;

/*
 * slotsPerPage is the number of slot structures that fit in a virtual
 * memory page.
 */
static size_t		slotsPerPage = 0;

/*
 * internalUse is set when allocating and freeing the allocator-internal
 * data structures.
 */
static int		internalUse = 0;

/*
 * noAllocationListProtection is set to tell malloc() and free() not to
 * manipulate the protection of the allocation list. This is only set in
 * realloc(), which does it to save on slow system calls, and in
 * allocateMoreSlots(), which does it because it changes the allocation list.
 */
static int		noAllocationListProtection = 0;

/*
 * bytesPerPage is set at run-time to the number of bytes per virtual-memory
 * page, as returned by Page_Size().
 */
static size_t		bytesPerPage = 0;

/*
 * mutex to enable multithreaded operation
 */
static pthread_mutex_t	mutex;


static void lock() {
	/* The mutex is created recursive in ef_init(), so lock() may nest. */
	pthread_mutex_lock(&mutex);
}

static void unlock() {
	pthread_mutex_unlock(&mutex);
}


/*
 * internalError is called for those "shouldn't happen" errors in the
 * allocator.
 */
static void
internalError(void)
{
	EF_Abort("Internal error in allocator.");
}

/*
 * initialize sets up the memory allocation arena and the run-time
 * configuration information.
 */
static void
initialize(void)
{
	size_t	size = MEMORY_CREATION_SIZE;
	size_t	slack;
	char *	string;
	Slot *	slot;

	if ( EF_DISABLE_BANNER == -1 ) {
		if ( (string = getenv("EF_DISABLE_BANNER")) != 0 )
			EF_DISABLE_BANNER = atoi(string);
		else
			EF_DISABLE_BANNER = 0;
	}

	if ( EF_DISABLE_BANNER == 0 )
		EF_Print(version);

	/*
	 * Import the user's environment specification of the default
	 * alignment for malloc(). We want that alignment to be under
	 * user control, since smaller alignment lets us catch more bugs,
	 * however some software will break if malloc() returns a buffer
	 * that is not word-aligned.
	 *
	 * I would like
	 * alignment to be zero so that we could catch all one-byte
	 * overruns, however if malloc() is asked to allocate an odd-size
	 * buffer and returns an address that is not word-aligned, or whose
	 * size is not a multiple of the word size, software breaks.
	 * This was the case with the Sun string-handling routines,
	 * which can do word fetches up to three bytes beyond the end of a
	 * string. I handle this problem in part by providing
	 * byte-reference-only versions of the string library functions, but
	 * there are other functions that break, too. Some in X Windows, one
	 * in Sam Leffler's TIFF library, and doubtless many others.
	 */
	if ( EF_ALIGNMENT == -1 ) {
		if ( (string = getenv("EF_ALIGNMENT")) != 0 )
			EF_ALIGNMENT = (size_t)atoi(string);
		else
			EF_ALIGNMENT = sizeof(int);
	}

	/*
	 * See if the user wants to protect the address space below a buffer,
	 * rather than that above a buffer.
	 */
	if ( EF_PROTECT_BELOW == -1 ) {
		if ( (string = getenv("EF_PROTECT_BELOW")) != 0 )
			EF_PROTECT_BELOW = (atoi(string) != 0);
		else
			EF_PROTECT_BELOW = 0;
	}

	/*
	 * See if the user wants to protect memory that has been freed until
	 * the program exits, rather than until it is re-allocated.
	 */
	if ( EF_PROTECT_FREE == -1 ) {
		if ( (string = getenv("EF_PROTECT_FREE")) != 0 )
			EF_PROTECT_FREE = (atoi(string) != 0);
		else
			EF_PROTECT_FREE = 0;
	}

	/*
	 * See if the user wants to allow malloc(0).
	 */
	if ( EF_ALLOW_MALLOC_0 == -1 ) {
		if ( (string = getenv("EF_ALLOW_MALLOC_0")) != 0 )
			EF_ALLOW_MALLOC_0 = (atoi(string) != 0);
		else
			EF_ALLOW_MALLOC_0 = 0;
	}

	/*
	 * See if the user wants us to wipe out freed memory.
	 */
	if ( EF_FREE_WIPES == -1 ) {
		if ( (string = getenv("EF_FREE_WIPES")) != 0 )
			EF_FREE_WIPES = (atoi(string) != 0);
		else
			EF_FREE_WIPES = 0;
	}

	/*
	 * Get the run-time configuration of the virtual memory page size.
	 */
	bytesPerPage = Page_Size();

	/*
	 * Figure out how many Slot structures to allocate at one time.
	 */
	slotCount = slotsPerPage = bytesPerPage / sizeof(Slot);
	allocationListSize = bytesPerPage;

	if ( allocationListSize > size )
		size = allocationListSize;

	if ( (slack = size % bytesPerPage) != 0 )
		size += bytesPerPage - slack;

	/*
	 * Allocate memory, and break it up into two malloc buffers. The
	 * first buffer will be used for Slot structures, the second will
	 * be marked free.
	 */
	slot = allocationList = (Slot *)Page_Create(size);
	memset((char *)allocationList, 0, allocationListSize);

	slot[0].internalSize = slot[0].userSize = allocationListSize;
	slot[0].internalAddress = slot[0].userAddress = allocationList;
	slot[0].mode = INTERNAL_USE;
	if ( size > allocationListSize ) {
		slot[1].internalAddress = slot[1].userAddress
		 = ((char *)slot[0].internalAddress) + slot[0].internalSize;
		slot[1].internalSize
		 = slot[1].userSize = size - slot[0].internalSize;
		slot[1].mode = FREE;
	}

	/*
	 * Deny access to the free page, so that we will detect any software
	 * that treads upon free memory.
	 */
	Page_DenyAccess(slot[1].internalAddress, slot[1].internalSize);

	/*
	 * Account for the two slot structures that we've used.
	 */
	unUsedSlots = slotCount - 2;
}
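
/*
 * A sketch, under stated assumptions, of what the Page_* primitives used
 * above are expected to do. The real implementations live elsewhere
 * (page.c / efence.h); the mmap/mprotect mapping and the _sketch names are
 * assumptions for illustration, not the actual code, so the block is never
 * compiled. Error handling is omitted.
 */
#if 0
#include <sys/mman.h>

static void *
Page_Create_sketch(size_t size)
{
	/* Anonymous, readable and writable memory from the kernel. */
	return mmap(0, size, PROT_READ | PROT_WRITE,
	 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void
Page_DenyAccess_sketch(void * address, size_t size)
{
	/* Any load or store in this range now raises SIGSEGV. */
	mprotect(address, size, PROT_NONE);
}

static void
Page_AllowAccess_sketch(void * address, size_t size)
{
	mprotect(address, size, PROT_READ | PROT_WRITE);
}
#endif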

/*
 * allocateMoreSlots is called when there are only enough slot structures
 * left to support the allocation of a single malloc buffer.
 */
static void
allocateMoreSlots(void)
{
	size_t	newSize = allocationListSize + bytesPerPage;
	void *	newAllocation;
	void *	oldAllocation = allocationList;

	Page_AllowAccess(allocationList, allocationListSize);
	noAllocationListProtection = 1;
	internalUse = 1;

	newAllocation = ef_malloc(newSize);
	memcpy(newAllocation, allocationList, allocationListSize);
	memset(&(((char *)newAllocation)[allocationListSize]), 0, bytesPerPage);

	allocationList = (Slot *)newAllocation;
	allocationListSize = newSize;
	slotCount += slotsPerPage;
	unUsedSlots += slotsPerPage;

	ef_free(oldAllocation);

	/*
	 * Keep access to the allocation list open at this point, because
	 * I am returning to memalign(), which needs that access.
	 */
	noAllocationListProtection = 0;
	internalUse = 0;
}

/*
 * This is the memory allocator. When asked to allocate a buffer, allocate
 * it in such a way that the end of the buffer is followed by an inaccessible
 * memory page. If software overruns that buffer, it will touch the bad page
 * and get an immediate segmentation fault. It's then easy to zero in on the
 * offending code with a debugger.
 *
 * There are a few complications. If the user asks for an odd-sized buffer,
 * we would have to have that buffer start on an odd address if the byte after
 * the end of the buffer was to be on the inaccessible page. Unfortunately,
 * there is lots of software that asks for odd-sized buffers and then
 * requires that the returned address be word-aligned, or the size of the
 * buffer be a multiple of the word size. An example is the string-processing
 * functions on Sun systems, which do word references to the string memory
 * and may refer to memory up to three bytes beyond the end of the string.
 * For this reason, I take the alignment requests to memalign() and valloc()
 * seriously, and round odd-sized requests up to the alignment, even though
 * that means an overrun that stays within the added slack goes undetected.
 *
 * Electric Fence wastes lots of memory. I do a best-fit allocator here
 * so that it won't waste even more. It's slow, but thrashing because your
 * working set is too big for a system's RAM is even slower.
 */
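
/*
 * A worked example of the layout computed below (assuming 4096-byte pages,
 * the default EF_ALIGNMENT of 8, EF_PROTECT_BELOW off, and malloc(100)):
 *
 *	userSize     rounded up from 100 to 104 (next multiple of 8)
 *	internalSize rounded up to 8192 (one live page plus one dead page)
 *
 *	internalAddress + 0    ... + 4095   live page (readable and writable)
 *	internalAddress + 4096 ... + 8191   dead page (access denied)
 *	userAddress = internalAddress + 4096 - 104 = internalAddress + 3992
 *
 * buffer[104] lands on the dead page and faults immediately; overruns that
 * stay inside the 4 bytes of alignment slack (buffer[100..103]) do not.
 */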
extern C_LINKAGE void *
ef_memalign(size_t alignment, size_t userSize)
{
	register Slot *	slot;
	register size_t	count;
	Slot *		fullSlot = 0;
	Slot *		emptySlots[2];
	size_t		internalSize;
	size_t		slack;
	char *		address;


	if ( userSize == 0 && !EF_ALLOW_MALLOC_0 )
		EF_Abort("Allocating 0 bytes, probably a bug.");

	/*
	 * If EF_PROTECT_BELOW is set, all addresses returned by malloc()
	 * and company will be page-aligned.
	 */
	if ( !EF_PROTECT_BELOW && alignment > 1 ) {
		if ( (slack = userSize % alignment) != 0 )
			userSize += alignment - slack;
	}

	/*
	 * The internal size of the buffer is rounded up to the next page-size
	 * boundary, and then we add another page's worth of memory for the
	 * dead page.
	 */
	internalSize = userSize + bytesPerPage;
	if ( (slack = internalSize % bytesPerPage) != 0 )
		internalSize += bytesPerPage - slack;

	/*
	 * These will hold the addresses of two empty Slot structures, that
	 * can be used to hold information for any memory I create, and any
	 * memory that I mark free.
	 */
	emptySlots[0] = 0;
	emptySlots[1] = 0;

	/*
	 * The internal memory used by the allocator is currently
	 * inaccessible, so that errant programs won't scrawl on the
	 * allocator's arena. I'll un-protect it here so that I can make
	 * a new allocation. I'll re-protect it before I return.
	 */
	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	/*
	 * If I'm running out of empty slots, create some more before
	 * I don't have enough slots left to make an allocation.
	 */
	if ( !internalUse && unUsedSlots < 7 ) {
		allocateMoreSlots();
	}

	/*
	 * Iterate through all of the slot structures. Attempt to find a slot
	 * containing free memory of the exact right size. Accept a slot with
	 * more memory than we want, if the exact right size is not available.
	 * Find two slot structures that are not in use. We will need one if
	 * we split a buffer into free and allocated parts, and the second if
	 * we have to create new memory and mark it as free.
	 */
	for ( slot = allocationList, count = slotCount ; count > 0; count-- ) {
		if ( slot->mode == FREE
		 && slot->internalSize >= internalSize ) {
			if ( !fullSlot
			 || slot->internalSize < fullSlot->internalSize ) {
				fullSlot = slot;
				if ( slot->internalSize == internalSize
				 && emptySlots[0] )
					break;	/* All done. */
			}
		}
		else if ( slot->mode == NOT_IN_USE ) {
			if ( !emptySlots[0] )
				emptySlots[0] = slot;
			else if ( !emptySlots[1] )
				emptySlots[1] = slot;
			else if ( fullSlot
			 && fullSlot->internalSize == internalSize )
				break;	/* All done. */
		}
		slot++;
	}
	if ( !emptySlots[0] )
		internalError();

	if ( !fullSlot ) {
		/*
		 * I get here if I haven't been able to find a free buffer
		 * with all of the memory I need. I'll have to create more
		 * memory. I'll mark it all as free, and then split it into
		 * free and allocated portions later.
		 */
		size_t	chunkSize = MEMORY_CREATION_SIZE;

		if ( !emptySlots[1] )
			internalError();

		if ( chunkSize < internalSize )
			chunkSize = internalSize;

		if ( (slack = chunkSize % bytesPerPage) != 0 )
			chunkSize += bytesPerPage - slack;

		/* Use up one of the empty slots to make the full slot. */
		fullSlot = emptySlots[0];
		emptySlots[0] = emptySlots[1];
		fullSlot->internalAddress = Page_Create(chunkSize);
		fullSlot->internalSize = chunkSize;
		fullSlot->mode = FREE;
		unUsedSlots--;
	}

	/*
	 * If I'm allocating memory for the allocator's own data structures,
	 * mark it INTERNAL_USE so that no errant software will be able to
	 * free it.
	 */
	if ( internalUse )
		fullSlot->mode = INTERNAL_USE;
	else
		fullSlot->mode = ALLOCATED;

	/*
	 * If the buffer I've found is larger than I need, split it into
	 * an allocated buffer with the exact amount of memory I need, and
	 * a free buffer containing the surplus memory.
	 */
	if ( fullSlot->internalSize > internalSize ) {
		emptySlots[0]->internalSize
		 = fullSlot->internalSize - internalSize;
		emptySlots[0]->internalAddress
		 = ((char *)fullSlot->internalAddress) + internalSize;
		emptySlots[0]->mode = FREE;
		fullSlot->internalSize = internalSize;
		unUsedSlots--;
	}

	if ( !EF_PROTECT_BELOW ) {
		/*
		 * Arrange the buffer so that it is followed by an inaccessible
		 * memory page. A buffer overrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "live" page. */
		if ( internalSize - bytesPerPage > 0 )
			Page_AllowAccess(
			 fullSlot->internalAddress
			,internalSize - bytesPerPage);

		address += internalSize - bytesPerPage;

		/* Set up the "dead" page. */
		Page_DenyAccess(address, bytesPerPage);

		/* Figure out what address to give the user. */
		address -= userSize;
	}
	else {	/* EF_PROTECT_BELOW != 0 */
		/*
		 * Arrange the buffer so that it is preceded by an inaccessible
		 * memory page. A buffer underrun that touches that page will
		 * cause a segmentation fault.
		 */
		address = (char *)fullSlot->internalAddress;

		/* Set up the "dead" page. */
		Page_DenyAccess(address, bytesPerPage);

		address += bytesPerPage;

		/* Set up the "live" page. */
		if ( internalSize - bytesPerPage > 0 )
			Page_AllowAccess(address, internalSize - bytesPerPage);
	}

	fullSlot->userAddress = address;
	fullSlot->userSize = userSize;

	/*
	 * Make the pool's internal memory inaccessible, so that the program
	 * being debugged can't stomp on it.
	 */
	if ( !internalUse )
		Page_DenyAccess(allocationList, allocationListSize);

	return address;
}

/*
 * Find the slot structure for a user address.
 */
static Slot *
slotForUserAddress(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( slot->userAddress == address )
			return slot;
		slot++;
	}

	return 0;
}

/*
 * Find the slot structure for an internal address.
 */
static Slot *
slotForInternalAddress(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( slot->internalAddress == address )
			return slot;
		slot++;
	}
	return 0;
}

/*
 * Given the internal address of a buffer, find the buffer immediately
 * before that buffer in the address space. This is used by free() to
 * coalesce two free buffers into one.
 */
static Slot *
slotForInternalAddressPreviousTo(void * address)
{
	register Slot *	slot = allocationList;
	register size_t	count = slotCount;

	for ( ; count > 0; count-- ) {
		if ( ((char *)slot->internalAddress)
		 + slot->internalSize == address )
			return slot;
		slot++;
	}
	return 0;
}

extern C_LINKAGE void
ef_free(void * address)
{
	Slot *	slot;
	Slot *	previousSlot = 0;
	Slot *	nextSlot = 0;

	//printf(" ::free %p \n",address);
	lock();

	if ( address == 0 ) {
		unlock();
		return;
	}

	if ( allocationList == 0 )
		EF_Abort("free() called before first malloc().");

	if ( !noAllocationListProtection )
		Page_AllowAccess(allocationList, allocationListSize);

	slot = slotForUserAddress(address);

	if ( !slot )
		EF_Abort("free(%a): address not from malloc().", address);

	if ( slot->mode != ALLOCATED ) {
		if ( internalUse && slot->mode == INTERNAL_USE )
			/* Do nothing. */;
		else {
			EF_Abort(
			 "free(%a): freeing free memory."
			,address);
		}
	}

	if ( EF_PROTECT_FREE )
		slot->mode = PROTECTED;
	else
		slot->mode = FREE;

	if ( EF_FREE_WIPES )
		memset(slot->userAddress, 0xbd, slot->userSize);

	previousSlot = slotForInternalAddressPreviousTo(slot->internalAddress);
	nextSlot = slotForInternalAddress(
	 ((char *)slot->internalAddress) + slot->internalSize);

	if ( previousSlot
	 && (previousSlot->mode == FREE || previousSlot->mode == PROTECTED) ) {
		/* Coalesce previous slot with this one. */
		previousSlot->internalSize += slot->internalSize;
		if ( EF_PROTECT_FREE )
			previousSlot->mode = PROTECTED;

		slot->internalAddress = slot->userAddress = 0;
		slot->internalSize = slot->userSize = 0;
		slot->mode = NOT_IN_USE;
		slot = previousSlot;
		unUsedSlots++;
	}
	if ( nextSlot
	 && (nextSlot->mode == FREE || nextSlot->mode == PROTECTED) ) {
		/* Coalesce next slot with this one. */
		slot->internalSize += nextSlot->internalSize;
		nextSlot->internalAddress = nextSlot->userAddress = 0;
		nextSlot->internalSize = nextSlot->userSize = 0;
		nextSlot->mode = NOT_IN_USE;
		unUsedSlots++;
	}

	slot->userAddress = slot->internalAddress;
	slot->userSize = slot->internalSize;

	/*
	 * Free memory is _always_ set to deny access. When EF_PROTECT_FREE
	 * is true, free memory is never reallocated, so it remains access
	 * denied for the life of the process. When EF_PROTECT_FREE is false,
	 * the memory may be re-allocated, at which time access to it will be
	 * allowed again.
	 */
	Page_DenyAccess(slot->internalAddress, slot->internalSize);

	if ( !noAllocationListProtection )
		Page_DenyAccess(allocationList, allocationListSize);

	unlock();
}
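
/*
 * A minimal sketch (hypothetical test program, never compiled here) of the
 * use-after-free case that the protection in ef_free() is meant to catch:
 * once the buffer is freed its pages are access-denied, so the read below
 * faults on the offending instruction.
 */
#if 0
#include <stdlib.h>

int
main(void)
{
	char *	buffer = malloc(16);
	char	c;

	free(buffer);
	c = buffer[0];	/* Freed memory is inaccessible: immediate SIGSEGV. */
	return (int)c;
}
#endif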

extern C_LINKAGE void *
ef_realloc(void * oldBuffer, size_t newSize)
{
	void *	newBuffer = ef_malloc(newSize);

	lock();

	if ( oldBuffer ) {
		size_t	size;
		Slot *	slot;

		Page_AllowAccess(allocationList, allocationListSize);
		noAllocationListProtection = 1;

		slot = slotForUserAddress(oldBuffer);

		if ( slot == 0 )
			EF_Abort(
			 "realloc(%a, %d): address not from malloc()."
			,oldBuffer
			,newSize);

		if ( newSize < (size = slot->userSize) )
			size = newSize;

		if ( size > 0 )
			memcpy(newBuffer, oldBuffer, size);

		ef_free(oldBuffer);
		noAllocationListProtection = 0;
		Page_DenyAccess(allocationList, allocationListSize);

		if ( size < newSize )
			memset(&(((char *)newBuffer)[size]), 0, newSize - size);

		/* Internal memory was re-protected in free() */
	}
	unlock();

	return newBuffer;
}

extern C_LINKAGE void *
ef_malloc(size_t size)
{
	void *	allocation;

	if ( malloc_init == 0 ) {
		ef_init();
	}

	lock();
	allocation = ef_memalign(EF_ALIGNMENT, size);
	/* Fill the buffer with 0xaa so that uninitialized reads stand out. */
	memset(allocation, 0xaa, size);
#if 0
	/* Alternative: fill with random bytes instead of a fixed pattern. */
	int i;
	uint8_t *p = (uint8_t *)allocation;
	for (i = 0; i < size; i++) {
		p[i] = (rand() & 0xff);
	}
#endif

	unlock();
	//printf(":: alloc %p %d \n",allocation,(int)size);
	return allocation;
}

extern C_LINKAGE void *
ef_calloc(size_t nelem, size_t elsize)
{
	/* The nelem * elsize multiplication is not checked for overflow. */
	size_t	size = nelem * elsize;
	void *	allocation;

	lock();

	allocation = ef_malloc(size);
	memset(allocation, 0, size);
	unlock();

	return allocation;
}

/*
 * This will catch more bugs if you remove the page alignment, but it
 * will break some software.
 */
extern C_LINKAGE void *
ef_valloc (size_t size)
{
	void *	allocation;

	lock();
	allocation = ef_memalign(bytesPerPage, size);
	unlock();

	return allocation;
}


#define REPLACE_MALLOC

#ifdef REPLACE_MALLOC

extern C_LINKAGE void
free(void * address)
{
	ef_free(address);
}

extern C_LINKAGE void *
realloc(void * oldBuffer, size_t newSize)
{
	return (ef_realloc(oldBuffer, newSize));
}

extern C_LINKAGE void *
malloc(size_t size)
{
	return (ef_malloc(size));
}

extern C_LINKAGE void *
calloc(size_t nelem, size_t elsize)
{
	return (ef_calloc(nelem, elsize));
}

/*
 * Note that this wrapper ignores the requested alignment beyond the
 * default EF_ALIGNMENT used by ef_malloc(), and always reports success.
 */
extern C_LINKAGE int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	*memptr = ef_malloc(size);
	return (0);
}

/*
 * This will catch more bugs if you remove the page alignment, but it
 * will break some software.
 */
extern C_LINKAGE void *
valloc (size_t size)
{
	return (ef_valloc(size));
}
#endif
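
/*
 * When the wrappers above are compiled into a shared library, an existing
 * binary can be debugged without relinking by preloading it. The library
 * name below is an assumption about how this file is built, not something
 * this source defines:
 *
 *	LD_PRELOAD=./libefence.so EF_PROTECT_FREE=1 ./myprogram
 */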


extern C_LINKAGE void
ef_init(void)
{
	if ( malloc_init == 0 ) {
		pthread_mutexattr_t	Attr;

		malloc_init = 1;

		/* Use a recursive mutex so that allocator calls may nest. */
		pthread_mutexattr_init(&Attr);
		pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_RECURSIVE);

		if ( pthread_mutex_init(&mutex, &Attr) != 0 ) {
			exit(-1);
		}
		initialize();
	}
}