/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
# include "lisp.h"
#endif

#ifdef HAVE_MALLOC_H
# if 4 < __GNUC__ + (2 <= __GNUC_MINOR__)
#  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
# endif
# include <malloc.h>
#endif
#ifndef __MALLOC_HOOK_VOLATILE
# define __MALLOC_HOOK_VOLATILE volatile
#endif
#ifndef HAVE_MALLOC_H
extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
extern void *(*__morecore) (ptrdiff_t);
#endif

/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.  */
#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc do_not_call_me	/* Emacs never calls calloc.  */
#define aligned_alloc galigned_alloc
#define free gfree
#define malloc_info gmalloc_info
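
/* Illustrative example of the renaming above: under HYBRID_MALLOC,
   the definition of `malloc' later in this file compiles as if it
   read

     void *gmalloc (size_t size) { ... }

   so temacs links against these g-prefixed allocators, while the
   dumped Emacs resolves a plain `malloc' call to the system
   allocator via the hybrid_ wrappers near the end of this file.  */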

#ifdef HYBRID_MALLOC
# include "sheap.h"
# define DUMPED bss_sbrk_did_unexec
static bool
ALLOCATED_BEFORE_DUMPING (char *p)
{
  return bss_sbrk_buffer <= p && p < bss_sbrk_buffer + STATIC_HEAP_SIZE;
}
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Free a block.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
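
/* Worked example (illustrative): on a host with 32-bit int, INT_BIT
   is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096 bytes.  A request
   for 5000 bytes is "large" (more than BLOCKSIZE / 2) and occupies
   BLOCKIFY (5000) = (5000 + 4095) / 4096 = 2 whole blocks; a request
   for 100 bytes is "small" and receives one 128-byte fragment, the
   smallest power of two that fits.  */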

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                size_t nfree;	/* Free frags in a fragmented block.  */
                size_t first;	/* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;	/* Size (in blocks) of a free cluster.  */
        size_t next;	/* Index of next free cluster.  */
        size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
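
/* Worked example (illustrative): a busy large object spanning three
   blocks starting at heap index B is recorded as

     _heapinfo[B].busy.type = 0;  _heapinfo[B].busy.info.size = 3;
     _heapinfo[B + 1].busy.info.size = -1;
     _heapinfo[B + 2].busy.info.size = -2;

   so from any interior block I, the object's first block is
   I + _heapinfo[I].busy.info.size.  */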

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
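
/* Worked example (illustrative): with BLOCKSIZE = 4096, a pointer
   P = _heapbase + 8192 maps to BLOCK (P) = 8192 / 4096 + 1 = 3, and
   ADDRESS (3) = (3 - 1) * 4096 + _heapbase = P again.  Block numbers
   are 1-based so that index 0 can anchor the free list.  */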

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
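
/* Illustrative example: if malloc returns 0x5008 and the caller asked
   for 16-byte alignment, aligned_alloc hands out 0x5010 and records
   { .aligned = 0x5010, .exact = 0x5008 } here, so that a later
   free (0x5010) can recover and release the exact pointer.  */
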
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()						\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_malloc_mutex);		\
  } while (0)
#define UNLOCK()					\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_malloc_mutex);		\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

#ifdef GC_MCHECK

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);

#endif

#undef extern

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

/* Debugging hook for 'malloc'.  */
static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);

/* Replacements for traditional glibc malloc hooks, for platforms that
   do not already have these hooks.  Platforms with these hooks all
   used relaxed ref/def, so it is OK to define them here too.  */
void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
void *(*__morecore) (ptrdiff_t);

#ifndef HYBRID_MALLOC

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
#if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
static
#endif
size_t __malloc_extra_blocks;

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = _realloc_internal_nolock (_heapinfo,
                                              newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((size_t) BLOCK ((char *) newinfo
                              + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
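
      /* Worked example (illustrative): for a 27-byte request, SIZE
         becomes 26 after the decrement and is halved to 13, 6, 3, 1
         before reaching 0, leaving LOG = 5: a 32-byte fragment, the
         smallest power of two that holds 27 bytes.  */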

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first =
              (uintptr_t) next->next % BLOCKSIZE >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of gmalloc_hook to an automatic variable in case
     gmalloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not the right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
  hook = gmalloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Debugging hook for free.  */
static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);

#ifndef HYBRID_MALLOC

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;	/* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          size_t info_block = BLOCK (_heapinfo);
          size_t info_blocks = _heapinfo[info_block].busy.info.size;
          size_t prev_block = _heapinfo[block].free.prev;
          size_t prev_blocks = _heapinfo[prev_block].free.size;
          size_t next_block = _heapinfo[block].free.next;
          size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first =
            (uintptr_t) ptr % BLOCKSIZE >> type;
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like 'free' but don't call a hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = gfree_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Debugging hook for realloc.  */
static void *(*grealloc_hook) (void *, size_t);

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  void *previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
          size <= (size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = grealloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
#ifndef calloc
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
#endif
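
/* Worked example (illustrative): the overflow check above matters on
   hosts where nmemb * size wraps.  With a 32-bit size_t,
   calloc (0x10000, 0x10001) computes bytes = 0x10000 (the true
   product 2^32 + 0x10000, truncated), and bytes / size = 0 != nmemb,
   so the call correctly fails with ENOMEM instead of returning an
   undersized block.  */
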
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
static void *
gdefault_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
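
/* Illustrative note on the __morecore contract, which follows sbrk's
   behavior: (*__morecore) (0) returns the current break without
   moving it; a positive INCREMENT extends the data space and returns
   the start of the newly added region; a negative INCREMENT releases
   space back to the system.  align () and get_contiguous_space ()
   above rely on exactly these semantics.  */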

void *(*__morecore) (ptrdiff_t) = gdefault_morecore;

/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
        {
          /* Reallocate the block with only as much excess as it
             needs.  */
          free (result);
          result = malloc (size + adj);
          if (result == NULL)	/* Impossible unless interrupted.  */
            return NULL;

          lastadj = adj;
          adj = alignment - (uintptr_t) result % alignment;
          if (adj == alignment)
            adj = 0;
          /* It's conceivable we might have been so unlucky as to get
             a different block with weaker alignment.  If so, this
             block is too short to contain SIZE after alignment
             correction.  So we must try again and get another block,
             slightly larger.  */
        } while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = malloc (sizeof *l);
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          free (result);
          result = NULL;
        }
    }

  return result;
}
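
/* Illustrative note: the `- size < alignment' test above exploits
   unsigned wraparound; `- size' equals SIZE_MAX - size + 1, so the
   test rejects any SIZE for which size + alignment - 1 would
   overflow.  A hypothetical usage sketch (not compiled here):  */
#if 0
static void
aligned_alloc_demo (void)
{
  void *p = aligned_alloc (64, 100);	/* (uintptr_t) p % 64 == 0 */
  free (p);	/* free recovers the exact pointer via _aligned_blocks.  */
}
#endif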

/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
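
/* Worked example (illustrative): `alignment & (alignment - 1)' is
   the usual power-of-two check.  For alignment = 24, 24 & 23 is 16,
   which is nonzero, so posix_memalign returns EINVAL; for
   alignment = 32, 32 & 31 is 0 and 32 is a multiple of
   sizeof (void *), so the request is accepted.  */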
#endif

/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef HYBRID_MALLOC
/* Allocate SIZE bytes on a page boundary.  */
extern void *valloc (size_t);

#if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
# include "getpagesize.h"
#elif !defined getpagesize
extern int getpagesize (void);
#endif

static size_t pagesize;

void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
#endif /* HYBRID_MALLOC */

#undef malloc
#undef realloc
#undef calloc
#undef aligned_alloc
#undef free

#ifdef HYBRID_MALLOC
/* Declare system malloc and friends.  */
extern void *malloc (size_t size);
extern void *realloc (void *ptr, size_t size);
extern void free (void *ptr);
#ifdef HAVE_ALIGNED_ALLOC
extern void *aligned_alloc (size_t alignment, size_t size);
#elif defined HAVE_POSIX_MEMALIGN
extern int posix_memalign (void **memptr, size_t alignment, size_t size);
#endif

/* See the comments near the beginning of this file for explanations
   of the following functions.  */

void *
hybrid_malloc (size_t size)
{
  if (DUMPED)
    return malloc (size);
  return gmalloc (size);
}

void
hybrid_free (void *ptr)
{
  if (!DUMPED)
    gfree (ptr);
  else if (!ALLOCATED_BEFORE_DUMPING (ptr))
    free (ptr);
  /* Otherwise the dumped emacs is trying to free something allocated
     before dumping; do nothing.  */
  return;
}

#if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
void *
hybrid_aligned_alloc (size_t alignment, size_t size)
{
  if (!DUMPED)
    return galigned_alloc (alignment, size);
  /* The following is copied from alloc.c  */
#ifdef HAVE_ALIGNED_ALLOC
  return aligned_alloc (alignment, size);
#else /* HAVE_POSIX_MEMALIGN */
  void *p;
  return posix_memalign (&p, alignment, size) == 0 ? p : 0;
#endif
}
#endif

void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping.  We just malloc new space and copy the data.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}

#else /* ! HYBRID_MALLOC */

void *
malloc (size_t size)
{
  return gmalloc (size);
}

void *
calloc (size_t nmemb, size_t size)
{
  return gcalloc (nmemb, size);
}

void
free (void *ptr)
{
  gfree (ptr);
}

void *
aligned_alloc (size_t alignment, size_t size)
{
  return galigned_alloc (alignment, size);
}

void *
realloc (void *ptr, size_t size)
{
  return grealloc (ptr, size);
}

#endif /* HYBRID_MALLOC */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <stdio.h>

/* Old hook values.  */
static void (*old_free_hook) (void *ptr);
static void *(*old_malloc_hook) (size_t size);
static void *(*old_realloc_hook) (void *ptr, size_t size);

/* Function to call when something awful happens.  */
static void (*abortfunc) (enum mcheck_status);

/* Arbitrary magical numbers.  */
#define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
#define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
#define MAGICBYTE ((char) 0xd7)
#define MALLOCFLOOD ((char) 0x93)
#define FREEFLOOD ((char) 0x95)

struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* Magic number to check header integrity.  */
  };
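
/* Illustrative layout of a block guarded by these hooks:

     [ struct hdr | SIZE bytes of user data | MAGICBYTE ]

   hdr.magic holds MAGICWORD while the block is live and MAGICFREE
   once it has been freed; the single MAGICBYTE just past the user
   data catches writes off the end of the block, and checkhdr below
   reports which guard was damaged.  */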

static enum mcheck_status
checkhdr (const struct hdr *hdr)
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
         to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == ptr)
          {
            l->aligned = NULL;	/* Mark the slot in the list as free.  */
            ptr = l->exact;
            break;
          }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  gfree_hook = old_free_hook;
  free (hdr);
  gfree_hook = freehook;
}

static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  gmalloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  gmalloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  return memset (hdr + 1, MALLOCFLOOD, size);
}

static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  gfree_hook = old_free_hook;
  gmalloc_hook = old_malloc_hook;
  grealloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  gfree_hook = freehook;
  gmalloc_hook = mallochook;
  grealloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}

static void
mabort (enum mcheck_status status)
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
# ifdef emacs
  emacs_abort ();
# else
  abort ();
# endif
#endif
}

static int mcheck_used = 0;

int
mcheck (void (*func) (enum mcheck_status))
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = gfree_hook;
      gfree_hook = freehook;
      old_malloc_hook = gmalloc_hook;
      gmalloc_hook = mallochook;
      old_realloc_hook = grealloc_hook;
      grealloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (void *ptr)
{
  return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
}

#endif /* GC_MCHECK */