/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
   Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>

#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>	/* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif

/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
#undef malloc
#undef realloc
#undef calloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
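
/* Illustrative sketch (not part of the original source): with the
   renames above, code written in this file as `malloc (n)' compiles
   as `gmalloc (n)'; under HYBRID_MALLOC, other Emacs source files
   reach these allocators through the hybrid_* wrappers defined near
   the end of this file.  Guarded by `#if 0' because it is an example
   only.  */
#if 0
void *example_p = malloc (16);	/* Expands to gmalloc (16) here.  */
free (example_p);		/* Expands to gfree (example_p).  */
#endif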

#ifdef HYBRID_MALLOC
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char bss_sbrk_buffer[];
extern void *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#include <stddef.h>

#ifdef emacs
extern void emacs_abort (void);
#endif

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

#ifdef HYBRID_MALLOC
#define extern static
#endif

/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
extern void *aligned_alloc (size_t, size_t);
#ifdef MSDOS
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof (int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
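
/* Worked example (illustrative only): on hosts where int is at least
   32 bits, BLOCKLOG is 12 and BLOCKSIZE is 4096, so BLOCKIFY rounds a
   byte count up to whole blocks: BLOCKIFY (1) == 1,
   BLOCKIFY (4096) == 1, and BLOCKIFY (4097) == 2.  */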

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		size_t nfree;	/* Free frags in a fragmented block.  */
		size_t first;	/* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	size_t size;	/* Size (in blocks) of a free cluster.  */
	size_t next;	/* Index of next free cluster.  */
	size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
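
/* Layout sketch (illustrative, not from the original source): a large
   object spanning blocks 10..12 would be described as

       _heapinfo[10].busy.type = 0; _heapinfo[10].busy.info.size = 3;
       _heapinfo[11].busy.info.size = -1;   (first block is 1 back)
       _heapinfo[12].busy.info.size = -2;   (first block is 2 back)

   while a block split into 64-byte fragments has busy.type == 6 and
   tracks its free fragments through busy.info.frag.  */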

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
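
/* Round-trip sketch (illustrative only): block numbers are 1-based,
   so for any heap address A, ADDRESS (BLOCK (A)) yields the start of
   A's block: BLOCK (_heapbase) == 1, ADDRESS (1) == _heapbase, and
   BLOCK (_heapbase + BLOCKSIZE) == 2.  */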

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;	/* The address that aligned_alloc returned.  */
    void *exact;	/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()					\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_lock (&_malloc_mutex);	\
  } while (0)
#define UNLOCK()				\
  do {						\
    if (_malloc_thread_enabled_p)		\
      pthread_mutex_unlock (&_malloc_mutex);	\
  } while (0)
#define LOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_lock (&_aligned_blocks_mutex);	\
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()				\
  do {							\
    if (_malloc_thread_enabled_p)			\
      pthread_mutex_unlock (&_aligned_blocks_mutex);	\
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
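
/* Usage sketch (illustrative only; `example_check' is hypothetical):
   mcheck must be installed before the first allocation, after which
   mprobe can vet any live block.  Guarded by `#if 0'.  */
#if 0
static int
example_check (void)
{
  mcheck (NULL);	/* NULL: print on stderr and abort on error.  */
  char *p = malloc (32);
  enum mcheck_status s = mprobe (p);	/* MCHECK_OK if p is intact.  */
  free (p);
  return s == MCHECK_OK;
}
#endif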

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total;	/* Total size of the heap.  */
    size_t chunks_used;	/* Chunks allocated by the user.  */
    size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    size_t chunks_free;	/* Chunks in the free list.  */
    size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats (void);
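
/* Usage sketch (illustrative only): the members mirror the _chunks_*
   and _bytes_* instrumentation declared above.  Guarded by `#if 0'.  */
#if 0
struct mstats s = mstats ();
fprintf (stderr, "heap: %zu bytes total, %zu chunks in use\n",
	 s.bytes_total, s.chunks_used);
#endif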

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#undef extern

#ifdef __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

void *(*__morecore) (ptrdiff_t size) = __default_morecore;

#ifndef HYBRID_MALLOC

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#else

static struct list _fraghead[BLOCKLOG];

#endif /* HYBRID_MALLOC */

/* Number of info entries.  */
static size_t heapsize;

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
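
/* Worked example (illustrative only): if __morecore returns core at
   0x1100 and BLOCKSIZE is 4096 (0x1000), then adj is 0x100, so align
   requests 0xf00 further bytes and returns 0x2000, the next block
   boundary.  */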

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any
   threads or signal handlers have been set up, so we don't need thread
   protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
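      /* Worked example (illustrative only): for a request of 30 bytes
	 the loop halves 29 -> 14 -> 7 -> 3 -> 1 -> 0, leaving log == 5,
	 so the request is served from the 1 << 5 == 32-byte fragment
	 list.  */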

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
\f
#if !(defined (_LIBC) || defined (HYBRID_MALLOC))

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

extern void *_malloc (size_t);
extern void _free (void *);
extern void *_realloc (void *, size_t);

void *
_malloc (size_t size)
{
  return malloc (size);
}

void
_free (void *ptr)
{
  free (ptr);
}

void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */


#ifndef HYBRID_MALLOC
/* Debugging hook for free.  */
void (*__free_hook) (void *__ptr);

/* List of blocks allocated by aligned_alloc.  */
struct alignlist *_aligned_blocks = NULL;
#endif

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  const size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

void
free (void *ptr)
{
  void (*hook) (void *) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

#ifndef HYBRID_MALLOC
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

#ifndef HYBRID_MALLOC
/* Debugging hook for realloc.  */
void *(*__realloc_hook) (void *ptr, size_t size);
#endif

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_realloc_internal (void *ptr, size_t size)
{
  void *result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

void *
realloc (void *ptr, size_t size)
{
  void *(*hook) (void *, size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
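
/* Overflow sketch (illustrative only): with a 32-bit size_t,
   calloc (0x10000, 0x10001) would wrap nmemb * size around to
   0x10000 bytes; the check above sees bytes / size != nmemb and
   fails with ENOMEM instead of returning a too-short block.  */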
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library.  If not, see <http://www.gnu.org/licenses/>.  */

/* uClibc defines __GNU_LIBRARY__, but it is not completely
   compatible.  */
#if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
#define __sbrk	sbrk
#else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

extern void *__sbrk (ptrdiff_t increment);
#endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
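
/* Replacement sketch (illustrative only; `example_morecore' and
   `example_arena' are hypothetical): __morecore is the sole source of
   core for this allocator, so pointing it at another sbrk-like
   routine redirects the whole heap.  Guarded by `#if 0'.  */
#if 0
static char example_arena[1 << 20];
static char *example_brk = example_arena;

static void *
example_morecore (ptrdiff_t increment)
{
  char *result = example_brk;
  if (example_brk + increment < example_arena
      || example_arena + sizeof example_arena < example_brk + increment)
    return NULL;		/* Out of arena; like sbrk failing.  */
  example_brk += increment;
  return result;
}
/* Installed before the first malloc: __morecore = example_morecore;  */
#endif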
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef HYBRID_MALLOC
void *(*__memalign_hook) (size_t size, size_t alignment);
#endif

void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate the block with only as much excess as it
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)	/* Impossible unless interrupted.  */
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* It's conceivable we might have been so unlucky as to get
	     a different block with weaker alignment.  If so, this
	     block is too short to contain SIZE after alignment
	     correction.  So we must try again and get another block,
	     slightly larger.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
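
/* Worked example (illustrative only, with hypothetical addresses):
   for aligned_alloc (64, 200), the first malloc asks for 200 + 63
   bytes.  If it returns 0x1010, adj is 64 - 0x1010 % 64 == 48, the
   block is retried with 200 + 48 bytes, and the recorded pair is
   exact == 0x1010, aligned == 0x1040, which free later maps back.  */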

/* Note that memalign and posix_memalign are not used in Emacs.  */
#ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */

void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}

/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
#endif
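
/* Usage sketch (illustrative only): posix_memalign reports failure
   through its return value; the alignment must be a power of two
   multiple of sizeof (void *).  Guarded by `#if 0'.  */
#if 0
void *buf;
if (posix_memalign (&buf, 64, 4096) == 0)
  free (buf);
#endif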
1694
1695 /* Allocate memory on a page boundary.
1696 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1697
1698 This library is free software; you can redistribute it and/or
1699 modify it under the terms of the GNU General Public License as
1700 published by the Free Software Foundation; either version 2 of the
1701 License, or (at your option) any later version.
1702
1703 This library is distributed in the hope that it will be useful,
1704 but WITHOUT ANY WARRANTY; without even the implied warranty of
1705 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1706 General Public License for more details.
1707
1708 You should have received a copy of the GNU General Public
1709 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1710
1711 The author may be reached (Email) at the address mike@ai.mit.edu,
1712 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1713
1714 #ifndef HYBRID_MALLOC
1715 /* Allocate SIZE bytes on a page boundary. */
1716 extern void *valloc (size_t);
1717
1718 #if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1719 # include "getpagesize.h"
1720 #elif !defined getpagesize
1721 extern int getpagesize (void);
1722 #endif
1723
1724 static size_t pagesize;
1725
1726 void *
1727 valloc (size_t size)
1728 {
1729 if (pagesize == 0)
1730 pagesize = getpagesize ();
1731
1732 return aligned_alloc (pagesize, size);
1733 }
1734 #endif /* HYBRID_MALLOC */
1735
1736 #undef malloc
1737 #undef realloc
1738 #undef calloc
1739 #undef aligned_alloc
1740 #undef free
1741
1742 #ifdef HYBRID_MALLOC
1743 /* Declare system malloc and friends. */
1744 extern void *malloc (size_t size);
1745 extern void *realloc (void *ptr, size_t size);
1746 extern void *calloc (size_t nmemb, size_t size);
1747 extern void free (void *ptr);
1748 #ifdef HAVE_ALIGNED_ALLOC
1749 extern void *aligned_alloc (size_t alignment, size_t size);
1750 #elif defined HAVE_POSIX_MEMALIGN
1751 extern int posix_memalign (void **memptr, size_t alignment, size_t size);
1752 #endif
1753
1754 /* See the comments near the beginning of this file for explanations
1755 of the following functions. */
1756
1757 void *
1758 hybrid_malloc (size_t size)
1759 {
1760 if (DUMPED)
1761 return malloc (size);
1762 return gmalloc (size);
1763 }
1764
1765 void *
1766 hybrid_calloc (size_t nmemb, size_t size)
1767 {
1768 if (DUMPED)
1769 return calloc (nmemb, size);
1770 return gcalloc (nmemb, size);
1771 }
1772
1773 void
1774 hybrid_free (void *ptr)
1775 {
1776 if (!DUMPED)
1777 gfree (ptr);
1778 else if (!ALLOCATED_BEFORE_DUMPING (ptr))
1779 free (ptr);
1780 /* Otherwise the dumped emacs is trying to free something allocated
1781 before dumping; do nothing. */
1782 return;
1783 }
1784
1785 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
1786 void *
1787 hybrid_aligned_alloc (size_t alignment, size_t size)
1788 {
1789 if (!DUMPED)
1790 return galigned_alloc (alignment, size);
1791 /* The following is copied from alloc.c */
1792 #ifdef HAVE_ALIGNED_ALLOC
1793 return aligned_alloc (alignment, size);
1794 #else /* HAVE_POSIX_MEMALIGN */
1795 void *p;
1796 return posix_memalign (&p, alignment, size) == 0 ? p : 0;
1797 #endif
1798 }
1799 #endif
1800
1801 void *
1802 hybrid_realloc (void *ptr, size_t size)
1803 {
1804 void *result;
1805 int type;
1806 size_t block, oldsize;
1807
1808 if (!DUMPED)
1809 return grealloc (ptr, size);
1810 if (!ALLOCATED_BEFORE_DUMPING (ptr))
1811 return realloc (ptr, size);
1812
1813 /* The dumped emacs is trying to realloc storage allocated before
1814 dumping. We just malloc new space and copy the data. */
1815 if (size == 0 || ptr == NULL)
1816 return malloc (size);
1817 block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
1818 type = _heapinfo[block].busy.type;
1819 oldsize =
1820 type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
1821 : (size_t) 1 << type;
1822 result = malloc (size);
1823 if (result)
1824 return memcpy (result, ptr, min (oldsize, size));
1825 return result;
1826 }
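/* Worked example (added commentary, hypothetical values): with
   BLOCKSIZE == 4096 and PTR lying 8192 bytes past _heapbase, PTR is
   in heap block 8192 / 4096 + 1 == 3.  If _heapinfo[3].busy.type is
   0, the old storage spans busy.info.size whole blocks, so oldsize is
   that count times 4096; if type is, say, 5, the old storage is a
   fragment of 1 << 5 == 32 bytes.  Either way only min (oldsize,
   size) bytes are worth copying into the new block.  */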
1827
1828 #ifdef HYBRID_GET_CURRENT_DIR_NAME
1829 /* Defined in sysdep.c. */
1830 char *gget_current_dir_name (void);
1831
1832 char *
1833 hybrid_get_current_dir_name (void)
1834 {
1835 if (DUMPED)
1836 return get_current_dir_name ();
1837 return gget_current_dir_name ();
1838 }
1839 #endif
1840
1841 #else /* ! HYBRID_MALLOC */
1842
1843 void *
1844 malloc (size_t size)
1845 {
1846 return gmalloc (size);
1847 }
1848
1849 void *
1850 calloc (size_t nmemb, size_t size)
1851 {
1852 return gcalloc (nmemb, size);
1853 }
1854
1855 void
1856 free (void *ptr)
1857 {
1858 gfree (ptr);
1859 }
1860
1861 void *
1862 aligned_alloc (size_t alignment, size_t size)
1863 {
1864 return galigned_alloc (alignment, size);
1865 }
1866
1867 void *
1868 realloc (void *ptr, size_t size)
1869 {
1870 return grealloc (ptr, size);
1871 }
1872
1873 #endif /* HYBRID_MALLOC */
1874
1875 #ifdef GC_MCHECK
1876
1877 /* Standard debugging hooks for `malloc'.
1878 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1879 Written May 1989 by Mike Haertel.
1880
1881 This library is free software; you can redistribute it and/or
1882 modify it under the terms of the GNU General Public License as
1883 published by the Free Software Foundation; either version 2 of the
1884 License, or (at your option) any later version.
1885
1886 This library is distributed in the hope that it will be useful,
1887 but WITHOUT ANY WARRANTY; without even the implied warranty of
1888 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1889 General Public License for more details.
1890
1891 You should have received a copy of the GNU General Public
1892 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1893
1894 The author may be reached (Email) at the address mike@ai.mit.edu,
1895 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1896
1897 #include <stdio.h>
1898
1899 /* Old hook values. */
1900 static void (*old_free_hook) (void *ptr);
1901 static void *(*old_malloc_hook) (size_t size);
1902 static void *(*old_realloc_hook) (void *ptr, size_t size);
1903
1904 /* Function to call when something awful happens. */
1905 static void (*abortfunc) (enum mcheck_status);
1906
1907 /* Arbitrary magical numbers. */
1908 #define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1909 #define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
1910 #define MAGICBYTE ((char) 0xd7)
1911 #define MALLOCFLOOD ((char) 0x93)
1912 #define FREEFLOOD ((char) 0x95)
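/* Added note: `/' binds tighter than `<<', which binds tighter than
   `^', so MAGICWORD is (SIZE_MAX / 11) ^ ((SIZE_MAX / 13) << 3), and
   MAGICFREE likewise.  New blocks are flooded with MALLOCFLOOD so
   that reads of uninitialized storage show a recognizable pattern;
   freed blocks are flooded with FREEFLOOD to make use-after-free
   visible.  */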
1913
1914 struct hdr
1915 {
1916 size_t size; /* Exact size requested by user. */
1917 size_t magic; /* Magic number to check header integrity. */
1918 };
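/* Added diagram: layout of a block handed out while the hooks are
   installed.

     | size | magic | user data (SIZE bytes) | MAGICBYTE |
     ^ hdr          ^ hdr + 1, the pointer given to the user

   checkhdr below reports MCHECK_TAIL when the trailing MAGICBYTE has
   been overwritten, MCHECK_FREE when MAGIC is MAGICFREE (a double
   free), and MCHECK_HEAD for any other corruption of the header.  */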
1919
1920 static enum mcheck_status
1921 checkhdr (const struct hdr *hdr)
1922 {
1923 enum mcheck_status status;
1924 switch (hdr->magic)
1925 {
1926 default:
1927 status = MCHECK_HEAD;
1928 break;
1929 case MAGICFREE:
1930 status = MCHECK_FREE;
1931 break;
1932 case MAGICWORD:
1933 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1934 status = MCHECK_TAIL;
1935 else
1936 status = MCHECK_OK;
1937 break;
1938 }
1939 if (status != MCHECK_OK)
1940 (*abortfunc) (status);
1941 return status;
1942 }
1943
1944 static void
1945 freehook (void *ptr)
1946 {
1947 struct hdr *hdr;
1948
1949 if (ptr)
1950 {
1951 struct alignlist *l;
1952
1953 /* If the block was allocated by aligned_alloc, its real pointer
1954 to free is recorded in _aligned_blocks; find that. */
1955 PROTECT_MALLOC_STATE (0);
1956 LOCK_ALIGNED_BLOCKS ();
1957 for (l = _aligned_blocks; l != NULL; l = l->next)
1958 if (l->aligned == ptr)
1959 {
1960 l->aligned = NULL; /* Mark the slot in the list as free. */
1961 ptr = l->exact;
1962 break;
1963 }
1964 UNLOCK_ALIGNED_BLOCKS ();
1965 PROTECT_MALLOC_STATE (1);
1966
1967 hdr = ((struct hdr *) ptr) - 1;
1968 checkhdr (hdr);
1969 hdr->magic = MAGICFREE;
1970 memset (ptr, FREEFLOOD, hdr->size);
1971 }
1972 else
1973 hdr = NULL;
1974
1975 __free_hook = old_free_hook;
1976 free (hdr);
1977 __free_hook = freehook;
1978 }
1979
1980 static void *
1981 mallochook (size_t size)
1982 {
1983 struct hdr *hdr;
1984
1985 __malloc_hook = old_malloc_hook;
1986 hdr = malloc (sizeof *hdr + size + 1);
1987 __malloc_hook = mallochook;
1988 if (hdr == NULL)
1989 return NULL;
1990
1991 hdr->size = size;
1992 hdr->magic = MAGICWORD;
1993 ((char *) &hdr[1])[size] = MAGICBYTE;
1994 return memset (hdr + 1, MALLOCFLOOD, size);
1995 }
1996
1997 static void *
1998 reallochook (void *ptr, size_t size)
1999 {
2000 struct hdr *hdr = NULL;
2001 size_t osize = 0;
2002
2003 if (ptr)
2004 {
2005 hdr = ((struct hdr *) ptr) - 1;
2006 osize = hdr->size;
2007
2008 checkhdr (hdr);
2009 if (size < osize)
2010 memset ((char *) ptr + size, FREEFLOOD, osize - size);
2011 }
2012
2013 __free_hook = old_free_hook;
2014 __malloc_hook = old_malloc_hook;
2015 __realloc_hook = old_realloc_hook;
2016 hdr = realloc (hdr, sizeof *hdr + size + 1);
2017 __free_hook = freehook;
2018 __malloc_hook = mallochook;
2019 __realloc_hook = reallochook;
2020 if (hdr == NULL)
2021 return NULL;
2022
2023 hdr->size = size;
2024 hdr->magic = MAGICWORD;
2025 ((char *) &hdr[1])[size] = MAGICBYTE;
2026 if (size > osize)
2027 memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
2028 return hdr + 1;
2029 }
2030
2031 static void
2032 mabort (enum mcheck_status status)
2033 {
2034 const char *msg;
2035 switch (status)
2036 {
2037 case MCHECK_OK:
2038 msg = "memory is consistent, library is buggy";
2039 break;
2040 case MCHECK_HEAD:
2041 msg = "memory clobbered before allocated block";
2042 break;
2043 case MCHECK_TAIL:
2044 msg = "memory clobbered past end of allocated block";
2045 break;
2046 case MCHECK_FREE:
2047 msg = "block freed twice";
2048 break;
2049 default:
2050 msg = "bogus mcheck_status, library is buggy";
2051 break;
2052 }
2053 #ifdef __GNU_LIBRARY__
2054 __libc_fatal (msg);
2055 #else
2056 fprintf (stderr, "mcheck: %s\n", msg);
2057 fflush (stderr);
2058 # ifdef emacs
2059 emacs_abort ();
2060 # else
2061 abort ();
2062 # endif
2063 #endif
2064 }
2065
2066 static int mcheck_used = 0;
2067
2068 int
2069 mcheck (void (*func) (enum mcheck_status))
2070 {
2071 abortfunc = (func != NULL) ? func : &mabort;
2072
2073 /* These hooks may not be safely inserted if malloc is already in use. */
2074 if (!__malloc_initialized && !mcheck_used)
2075 {
2076 old_free_hook = __free_hook;
2077 __free_hook = freehook;
2078 old_malloc_hook = __malloc_hook;
2079 __malloc_hook = mallochook;
2080 old_realloc_hook = __realloc_hook;
2081 __realloc_hook = reallochook;
2082 mcheck_used = 1;
2083 }
2084
2085 return mcheck_used ? 0 : -1;
2086 }
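/* Usage sketch, added for this listing (not compiled; main is an
   invented test driver).  mcheck must run before the first
   allocation, otherwise the hooks are refused and -1 is returned.  */
#if 0
int
main (void)
{
  if (mcheck (NULL) != 0)	/* NULL installs the default mabort.  */
    return 1;
  char *p = malloc (16);
  if (!p)
    return 1;
  p[0] = 'x';
  mprobe (p);	/* MCHECK_OK while P is intact; writing p[16] would
		   clobber the trailing MAGICBYTE and make the next
		   probe report MCHECK_TAIL.  */
  free (p);
  return 0;
}
#endif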
2087
2088 enum mcheck_status
2089 mprobe (void *ptr)
2090 {
2091 return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
2092 }
2093
2094 #endif /* GC_MCHECK */