/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
   2005, 2006, 2007 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#ifdef HAVE_GTK_AND_PTHREAD
#define USE_PTHREAD
#endif

#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
#undef PP
#define PP(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef PP
#define PP(args) ()
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#ifdef STDC_HEADERS
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#ifdef __GNUC__
#include <stddef.h>
#ifdef __SIZE_TYPE__
#define __malloc_size_t __SIZE_TYPE__
#endif
#endif
#ifndef __malloc_size_t
#define __malloc_size_t unsigned int
#endif
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif

#ifndef FREE_RETURN_TYPE
#define FREE_RETURN_TYPE void
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc PP ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign PP ((__malloc_size_t __alignment,
                             __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc PP ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT (CHAR_BIT * sizeof(int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
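
/* Illustrative sketch (not part of the original file): with INT_BIT > 16,
   BLOCKLOG is 12, so BLOCKSIZE is 4096.  A request for 10000 bytes spans
   BLOCKIFY (10000) == 3 whole blocks, while a request for 100 bytes is
   rounded up to the next power of two, 128 == 1 << 7, and served as a
   fragment.  The loop below mirrors the logarithm computation later used
   by _malloc_internal_nolock.  */
#if 0 /* Example only; not compiled.  */
static int
example_fragment_log (__malloc_size_t size)
{
  /* Compute ceil(log2(size)): 100 -> 7, so the fragment is 128 bytes.  */
  int log = 1;
  --size;
  while ((size /= 2) != 0)
    ++log;
  return log;
}
#endif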

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS 8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                __malloc_size_t nfree; /* Free frags in a fragmented block.  */
                __malloc_size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            __malloc_ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        __malloc_size_t size;   /* Size (in blocks) of a free cluster.  */
        __malloc_size_t next;   /* Index of next free cluster.  */
        __malloc_size_t prev;   /* Index of previous free cluster.  */
      } free;
  } malloc_info;

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
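
/* Illustrative sketch (not part of the original file): BLOCK and ADDRESS
   are inverses.  Block numbers are 1-based, so index 0 of _heapinfo is
   free to anchor the free list.  */
#if 0 /* Example only; not compiled.  */
static void
example_block_roundtrip (void)
{
  char *p = _heapbase + 5 * BLOCKSIZE;  /* Start of the sixth block.  */
  __malloc_size_t b = BLOCK (p);        /* b == 6.  */
  __ptr_t q = ADDRESS (b);              /* q == p again.  */
  (void) q;
}
#endif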

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;            /* The address that memalign returned.  */
    __ptr_t exact;              /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal PP ((__ptr_t __ptr));
extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal_nolock PP ((__ptr_t __ptr));

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
#define LOCK() pthread_mutex_lock (&_malloc_mutex)
#define UNLOCK() pthread_mutex_unlock (&_malloc_mutex)
#define LOCK_ALIGNED_BLOCKS() pthread_mutex_lock (&_aligned_blocks_mutex)
#define UNLOCK_ALIGNED_BLOCKS() pthread_mutex_unlock (&_aligned_blocks_mutex)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) PP ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;
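
/* Illustrative sketch (not part of the original file): a hypothetical
   replacement for `__morecore' that carves contiguous pieces out of a
   fixed static arena instead of calling sbrk.  Successive calls return
   contiguous memory, as the interface requires; a negative size gives
   memory back, and the old break is returned, as with sbrk.  */
#if 0 /* Example only; not compiled.  */
static char example_arena[1 << 20];
static __malloc_size_t example_brk;

static __ptr_t
example_morecore (__malloc_ptrdiff_t size)
{
  if (size < 0 && (__malloc_size_t) -size > example_brk)
    return NULL;
  if (size > 0 && example_brk + size > sizeof example_arena)
    return NULL;
  example_brk += size;
  return example_arena + example_brk - size;  /* The old break.  */
}
/* Installed before the first allocation: __morecore = example_morecore;  */
#endif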

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize PP ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) PP ((void));
extern void (*__free_hook) PP ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
                                       __malloc_size_t __alignment));
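
/* Illustrative sketch (not part of the original file): a hypothetical
   debugging hook that counts allocations and forwards to the internal
   allocator.  `malloc' below calls the hook instead of _malloc_internal
   whenever the hook pointer is non-null.  */
#if 0 /* Example only; not compiled.  */
static __malloc_size_t example_alloc_count;

static __ptr_t
example_malloc_hook (__malloc_size_t size)
{
  ++example_alloc_count;
  return _malloc_internal (size);
}
/* Installed from an initialize hook: __malloc_hook = example_malloc_hook;  */
#endif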

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
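
/* Illustrative sketch (not part of the original file): typical mcheck
   usage.  mcheck must run before the very first malloc; mprobe then
   reports the consistency of any live block.  */
#if 0 /* Example only; not compiled.  */
static void
example_mcheck_usage (void)
{
  mcheck (NULL);                /* Default handler aborts on error.  */
  {
    char *p = malloc (32);
    if (p && mprobe (p) != MCHECK_OK)
      abort ();
    free (p);
  }
}
#endif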

/* Activate a standard collection of tracing hooks.  */
extern void mtrace PP ((void));
extern void muntrace PP ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;  /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;  /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats PP ((void));
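
/* Illustrative sketch (not part of the original file): reporting heap
   statistics with mstats.  */
#if 0 /* Example only; not compiled.  */
#include <stdio.h>

static void
example_print_mstats (void)
{
  struct mstats s = mstats ();
  fprintf (stderr, "heap: %lu bytes total, %lu used, %lu free\n",
           (unsigned long) s.bytes_total,
           (unsigned long) s.bytes_used,
           (unsigned long) s.bytes_free);
}
#endif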

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings PP ((__ptr_t __start,
                                 void (*__warnfun) PP ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free PP ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
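
/* Illustrative sketch (not part of the original file): the relocating
   allocator owns the handle, so the block's address is reread through
   *handle after any call that may have moved it.  */
#if 0 /* Example only; not compiled.  */
static void
example_r_alloc_usage (void)
{
  __ptr_t handle = NULL;
  if (r_alloc (&handle, 100) == NULL)
    return;
  ((char *) handle)[0] = 'x';           /* Use the block via the handle.  */
  if (r_re_alloc (&handle, 200) != NULL)
    ((char *) handle)[199] = 'y';       /* Block may have moved; reread.  */
  r_alloc_free (&handle);
}
#endif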


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
#if defined(CYGWIN)
extern __ptr_t bss_sbrk PP ((ptrdiff_t __size));
extern int bss_sbrk_did_unexec;
#endif
__ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) PP ((void));
void (*__after_morecore_hook) PP ((void));

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static __malloc_size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)

#else
#define PROTECT_MALLOC_STATE(PROT) /* empty */
#endif


/* Aligned allocation.  */
static __ptr_t align PP ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows a
     signed int type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if ((__malloc_ptrdiff_t)size < 0)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
                                                  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      /* If the pad request fails, result + adj would point past the
         memory we actually own, so fail the whole allocation.  */
      if (new == NULL)
        return NULL;
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
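
/* Worked example (not part of the original file): suppose BLOCKSIZE is
   4096 and __morecore returns 0x804a234.  Then adj = 0x234, so align
   requests BLOCKSIZE - 0x234 = 0xdcc further bytes and returns
   0x804a234 + 0xdcc = 0x804b000, the next block boundary.  The padding
   bytes are simply sacrificed.  */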

/* Get SIZE bytes, if we can get them starting at POSITION.
   Return the address of the space we got.
   If we cannot get space at POSITION, fail and return 0.  */
static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo PP ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
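
/* Worked example (not part of the original file): a three-block object
   whose first block is B gets busy.info.size entries {3, -1, -2} at
   B, B+1, B+2.  From any interior block, adding its (negative) size
   recovers B, which is how the back-pointers above are used.  */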

#ifdef USE_PTHREAD
static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static void
malloc_initialize_1 ()
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* We don't use recursive mutex because pthread_mutexattr_init may
     call malloc internally.  */
#if 0 /* defined (USE_PTHREAD) */
  {
    pthread_mutexattr_t attr;

    pthread_mutexattr_init (&attr);
    pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init (&_malloc_mutex, &attr);
    pthread_mutexattr_destroy (&attr);
  }
#endif

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
#ifdef USE_PTHREAD
  pthread_once (&malloc_init_once_control, malloc_initialize_1);
#else
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();
#endif

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore_nolock PP ((__malloc_size_t));
static __ptr_t
morecore_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = (malloc_info *) _realloc_internal_nolock
            (_heapinfo, newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((__malloc_size_t) BLOCK ((char *) newinfo
                                       + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal_nolock (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;

      /* Look in the fragment lists for a
         free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters.  */
          result = (__ptr_t) next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first = (unsigned long int)
              ((unsigned long int) ((char *) next->next - (char *) NULL)
               % BLOCKSIZE) >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list.  */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list.  */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  __ptr_t (*hook) (__malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) PP ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `_free_internal' but don't lock mutex.  */
void
_free_internal_nolock (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
        {
          /* The end of the malloc heap is at the end of accessible core.
             It's possible that moving _heapinfo will allow us to
             return some space to the system.  */

          __malloc_size_t info_block = BLOCK (_heapinfo);
          __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
          __malloc_size_t prev_block = _heapinfo[block].free.prev;
          __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
          __malloc_size_t next_block = _heapinfo[block].free.next;
          __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

          if (/* Win if this block being freed is last in core, the info table
                 is just before it, the previous free block is just before the
                 info table, and the two free blocks together form a useful
                 amount to return to the system.  */
              (block + blocks == _heaplimit &&
               info_block + info_blocks == block &&
               prev_block != 0 && prev_block + prev_blocks == info_block &&
               blocks + prev_blocks >= lesscore_threshold) ||
              /* Nope, not the case.  We can also win if this block being
                 freed is just before the info table, and the table extends
                 to the end of core or is followed only by a free block,
                 and the total free space is worth returning to the system.  */
              (block + blocks == info_block &&
               ((info_block + info_blocks == _heaplimit &&
                 blocks >= lesscore_threshold) ||
                (info_block + info_blocks == next_block &&
                 next_block + next_blocks == _heaplimit &&
                 blocks + next_blocks >= lesscore_threshold)))
              )
            {
              malloc_info *newinfo;
              __malloc_size_t oldlimit = _heaplimit;

              /* Free the old info table, clearing _heaplimit to avoid
                 recursion into this code.  We don't want to return the
                 table's blocks to the system before we have copied them to
                 the new location.  */
              _heaplimit = 0;
              _free_internal_nolock (_heapinfo);
              _heaplimit = oldlimit;

              /* Tell malloc to search from the beginning of the heap for
                 free blocks, so it doesn't reuse the ones just freed.  */
              _heapindex = 0;

              /* Allocate new space for the info table and move its data.  */
              newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
                                                                 * BLOCKSIZE);
              PROTECT_MALLOC_STATE (0);
              memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
              _heapinfo = newinfo;

              /* We should now have coalesced the free block with the
                 blocks freed from the old info table.  Examine the entire
                 trailing free block to decide below whether to return some
                 to the system.  */
              block = _heapinfo[0].free.prev;
              blocks = _heapinfo[block].free.size;
            }

          /* Now see if we can return stuff to the system.  */
          if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
            {
              register __malloc_size_t bytes = blocks * BLOCKSIZE;
              _heaplimit -= blocks;
              (*__morecore) (-bytes);
              _heapinfo[_heapinfo[block].free.prev].free.next
                = _heapinfo[block].free.next;
              _heapinfo[_heapinfo[block].free.next].free.prev
                = _heapinfo[block].free.prev;
              block = _heapinfo[block].free.prev;
              --_chunks_free;
              _bytes_free -= bytes;
            }
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
          _free_internal_nolock (ADDRESS (block));
#else
          free (ADDRESS (block));
#endif
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
}

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}

/* Return memory to the heap.  */

FREE_RETURN_TYPE
free (ptr)
     __ptr_t ptr;
{
  void (*hook) (__ptr_t) = __free_hook;

  if (hook != NULL)
    (*hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif



/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
         nonoverlapping chunks of TO - FROM bytes each.  However, if
         TO - FROM is small, then the bcopy function call overhead
         makes this not worth it.  The crossover point could be about
         anywhere.  Since I don't think the obvious copy loop is too
         bad, I'm trying to err in its favor.  */
      if (to - from < 64)
        {
          do
            *--endt = *--endf;
          while (endf != from);
        }
      else
        {
          for (;;)
            {
              endt -= (to - from);
              endf -= (to - from);

              if (endt < to)
                break;

              bcopy (endf, endt, to - from);
            }

          /* If SIZE wasn't a multiple of TO - FROM, there will be a
             little left over.  The amount left over is
             (endt + (to - from)) - to, which is endt - from.  */
          bcopy (from, to, endt - from);
        }
    }
}
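
/* Worked example (not part of the original file): copying SIZE = 950
   bytes forward by TO - FROM = 100.  The loop above moves nine 100-byte
   chunks starting from the end (destination offsets 850 down to 50);
   each bcopy reads memory that has not yet been overwritten.  The final
   bcopy then handles the remaining endt - from = 50 bytes.  */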
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal_nolock (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal_nolock (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal_nolock (ptr);
              goto out;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list.  */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal_nolock (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal_nolock (ptr);
          result = _malloc_internal_nolock (size);
          PROTECT_MALLOC_STATE (0);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
                  _free_internal_nolock (previous);
                }
              goto out;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old.  */
          result = _malloc_internal_nolock (size);
          if (result == NULL)
            goto out;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal_nolock (ptr);
        }
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;

  LOCK ();
  result = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t (*hook) (__ptr_t, __malloc_size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  hook = __realloc_hook;
  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result;
  register __malloc_size_t bytes = nmemb * size;

  /* Guard against overflow in the multiplication: if NMEMB * SIZE
     wrapped around, refuse the request rather than return a block
     that is too small for the caller's array.  */
  if (size != 0 && bytes / size != nmemb)
    return NULL;

  result = malloc (bytes);
  if (result != NULL)
    (void) memset (result, 0, bytes);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with the GNU C Library; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result;
#if defined(CYGWIN)
  if (!bss_sbrk_did_unexec)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1
/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why; they just told me to do it.  */
1733
1734 #else
1735
1736 __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1737 __malloc_size_t __alignment));
1738
1739 __ptr_t
1740 memalign (alignment, size)
1741 __malloc_size_t alignment;
1742 __malloc_size_t size;
1743 {
1744 __ptr_t result;
1745 unsigned long int adj, lastadj;
1746 __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
1747
1748 if (hook)
1749 return (*hook) (alignment, size);
1750
1751 /* Allocate a block with enough extra space to pad the block with up to
1752 (ALIGNMENT - 1) bytes if necessary. */
1753 result = malloc (size + alignment - 1);
1754 if (result == NULL)
1755 return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment, i.e. how many bytes must be
     skipped to reach the next ALIGNMENT boundary.  */
  adj = alignment
    - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
  if (adj == alignment)
    adj = 0;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (size + adj);
      if (result == NULL)       /* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = alignment
        - (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      if (adj == alignment)
        adj = 0;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l != NULL)
            {
              l->next = _aligned_blocks;
              _aligned_blocks = l;
            }
        }
      if (l != NULL)
        {
          l->exact = result;
          result = l->aligned = (char *) result + adj;
        }
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
        {
          /* No room to remember the aligned pointer; give the block
             back rather than return memory `free' cannot handle.  */
          free (result);
          result = NULL;
        }
    }

  return result;
}
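
/* Usage sketch (illustrative only; `aligned_buffer_example' is a
   hypothetical caller, not part of this file).  The pointer obtained
   from `memalign' may point into the middle of a malloc'd block, so
   it must be released with the ordinary `free', which looks the
   pointer up in `_aligned_blocks' first.  Kept under `#if 0'.  */
#if 0
static __ptr_t
aligned_buffer_example (void)
{
  /* 1024 bytes aligned on a 64-byte boundary, e.g. for a structure
     that must not straddle cache lines.  */
  __ptr_t buf = memalign ((__malloc_size_t) 64, (__malloc_size_t) 1024);
  return buf;   /* The caller eventually does free (buf).  */
}
#endif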

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int.  */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Page size, fetched lazily on the first call to `valloc'.  */
static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
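
/* Usage sketch (illustrative only; `page_buffer_example' is
   hypothetical).  `valloc' is just `memalign' with the alignment
   fixed at the system page size, e.g. for buffers handed to
   page-granular interfaces.  Kept under `#if 0'.  */
#if 0
static __ptr_t
page_buffer_example (void)
{
  /* One page-aligned page of memory; released with free.  */
  return valloc ((__malloc_size_t) __getpagesize ());
}
#endif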

#endif /* Not ELIDE_VALLOC.  */

#ifdef GC_MCHECK

/* Standard debugging hooks for `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this library; see the file COPYING.  If
   not, write to the Free Software Foundation, Inc., 51 Franklin Street,
   Fifth Floor, Boston, MA 02110-1301, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifdef emacs
#include <stdio.h>
#else
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#include <stdio.h>
#endif
#endif

/* Old hook values, restored around calls into the real allocator.  */
static void (*old_free_hook) PP ((__ptr_t ptr));
static __ptr_t (*old_malloc_hook) PP ((__malloc_size_t size));
static __ptr_t (*old_realloc_hook) PP ((__ptr_t ptr, __malloc_size_t size));

/* Function to call when something awful happens.  */
static void (*abortfunc) PP ((enum mcheck_status));

/* Arbitrary magical numbers.  */
#define MAGICWORD	0xfedabeeb
#define MAGICFREE	0xd8675309
#define MAGICBYTE	((char) 0xd7)
#define MALLOCFLOOD	((char) 0x93)
#define FREEFLOOD	((char) 0x95)

struct hdr
{
  __malloc_size_t size;		/* Exact size requested by user.  */
  unsigned long int magic;	/* Magic number to check header integrity.  */
};
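
/* Layout of a checked block of SIZE user bytes, as built by
   `mallochook' below (a sketch, not code):

     [ struct hdr: size, MAGICWORD ][ SIZE user bytes ][ MAGICBYTE ]

   The pointer handed to the user is &hdr[1]; the single MAGICBYTE
   trailer is how `checkhdr' detects writes just past the end of the
   block, and MAGICFREE overwrites MAGICWORD when the block is freed
   so that double frees can be recognized.  */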

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#define flood memset
#else
static void flood PP ((__ptr_t, int, __malloc_size_t));
static void
flood (ptr, val, size)
     __ptr_t ptr;
     int val;
     __malloc_size_t size;
{
  char *cp = ptr;
  while (size--)
    *cp++ = val;
}
#endif

static enum mcheck_status checkhdr PP ((const struct hdr *));
static enum mcheck_status
checkhdr (hdr)
     const struct hdr *hdr;
{
  enum mcheck_status status;
  switch (hdr->magic)
    {
    default:
      status = MCHECK_HEAD;
      break;
    case MAGICFREE:
      status = MCHECK_FREE;
      break;
    case MAGICWORD:
      if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
        status = MCHECK_TAIL;
      else
        status = MCHECK_OK;
      break;
    }
  if (status != MCHECK_OK)
    (*abortfunc) (status);
  return status;
}

static void freehook PP ((__ptr_t));
static void
freehook (ptr)
     __ptr_t ptr;
{
  struct hdr *hdr;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;
      flood (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Restore the old hook while calling the real `free', so that the
     hook is not entered recursively.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}

static __ptr_t mallochook PP ((__malloc_size_t));
static __ptr_t
mallochook (size)
     __malloc_size_t size;
{
  struct hdr *hdr;

  /* Allocate room for the header, the user's data, and the one-byte
     trailer, through the real `malloc'.  */
  __malloc_hook = old_malloc_hook;
  hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
  return (__ptr_t) (hdr + 1);
}

static __ptr_t reallochook PP ((__ptr_t, __malloc_size_t));
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
        flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Restore all the old hooks while calling the real `realloc'.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}

static void
mabort (status)
     enum mcheck_status status;
{
  const char *msg;
  switch (status)
    {
    case MCHECK_OK:
      msg = "memory is consistent, library is buggy";
      break;
    case MCHECK_HEAD:
      msg = "memory clobbered before allocated block";
      break;
    case MCHECK_TAIL:
      msg = "memory clobbered past end of allocated block";
      break;
    case MCHECK_FREE:
      msg = "block freed twice";
      break;
    default:
      msg = "bogus mcheck_status, library is buggy";
      break;
    }
#ifdef __GNU_LIBRARY__
  __libc_fatal (msg);
#else
  fprintf (stderr, "mcheck: %s\n", msg);
  fflush (stderr);
  abort ();
#endif
}

static int mcheck_used = 0;

int
mcheck (func)
     void (*func) PP ((enum mcheck_status));
{
  abortfunc = (func != NULL) ? func : &mabort;

  /* These hooks may not be safely inserted if malloc is already in use.  */
  if (!__malloc_initialized && !mcheck_used)
    {
      old_free_hook = __free_hook;
      __free_hook = freehook;
      old_malloc_hook = __malloc_hook;
      __malloc_hook = mallochook;
      old_realloc_hook = __realloc_hook;
      __realloc_hook = reallochook;
      mcheck_used = 1;
    }

  return mcheck_used ? 0 : -1;
}

enum mcheck_status
mprobe (__ptr_t ptr)
{
  /* PTR is the pointer the user got from `malloc'; the header to
     verify sits immediately before it.  */
  return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
}
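
/* Usage sketch (illustrative only; `mcheck_example' is hypothetical).
   `mcheck' must run before the first `malloc' so that every block
   carries a header; afterwards `mprobe' can validate any live
   pointer on demand.  Kept under `#if 0'.  */
#if 0
static void
mcheck_example (void)
{
  /* NULL means: use the default `mabort' on corruption.  */
  if (mcheck (NULL) == 0)
    {
      char *p = (char *) malloc (16);
      if (p != NULL && mprobe ((__ptr_t) p) == MCHECK_OK)
        free (p);
    }
}
#endif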

#endif /* GC_MCHECK */

/* arch-tag: 93dce5c0-f49a-41b5-86b1-f91c4169c02e
   (do not change this comment) */