/* Source: GNU Emacs repository, src/gmalloc.c
   (snapshot taken at commit "Standardize on ASCII without @sc.",
   via the code.delx.au gnu-emacs blob viewer).  */
1 /* This file is no longer automatically generated from libc. */
2
3 #define _MALLOC_INTERNAL
4
5 /* The malloc headers and source files from the C library follow here. */
6
7 /* Declarations for `malloc' and friends.
8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc.
9 Written May 1989 by Mike Haertel.
10
11 This library is free software; you can redistribute it and/or
12 modify it under the terms of the GNU Library General Public License as
13 published by the Free Software Foundation; either version 2 of the
14 License, or (at your option) any later version.
15
16 This library is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 Library General Public License for more details.
20
21 You should have received a copy of the GNU Library General Public
22 License along with this library; see the file COPYING.LIB. If
23 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
24 Cambridge, MA 02139, USA.
25
26 The author may be reached (Email) at the address mike@ai.mit.edu,
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */
28
29 #ifndef _MALLOC_H
30
31 #define _MALLOC_H 1
32
33 #ifdef _MALLOC_INTERNAL
34
35 #ifdef HAVE_CONFIG_H
36 #include <config.h>
37 #endif
38
39 #if defined __cplusplus || (defined (__STDC__) && __STDC__) || \
40 defined STDC_HEADERS || defined PROTOTYPES
41 #undef PP
42 #define PP(args) args
43 #undef __ptr_t
44 #define __ptr_t void *
45 #else /* Not C++ or ANSI C. */
46 #undef PP
47 #define PP(args) ()
48 #undef __ptr_t
49 #define __ptr_t char *
50 #endif /* C++ or ANSI C. */
51
52 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
53 #include <string.h>
54 #else
55 #ifndef memset
56 #define memset(s, zero, n) bzero ((s), (n))
57 #endif
58 #ifndef memcpy
59 #define memcpy(d, s, n) bcopy ((s), (d), (n))
60 #endif
61 #endif
62
63 #ifdef HAVE_LIMITS_H
64 #include <limits.h>
65 #endif
66 #ifndef CHAR_BIT
67 #define CHAR_BIT 8
68 #endif
69
70 #ifdef HAVE_UNISTD_H
71 #include <unistd.h>
72 #endif
73
74 #endif /* _MALLOC_INTERNAL. */
75
76
77 #ifdef __cplusplus
78 extern "C"
79 {
80 #endif
81
82 #ifdef STDC_HEADERS
83 #include <stddef.h>
84 #define __malloc_size_t size_t
85 #define __malloc_ptrdiff_t ptrdiff_t
86 #else
87 #ifdef __GNUC__
88 #include <stddef.h>
89 #ifdef __SIZE_TYPE__
90 #define __malloc_size_t __SIZE_TYPE__
91 #endif
92 #endif
93 #ifndef __malloc_size_t
94 #define __malloc_size_t unsigned int
95 #endif
96 #define __malloc_ptrdiff_t int
97 #endif
98
99 #ifndef NULL
100 #define NULL 0
101 #endif
102
103 #ifndef FREE_RETURN_TYPE
104 #define FREE_RETURN_TYPE void
105 #endif
106
107
108 /* Allocate SIZE bytes of memory. */
109 extern __ptr_t malloc PP ((__malloc_size_t __size));
110 /* Re-allocate the previously allocated block
111 in __ptr_t, making the new block SIZE bytes long. */
112 extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size));
113 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
114 extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size));
115 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
116 extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr));
117
118 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
119 #if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */
120 extern __ptr_t memalign PP ((__malloc_size_t __alignment,
121 __malloc_size_t __size));
122 #endif
123
124 /* Allocate SIZE bytes on a page boundary. */
125 #if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
126 extern __ptr_t valloc PP ((__malloc_size_t __size));
127 #endif
128
129
130 #ifdef _MALLOC_INTERNAL
131
132 /* The allocator divides the heap into blocks of fixed size; large
133 requests receive one or more whole blocks, and small requests
134 receive a fragment of a block. Fragment sizes are powers of two,
135 and all fragments of a block are the same size. When all the
136 fragments in a block have been freed, the block itself is freed. */
137 #define INT_BIT (CHAR_BIT * sizeof(int))
138 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
139 #define BLOCKSIZE (1 << BLOCKLOG)
140 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
141
142 /* Determine the amount of memory spanned by the initial heap table
143 (not an absolute limit). */
144 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
145
146 /* Number of contiguous free blocks allowed to build up at the end of
147 memory before they will be returned to the system. */
148 #define FINAL_FREE_BLOCKS 8
149
/* Data structure giving per-block information.
   One of these exists in `_heapinfo' for every block of the heap.
   Which union member is valid depends on the block's state: allocated
   blocks use `busy', blocks heading a free cluster use `free'
   (see `_malloc_internal' and `_free_internal' below).  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
181
182 /* Pointer to first block of the heap. */
183 extern char *_heapbase;
184
185 /* Table indexed by block number giving per-block information. */
186 extern malloc_info *_heapinfo;
187
188 /* Address to block number and vice versa. */
189 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
190 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
191
192 /* Current search index for the heap table. */
193 extern __malloc_size_t _heapindex;
194
195 /* Limit of valid info table indices. */
196 extern __malloc_size_t _heaplimit;
197
/* Doubly linked lists of free fragments.
   Each link node is stored inside the free fragment itself; this is why
   `_malloc_internal' rounds every request up to sizeof (struct list).  */
struct list
  {
    struct list *next;
    struct list *prev;
  };
204
205 /* Free list headers for each fragment size. */
206 extern struct list _fraghead[];
207
/* List of blocks allocated with `memalign' (or `valloc').
   Maps the aligned address handed out to the user back to the exact
   address `malloc' returned, so `free' can find the real block.
   A node whose `aligned' field is NULL is an empty slot available for
   reuse (see `_free_internal', which clears it on free).  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memaligned returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
215 extern struct alignlist *_aligned_blocks;
216
217 /* Instrumentation. */
218 extern __malloc_size_t _chunks_used;
219 extern __malloc_size_t _bytes_used;
220 extern __malloc_size_t _chunks_free;
221 extern __malloc_size_t _bytes_free;
222
223 /* Internal versions of `malloc', `realloc', and `free'
224 used when these functions need to call each other.
225 They are the same but don't call the hooks. */
226 extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
227 extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
228 extern void _free_internal PP ((__ptr_t __ptr));
229
230 #endif /* _MALLOC_INTERNAL. */
231
232 /* Given an address in the middle of a malloc'd object,
233 return the address of the beginning of the object. */
234 extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr));
235
236 /* Underlying allocation function; successive calls should
237 return contiguous pieces of memory. */
238 extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size));
239
240 /* Default value of `__morecore'. */
241 extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size));
242
243 /* If not NULL, this function is called after each time
244 `__morecore' is called to increase the data size. */
245 extern void (*__after_morecore_hook) PP ((void));
246
247 /* Number of extra blocks to get each time we ask for more core.
248 This reduces the frequency of calling `(*__morecore)'. */
249 extern __malloc_size_t __malloc_extra_blocks;
250
251 /* Nonzero if `malloc' has been called and done its initialization. */
252 extern int __malloc_initialized;
253 /* Function called to initialize malloc data structures. */
254 extern int __malloc_initialize PP ((void));
255
256 /* Hooks for debugging versions. */
257 extern void (*__malloc_initialize_hook) PP ((void));
258 extern void (*__free_hook) PP ((__ptr_t __ptr));
259 extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
260 extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
261 extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
262 __malloc_size_t __alignment));
263
/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  (Only MCHECK_DISABLED is negative, so
   callers can treat any non-negative value as "checking was active".)  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };
274
275 /* Activate a standard collection of debugging hooks. This must be called
276 before `malloc' is ever called. ABORTFUNC is called with an error code
277 (see enum above) when an inconsistency is detected. If ABORTFUNC is
278 null, the standard function prints on stderr and then calls `abort'. */
279 extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status))));
280
281 /* Check for aberrations in a particular malloc'd block. You must have
282 called `mcheck' already. These are the same checks that `mcheck' does
283 when you free or reallocate a block. */
284 extern enum mcheck_status mprobe PP ((__ptr_t __ptr));
285
286 /* Activate a standard collection of tracing hooks. */
287 extern void mtrace PP ((void));
288 extern void muntrace PP ((void));
289
/* Statistics available to the user.
   NOTE(review): presumably a snapshot of the internal _chunks_* /
   _bytes_* counters above, taken by `mstats' — its definition is not
   in this part of the file; confirm there.  */
struct mstats
  {
    __malloc_size_t bytes_total; /* Total size of the heap.  */
    __malloc_size_t chunks_used; /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;	/* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free; /* Chunks in the free list.  */
    __malloc_size_t bytes_free;	/* Byte total of chunks in the free list.  */
  };
299
300 /* Pick up the current statistics. */
301 extern struct mstats mstats PP ((void));
302
303 /* Call WARNFUN with a warning message when memory usage is high. */
304 extern void memory_warnings PP ((__ptr_t __start,
305 void (*__warnfun) PP ((const char *))));
306
307
308 /* Relocating allocator. */
309
310 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */
311 extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
312
313 /* Free the storage allocated in HANDLEPTR. */
314 extern void r_alloc_free PP ((__ptr_t *__handleptr));
315
316 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */
317 extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size));
318
319
320 #ifdef __cplusplus
321 }
322 #endif
323
324 #endif /* malloc.h */
325 /* Memory allocator `malloc'.
326 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
327 Written May 1989 by Mike Haertel.
328
329 This library is free software; you can redistribute it and/or
330 modify it under the terms of the GNU Library General Public License as
331 published by the Free Software Foundation; either version 2 of the
332 License, or (at your option) any later version.
333
334 This library is distributed in the hope that it will be useful,
335 but WITHOUT ANY WARRANTY; without even the implied warranty of
336 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
337 Library General Public License for more details.
338
339 You should have received a copy of the GNU Library General Public
340 License along with this library; see the file COPYING.LIB. If
341 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
342 Cambridge, MA 02139, USA.
343
344 The author may be reached (Email) at the address mike@ai.mit.edu,
345 or (US mail) as Mike Haertel c/o Free Software Foundation. */
346
347 #ifndef _MALLOC_INTERNAL
348 #define _MALLOC_INTERNAL
349 #include <malloc.h>
350 #endif
351 #include <errno.h>
352
353 /* How to really get more memory. */
354 __ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore;
355
356 /* Debugging hook for `malloc'. */
357 __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size));
358
359 /* Pointer to the base of the first block. */
360 char *_heapbase;
361
362 /* Block information table. Allocated with align/__free (not malloc/free). */
363 malloc_info *_heapinfo;
364
365 /* Number of info entries. */
366 static __malloc_size_t heapsize;
367
368 /* Search index in the info table. */
369 __malloc_size_t _heapindex;
370
371 /* Limit of valid info table indices. */
372 __malloc_size_t _heaplimit;
373
374 /* Free lists for each fragment size. */
375 struct list _fraghead[BLOCKLOG];
376
377 /* Instrumentation. */
378 __malloc_size_t _chunks_used;
379 __malloc_size_t _bytes_used;
380 __malloc_size_t _chunks_free;
381 __malloc_size_t _bytes_free;
382
383 /* Are you experienced? */
384 int __malloc_initialized;
385
386 __malloc_size_t __malloc_extra_blocks;
387
388 void (*__malloc_initialize_hook) PP ((void));
389 void (*__after_morecore_hook) PP ((void));
390
391 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
392
393 /* Some code for hunting a bug writing into _heapinfo.
394
395 Call this macro with argument PROT non-zero to protect internal
396 malloc state against writing to it, call it with a zero argument to
397 make it readable and writable.
398
399 Note that this only works if BLOCKSIZE == page size, which is
400 the case on the i386. */
401
402 #include <sys/types.h>
403 #include <sys/mman.h>
404
405 static int state_protected_p;
406 static __malloc_size_t last_state_size;
407 static malloc_info *last_heapinfo;
408
/* Make the malloc bookkeeping (the `_heapinfo' table) read-only when
   PROTECT_P is non-zero, read-write when it is zero, using mprotect.
   Only compiled under GC_MALLOC_CHECK && GC_PROTECT_MALLOC_STATE, and
   only effective when BLOCKSIZE equals the page size (see comment
   above).  Aborts if mprotect fails.  */
void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  /* Remember the table's current location and extent so the check
     above can undo the protection on the next call.  */
  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
		    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
	abort ();
    }
}
431
432 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state(PROT)
433
434 #else
435 #define PROTECT_MALLOC_STATE(PROT) /* empty */
436 #endif
437
438
439 /* Aligned allocation. */
440 static __ptr_t align PP ((__malloc_size_t));
441 static __ptr_t
442 align (size)
443 __malloc_size_t size;
444 {
445 __ptr_t result;
446 unsigned long int adj;
447
448 /* align accepts an unsigned argument, but __morecore accepts a
449 signed one. This could lead to trouble if SIZE overflows a
450 signed int type accepted by __morecore. We just punt in that
451 case, since they are requesting a ludicrous amount anyway. */
452 if ((__malloc_ptrdiff_t)size < 0)
453 result = 0;
454 else
455 result = (*__morecore) (size);
456 adj = (unsigned long int) ((unsigned long int) ((char *) result -
457 (char *) NULL)) % BLOCKSIZE;
458 if (adj != 0)
459 {
460 __ptr_t new;
461 adj = BLOCKSIZE - adj;
462 new = (*__morecore) (adj);
463 result = (char *) result + adj;
464 }
465
466 if (__after_morecore_hook)
467 (*__after_morecore_hook) ();
468
469 return result;
470 }
471
472 /* Get SIZE bytes, if we can get them starting at END.
473 Return the address of the space we got.
474 If we cannot get space at END, fail and return 0. */
475 static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t));
476 static __ptr_t
477 get_contiguous_space (size, position)
478 __malloc_ptrdiff_t size;
479 __ptr_t position;
480 {
481 __ptr_t before;
482 __ptr_t after;
483
484 before = (*__morecore) (0);
485 /* If we can tell in advance that the break is at the wrong place,
486 fail now. */
487 if (before != position)
488 return 0;
489
490 /* Allocate SIZE bytes and get the address of them. */
491 after = (*__morecore) (size);
492 if (!after)
493 return 0;
494
495 /* It was not contiguous--reject it. */
496 if (after != position)
497 {
498 (*__morecore) (- size);
499 return 0;
500 }
501
502 return after;
503 }
504
505
506 /* This is called when `_heapinfo' and `heapsize' have just
507 been set to describe a new info table. Set up the table
508 to describe itself and account for it in the statistics. */
509 static void register_heapinfo PP ((void));
510 #ifdef __GNUC__
511 __inline__
512 #endif
513 static void
514 register_heapinfo ()
515 {
516 __malloc_size_t block, blocks;
517
518 block = BLOCK (_heapinfo);
519 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
520
521 /* Account for the _heapinfo block itself in the statistics. */
522 _bytes_used += blocks * BLOCKSIZE;
523 ++_chunks_used;
524
525 /* Describe the heapinfo block itself in the heapinfo. */
526 _heapinfo[block].busy.type = 0;
527 _heapinfo[block].busy.info.size = blocks;
528 /* Leave back-pointers for malloc_find_address. */
529 while (--blocks > 0)
530 _heapinfo[block + blocks].busy.info.size = -blocks;
531 }
532
/* Set everything up and remember that we have.
   Return 1 on success; return 0 if already initialized or if the
   initial info table cannot be allocated.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  /* Let debugging code hook in before any allocation happens.  */
  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  /* Allocate and zero the initial info table, sized to span HEAP
     bytes of heap (see the HEAP macro above).  */
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  /* Entry 0 anchors the circular free list: size 0, linked to itself.  */
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  /* Block numbers (see BLOCK/ADDRESS) are relative to _heapbase.  */
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  /* Account for the info table in its own bookkeeping.  */
  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return 1;
}
564
/* Non-zero while morecore is re-entering itself through
   _realloc_internal below; used to cut off the recursion.  */
static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.
   SIZE is in bytes; the returned core is BLOCKSIZE-aligned (via
   `align').  Returns NULL if the memory cannot be obtained or if the
   info table cannot be grown to cover it.  */
static __ptr_t morecore PP ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      /* Double until the table covers the end of the new core.  */
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      /* Give back the core obtained for the caller too, since
		 we cannot describe it in the table.  */
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  /* Record the new end of the described heap.  */
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
682
/* Allocate memory from the heap.
   Like `malloc' but does not call the `__malloc_hook'.  Requests of at
   most BLOCKSIZE/2 bytes are served from power-of-two fragments of a
   single block; larger requests get one or more whole blocks from the
   free list, growing the heap via `morecore' when needed.  Returns
   NULL on failure.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  /* Every chunk must be able to hold a free-list node when freed.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    /* `first' is the next free fragment's offset within its
	       block, measured in fragment-size units.  */
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      return NULL;
	    }

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      /* Mark the first block busy (type 0 = large object).  */
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
885
886 __ptr_t
887 malloc (size)
888 __malloc_size_t size;
889 {
890 if (!__malloc_initialized && !__malloc_initialize ())
891 return NULL;
892
893 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
894 }
895 \f
#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

/* Thin forwarder to `malloc' above.  */
__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

/* Thin forwarder to `free'.  */
void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

/* Thin forwarder to `realloc'.  */
__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
924 /* Free a block of memory allocated by `malloc'.
925 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
926 Written May 1989 by Mike Haertel.
927
928 This library is free software; you can redistribute it and/or
929 modify it under the terms of the GNU Library General Public License as
930 published by the Free Software Foundation; either version 2 of the
931 License, or (at your option) any later version.
932
933 This library is distributed in the hope that it will be useful,
934 but WITHOUT ANY WARRANTY; without even the implied warranty of
935 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
936 Library General Public License for more details.
937
938 You should have received a copy of the GNU Library General Public
939 License along with this library; see the file COPYING.LIB. If
940 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
941 Cambridge, MA 02139, USA.
942
943 The author may be reached (Email) at the address mike@ai.mit.edu,
944 or (US mail) as Mike Haertel c/o Free Software Foundation. */
945
946 #ifndef _MALLOC_INTERNAL
947 #define _MALLOC_INTERNAL
948 #include <malloc.h>
949 #endif
950
951
952 /* Cope with systems lacking `memmove'. */
953 #ifndef memmove
954 #if (defined (MEMMOVE_MISSING) || \
955 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
956 #ifdef emacs
957 #undef __malloc_safe_bcopy
958 #define __malloc_safe_bcopy safe_bcopy
959 #endif
960 /* This function is defined in realloc.c. */
961 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
962 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
963 #endif
964 #endif
965
966
967 /* Debugging hook for free. */
968 void (*__free_hook) PP ((__ptr_t __ptr));
969
970 /* List of blocks allocated by memalign. */
971 struct alignlist *_aligned_blocks = NULL;
972
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  /* If PTR was produced by memalign, recover the exact pointer that
     malloc originally returned before doing any heap bookkeeping.  */
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Type 0: a large object occupying whole blocks.  */
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      /* Unlink the returned blocks from the free list.  */
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Non-zero type: the block holds fragments of size 1 << type.  */
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#ifdef GC_MALLOC_CHECK
	  _free_internal (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1219
1220 /* Return memory to the heap. */
1221
1222 FREE_RETURN_TYPE
1223 free (ptr)
1224 __ptr_t ptr;
1225 {
1226 if (__free_hook != NULL)
1227 (*__free_hook) (ptr);
1228 else
1229 _free_internal (ptr);
1230 }
1231
/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
/* No weak-alias support; provide a real forwarding function.  */
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
1243 /* Change the size of a block allocated by `malloc'.
1244 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1245 Written May 1989 by Mike Haertel.
1246
1247 This library is free software; you can redistribute it and/or
1248 modify it under the terms of the GNU Library General Public License as
1249 published by the Free Software Foundation; either version 2 of the
1250 License, or (at your option) any later version.
1251
1252 This library is distributed in the hope that it will be useful,
1253 but WITHOUT ANY WARRANTY; without even the implied warranty of
1254 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1255 Library General Public License for more details.
1256
1257 You should have received a copy of the GNU Library General Public
1258 License along with this library; see the file COPYING.LIB. If
1259 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1260 Cambridge, MA 02139, USA.
1261
1262 The author may be reached (Email) at the address mike@ai.mit.edu,
1263 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1264
1265 #ifndef _MALLOC_INTERNAL
1266 #define _MALLOC_INTERNAL
1267 #include <malloc.h>
1268 #endif
1269
1270
1271
1272 /* Cope with systems lacking `memmove'. */
1273 #if (defined (MEMMOVE_MISSING) || \
1274 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
1275
1276 #ifdef emacs
1277 #undef __malloc_safe_bcopy
1278 #define __malloc_safe_bcopy safe_bcopy
1279 #else
1280
/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap: copy SIZE bytes
   from AFROM to ATO, copying backwards (or in backward chunks) when
   the destination overlaps the source from above.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  /* Nothing to do for empty or in-place copies.  */
  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  /* Simple byte-at-a-time backward copy.  */
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  /* Copy TO - FROM sized chunks from the end; each chunk's
	     source and destination do not overlap each other.  */
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
1341 #endif /* emacs */
1342
1343 #ifndef memmove
1344 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t));
1345 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
1346 #endif
1347
1348 #endif
1349
1350
1351 #define min(A, B) ((A) < (B) ? (A) : (B))
1352
1353 /* Debugging hook for realloc. */
1354 __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
1355
/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      /* Resizing to zero frees PTR and hands back a minimal block.  */
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Type 0: PTR is a large allocation spanning whole blocks.  */
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  /* Re-occupy the gap before BLOCK, re-take BLOCK
		     itself, then release the gap again.  */
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
  return result;
}
1478
1479 __ptr_t
1480 realloc (ptr, size)
1481 __ptr_t ptr;
1482 __malloc_size_t size;
1483 {
1484 if (!__malloc_initialized && !__malloc_initialize ())
1485 return NULL;
1486
1487 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
1488 (ptr, size);
1489 }
1490 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1491
1492 This library is free software; you can redistribute it and/or
1493 modify it under the terms of the GNU Library General Public License as
1494 published by the Free Software Foundation; either version 2 of the
1495 License, or (at your option) any later version.
1496
1497 This library is distributed in the hope that it will be useful,
1498 but WITHOUT ANY WARRANTY; without even the implied warranty of
1499 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1500 Library General Public License for more details.
1501
1502 You should have received a copy of the GNU Library General Public
1503 License along with this library; see the file COPYING.LIB. If
1504 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1505 Cambridge, MA 02139, USA.
1506
1507 The author may be reached (Email) at the address mike@ai.mit.edu,
1508 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1509
1510 #ifndef _MALLOC_INTERNAL
1511 #define _MALLOC_INTERNAL
1512 #include <malloc.h>
1513 #endif
1514
1515 /* Allocate an array of NMEMB elements each SIZE bytes long.
1516 The entire array is initialized to zeros. */
1517 __ptr_t
1518 calloc (nmemb, size)
1519 register __malloc_size_t nmemb;
1520 register __malloc_size_t size;
1521 {
1522 register __ptr_t result = malloc (nmemb * size);
1523
1524 if (result != NULL)
1525 (void) memset (result, 0, nmemb * size);
1526
1527 return result;
1528 }
1529 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1530 This file is part of the GNU C Library.
1531
1532 The GNU C Library is free software; you can redistribute it and/or modify
1533 it under the terms of the GNU General Public License as published by
1534 the Free Software Foundation; either version 2, or (at your option)
1535 any later version.
1536
1537 The GNU C Library is distributed in the hope that it will be useful,
1538 but WITHOUT ANY WARRANTY; without even the implied warranty of
1539 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1540 GNU General Public License for more details.
1541
1542 You should have received a copy of the GNU General Public License
1543 along with the GNU C Library; see the file COPYING. If not, write to
1544 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
1545
1546 #ifndef _MALLOC_INTERNAL
1547 #define _MALLOC_INTERNAL
1548 #include <malloc.h>
1549 #endif
1550
1551 #ifndef __GNU_LIBRARY__
1552 #define __sbrk sbrk
1553 #endif
1554
1555 #ifdef __GNU_LIBRARY__
1556 /* It is best not to declare this and cast its result on foreign operating
1557 systems with potentially hostile include files. */
1558
1559 #include <stddef.h>
1560 extern __ptr_t __sbrk PP ((ptrdiff_t increment));
1561 #endif
1562
1563 #ifndef NULL
1564 #define NULL 0
1565 #endif
1566
1567 /* Allocate INCREMENT more bytes of data space,
1568 and return the start of data space, or NULL on errors.
1569 If INCREMENT is negative, shrink data space. */
1570 __ptr_t
1571 __default_morecore (increment)
1572 __malloc_ptrdiff_t increment;
1573 {
1574 __ptr_t result = (__ptr_t) __sbrk (increment);
1575 if (result == (__ptr_t) -1)
1576 return NULL;
1577 return result;
1578 }
1579 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1580
1581 This library is free software; you can redistribute it and/or
1582 modify it under the terms of the GNU Library General Public License as
1583 published by the Free Software Foundation; either version 2 of the
1584 License, or (at your option) any later version.
1585
1586 This library is distributed in the hope that it will be useful,
1587 but WITHOUT ANY WARRANTY; without even the implied warranty of
1588 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1589 Library General Public License for more details.
1590
1591 You should have received a copy of the GNU Library General Public
1592 License along with this library; see the file COPYING.LIB. If
1593 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1594 Cambridge, MA 02139, USA. */
1595
1596 #ifndef _MALLOC_INTERNAL
1597 #define _MALLOC_INTERNAL
1598 #include <malloc.h>
1599 #endif
1600
1601 #if __DJGPP__ - 0 == 1
1602
1603 /* There is some problem with memalign in DJGPP v1 and we are supposed
1604 to omit it. Noone told me why, they just told me to do it. */
1605
1606 #else
1607
1608 __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size,
1609 __malloc_size_t __alignment));
1610
/* Return a block of SIZE bytes whose address is a multiple of
   ALIGNMENT, or NULL on failure.  Blocks whose address had to be
   adjusted are recorded in _aligned_blocks so `free' can recover the
   exact pointer malloc returned.  */
__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  /* Defer entirely to the debugging hook when one is installed.  */
  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      /* Reuse a free slot in the list if there is one.  */
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
1676
1677 #endif /* Not DJGPP v1 */
1678 /* Allocate memory on a page boundary.
1679 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1680
1681 This library is free software; you can redistribute it and/or
1682 modify it under the terms of the GNU Library General Public License as
1683 published by the Free Software Foundation; either version 2 of the
1684 License, or (at your option) any later version.
1685
1686 This library is distributed in the hope that it will be useful,
1687 but WITHOUT ANY WARRANTY; without even the implied warranty of
1688 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1689 Library General Public License for more details.
1690
1691 You should have received a copy of the GNU Library General Public
1692 License along with this library; see the file COPYING.LIB. If
1693 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1694 Cambridge, MA 02139, USA.
1695
1696 The author may be reached (Email) at the address mike@ai.mit.edu,
1697 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1698
1699 #if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)
1700
1701 /* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
1702 on MSDOS, where it conflicts with a system header file. */
1703
1704 #define ELIDE_VALLOC
1705
1706 #endif
1707
1708 #ifndef ELIDE_VALLOC
1709
1710 #if defined (__GNU_LIBRARY__) || defined (_LIBC)
1711 #include <stddef.h>
1712 #include <sys/cdefs.h>
1713 #if defined (__GLIBC__) && __GLIBC__ >= 2
1714 /* __getpagesize is already declared in <unistd.h> with return type int */
1715 #else
1716 extern size_t __getpagesize PP ((void));
1717 #endif
1718 #else
1719 #include "getpagesize.h"
1720 #define __getpagesize() getpagesize()
1721 #endif
1722
1723 #ifndef _MALLOC_INTERNAL
1724 #define _MALLOC_INTERNAL
1725 #include <malloc.h>
1726 #endif
1727
1728 static __malloc_size_t pagesize;
1729
1730 __ptr_t
1731 valloc (size)
1732 __malloc_size_t size;
1733 {
1734 if (pagesize == 0)
1735 pagesize = __getpagesize ();
1736
1737 return memalign (pagesize, size);
1738 }
1739
1740 #endif /* Not ELIDE_VALLOC. */
1741
1742 #ifdef GC_MCHECK
1743
1744 /* Standard debugging hooks for `malloc'.
1745 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1746 Written May 1989 by Mike Haertel.
1747
1748 This library is free software; you can redistribute it and/or
1749 modify it under the terms of the GNU Library General Public License as
1750 published by the Free Software Foundation; either version 2 of the
1751 License, or (at your option) any later version.
1752
1753 This library is distributed in the hope that it will be useful,
1754 but WITHOUT ANY WARRANTY; without even the implied warranty of
1755 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1756 Library General Public License for more details.
1757
1758 You should have received a copy of the GNU Library General Public
1759 License along with this library; see the file COPYING.LIB. If
1760 not, write to the Free Software Foundation, Inc., 675 Mass Ave,
1761 Cambridge, MA 02139, USA.
1762
1763 The author may be reached (Email) at the address mike@ai.mit.edu,
1764 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1765
1766 #ifdef emacs
1767 #include <stdio.h>
1768 #else
1769 #ifndef _MALLOC_INTERNAL
1770 #define _MALLOC_INTERNAL
1771 #include <malloc.h>
1772 #include <stdio.h>
1773 #endif
1774 #endif
1775
1776 /* Old hook values. */
1777 static void (*old_free_hook) __P ((__ptr_t ptr));
1778 static __ptr_t (*old_malloc_hook) __P ((__malloc_size_t size));
1779 static __ptr_t (*old_realloc_hook) __P ((__ptr_t ptr, __malloc_size_t size));
1780
1781 /* Function to call when something awful happens. */
1782 static void (*abortfunc) __P ((enum mcheck_status));
1783
1784 /* Arbitrary magical numbers. */
1785 #define MAGICWORD 0xfedabeeb
1786 #define MAGICFREE 0xd8675309
1787 #define MAGICBYTE ((char) 0xd7)
1788 #define MALLOCFLOOD ((char) 0x93)
1789 #define FREEFLOOD ((char) 0x95)
1790
/* Bookkeeping header placed immediately before each block handed out
   while the mcheck hooks are active (see mallochook/freehook).  */
struct hdr
  {
    __malloc_size_t size;	/* Exact size requested by user.  */
    unsigned long int magic;	/* Magic number to check header integrity.  */
  };
1796
1797 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
1798 #define flood memset
1799 #else
1800 static void flood __P ((__ptr_t, int, __malloc_size_t));
1801 static void
1802 flood (ptr, val, size)
1803 __ptr_t ptr;
1804 int val;
1805 __malloc_size_t size;
1806 {
1807 char *cp = ptr;
1808 while (size--)
1809 *cp++ = val;
1810 }
1811 #endif
1812
1813 static enum mcheck_status checkhdr __P ((const struct hdr *));
1814 static enum mcheck_status
1815 checkhdr (hdr)
1816 const struct hdr *hdr;
1817 {
1818 enum mcheck_status status;
1819 switch (hdr->magic)
1820 {
1821 default:
1822 status = MCHECK_HEAD;
1823 break;
1824 case MAGICFREE:
1825 status = MCHECK_FREE;
1826 break;
1827 case MAGICWORD:
1828 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1829 status = MCHECK_TAIL;
1830 else
1831 status = MCHECK_OK;
1832 break;
1833 }
1834 if (status != MCHECK_OK)
1835 (*abortfunc) (status);
1836 return status;
1837 }
1838
1839 static void freehook __P ((__ptr_t));
1840 static void
1841 freehook (ptr)
1842 __ptr_t ptr;
1843 {
1844 struct hdr *hdr;
1845
1846 if (ptr)
1847 {
1848 hdr = ((struct hdr *) ptr) - 1;
1849 checkhdr (hdr);
1850 hdr->magic = MAGICFREE;
1851 flood (ptr, FREEFLOOD, hdr->size);
1852 }
1853 else
1854 hdr = NULL;
1855
1856 __free_hook = old_free_hook;
1857 free (hdr);
1858 __free_hook = freehook;
1859 }
1860
1861 static __ptr_t mallochook __P ((__malloc_size_t));
1862 static __ptr_t
1863 mallochook (size)
1864 __malloc_size_t size;
1865 {
1866 struct hdr *hdr;
1867
1868 __malloc_hook = old_malloc_hook;
1869 hdr = (struct hdr *) malloc (sizeof (struct hdr) + size + 1);
1870 __malloc_hook = mallochook;
1871 if (hdr == NULL)
1872 return NULL;
1873
1874 hdr->size = size;
1875 hdr->magic = MAGICWORD;
1876 ((char *) &hdr[1])[size] = MAGICBYTE;
1877 flood ((__ptr_t) (hdr + 1), MALLOCFLOOD, size);
1878 return (__ptr_t) (hdr + 1);
1879 }
1880
1881 static __ptr_t reallochook __P ((__ptr_t, __malloc_size_t));
/* `realloc' debugging hook: validate the old block, resize it (header
   and guard byte included) with the real realloc, then rebuild the
   header and flood any newly added bytes.  */
static __ptr_t
reallochook (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  struct hdr *hdr = NULL;
  __malloc_size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	/* Shrinking: clobber the bytes being abandoned.  */
	flood ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Swap out all three hooks so the underlying realloc (and any malloc
     or free it performs internally) doesn't re-enter these wrappers.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = (struct hdr *) realloc ((__ptr_t) hdr, sizeof (struct hdr) + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  /* Re-plant the guard byte past the (possibly moved) user region.  */
  ((char *) &hdr[1])[size] = MAGICBYTE;
  if (size > osize)
    /* Growing: flood the newly added bytes with the malloc pattern.  */
    flood ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return (__ptr_t) (hdr + 1);
}
1917
1918 static void
1919 mabort (status)
1920 enum mcheck_status status;
1921 {
1922 const char *msg;
1923 switch (status)
1924 {
1925 case MCHECK_OK:
1926 msg = "memory is consistent, library is buggy";
1927 break;
1928 case MCHECK_HEAD:
1929 msg = "memory clobbered before allocated block";
1930 break;
1931 case MCHECK_TAIL:
1932 msg = "memory clobbered past end of allocated block";
1933 break;
1934 case MCHECK_FREE:
1935 msg = "block freed twice";
1936 break;
1937 default:
1938 msg = "bogus mcheck_status, library is buggy";
1939 break;
1940 }
1941 #ifdef __GNU_LIBRARY__
1942 __libc_fatal (msg);
1943 #else
1944 fprintf (stderr, "mcheck: %s\n", msg);
1945 fflush (stderr);
1946 abort ();
1947 #endif
1948 }
1949
1950 static int mcheck_used = 0;
1951
1952 int
1953 mcheck (func)
1954 void (*func) __P ((enum mcheck_status));
1955 {
1956 abortfunc = (func != NULL) ? func : &mabort;
1957
1958 /* These hooks may not be safely inserted if malloc is already in use. */
1959 if (!__malloc_initialized && !mcheck_used)
1960 {
1961 old_free_hook = __free_hook;
1962 __free_hook = freehook;
1963 old_malloc_hook = __malloc_hook;
1964 __malloc_hook = mallochook;
1965 old_realloc_hook = __realloc_hook;
1966 __realloc_hook = reallochook;
1967 mcheck_used = 1;
1968 }
1969
1970 return mcheck_used ? 0 : -1;
1971 }
1972
1973 enum mcheck_status
1974 mprobe (__ptr_t ptr)
1975 {
1976 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
1977 }
1978
1979 #endif /* GC_MCHECK */