1 /* Declarations for `malloc' and friends.
2 Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2016 Free
3 Software Foundation, Inc.
4 Written May 1989 by Mike Haertel.
6 This library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public
17 License along with this library. If not, see <http://www.gnu.org/licenses/>.
19 The author may be reached (Email) at the address mike@ai.mit.edu,
20 or (US mail) as Mike Haertel c/o Free Software Foundation. */
24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
32 #ifdef HYBRID_GET_CURRENT_DIR_NAME
33 #undef get_current_dir_name
43 #include <w32heap.h> /* for sbrk */
47 extern void emacs_abort (void);
50 /* If HYBRID_MALLOC is defined, then temacs will use malloc,
51 realloc... as defined in this file (and renamed gmalloc,
52 grealloc... via the macros that follow). The dumped emacs,
53 however, will use the system malloc, realloc.... In other source
54 files, malloc, realloc... are renamed hybrid_malloc,
55 hybrid_realloc... via macros in conf_post.h. hybrid_malloc and
56 friends are wrapper functions defined later in this file.
57 aligned_alloc is defined as a macro only in alloc.c.
59 As of this writing (August 2014), Cygwin is the only platform on
60 which HYBRID_MALLOC is defined. Any other platform that wants to
61 define it will have to define the macros DUMPED and
62 ALLOCATED_BEFORE_DUMPING, defined below for Cygwin. */
67 #define malloc gmalloc
68 #define realloc grealloc
69 #define calloc gcalloc
70 #define aligned_alloc galigned_alloc
74 extern void *bss_sbrk (ptrdiff_t size
);
75 extern int bss_sbrk_did_unexec
;
76 extern char bss_sbrk_buffer
[];
77 extern void *bss_sbrk_buffer_end
;
78 #define DUMPED bss_sbrk_did_unexec
79 #define ALLOCATED_BEFORE_DUMPING(P) \
80 ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
91 extern void emacs_abort (void);
94 /* Underlying allocation function; successive calls should
95 return contiguous pieces of memory. */
96 extern void *(*__morecore
) (ptrdiff_t size
);
98 /* Default value of `__morecore'. */
99 extern void *__default_morecore (ptrdiff_t size
);
102 #define extern static
105 /* Allocate SIZE bytes of memory. */
106 extern void *malloc (size_t size
) ATTRIBUTE_MALLOC_SIZE ((1));
107 /* Re-allocate the previously allocated block
108 in ptr, making the new block SIZE bytes long. */
109 extern void *realloc (void *ptr
, size_t size
) ATTRIBUTE_ALLOC_SIZE ((2));
110 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */
111 extern void *calloc (size_t nmemb
, size_t size
) ATTRIBUTE_MALLOC_SIZE ((1,2));
112 /* Free a block allocated by `malloc', `realloc' or `calloc'. */
113 extern void free (void *ptr
);
115 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */
116 extern void *aligned_alloc (size_t, size_t);
118 extern void *memalign (size_t, size_t);
119 extern int posix_memalign (void **, size_t, size_t);
123 /* Set up mutexes and make malloc etc. thread-safe. */
124 extern void malloc_enable_thread (void);
127 /* The allocator divides the heap into blocks of fixed size; large
128 requests receive one or more whole blocks, and small requests
129 receive a fragment of a block. Fragment sizes are powers of two,
130 and all fragments of a block are the same size. When all the
131 fragments in a block have been freed, the block itself is freed. */
132 #define INT_BIT (CHAR_BIT * sizeof (int))
133 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
134 #define BLOCKSIZE (1 << BLOCKLOG)
135 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
137 /* Determine the amount of memory spanned by the initial heap table
138 (not an absolute limit). */
139 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
141 /* Number of contiguous free blocks allowed to build up at the end of
142 memory before they will be returned to the system. */
143 #define FINAL_FREE_BLOCKS 8
145 /* Data structure giving per-block information. */
148 /* Heap information for a busy block. */
151 /* Zero for a large (multiblock) object, or positive giving the
152 logarithm to the base two of the fragment size. */
158 size_t nfree
; /* Free frags in a fragmented block. */
159 size_t first
; /* First free fragment of the block. */
161 /* For a large object, in its first block, this has the number
162 of blocks in the object. In the other blocks, this has a
163 negative number which says how far back the first block is. */
167 /* Heap information for a free block
168 (that may be the first of a free cluster). */
171 size_t size
; /* Size (in blocks) of a free cluster. */
172 size_t next
; /* Index of next free cluster. */
173 size_t prev
; /* Index of previous free cluster. */
177 /* Pointer to first block of the heap. */
178 extern char *_heapbase
;
180 /* Table indexed by block number giving per-block information. */
181 extern malloc_info
*_heapinfo
;
183 /* Address to block number and vice versa. */
184 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
185 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
187 /* Current search index for the heap table. */
188 extern size_t _heapindex
;
190 /* Limit of valid info table indices. */
191 extern size_t _heaplimit
;
193 /* Doubly linked lists of free fragments. */
200 /* Free list headers for each fragment size. */
201 extern struct list _fraghead
[];
203 /* List of blocks allocated with aligned_alloc and friends. */
206 struct alignlist
*next
;
207 void *aligned
; /* The address that aligned_alloc returned. */
208 void *exact
; /* The address that malloc returned. */
210 extern struct alignlist
*_aligned_blocks
;
212 /* Instrumentation. */
213 extern size_t _chunks_used
;
214 extern size_t _bytes_used
;
215 extern size_t _chunks_free
;
216 extern size_t _bytes_free
;
218 /* Internal versions of `malloc', `realloc', and `free'
219 used when these functions need to call each other.
220 They are the same but don't call the hooks. */
221 extern void *_malloc_internal (size_t);
222 extern void *_realloc_internal (void *, size_t);
223 extern void _free_internal (void *);
224 extern void *_malloc_internal_nolock (size_t);
225 extern void *_realloc_internal_nolock (void *, size_t);
226 extern void _free_internal_nolock (void *);
229 extern pthread_mutex_t _malloc_mutex
, _aligned_blocks_mutex
;
230 extern int _malloc_thread_enabled_p
;
233 if (_malloc_thread_enabled_p) \
234 pthread_mutex_lock (&_malloc_mutex); \
238 if (_malloc_thread_enabled_p) \
239 pthread_mutex_unlock (&_malloc_mutex); \
241 #define LOCK_ALIGNED_BLOCKS() \
243 if (_malloc_thread_enabled_p) \
244 pthread_mutex_lock (&_aligned_blocks_mutex); \
246 #define UNLOCK_ALIGNED_BLOCKS() \
248 if (_malloc_thread_enabled_p) \
249 pthread_mutex_unlock (&_aligned_blocks_mutex); \
254 #define LOCK_ALIGNED_BLOCKS()
255 #define UNLOCK_ALIGNED_BLOCKS()
258 /* Given an address in the middle of a malloc'd object,
259 return the address of the beginning of the object. */
260 extern void *malloc_find_object_address (void *ptr
);
262 /* If not NULL, this function is called after each time
263 `__morecore' is called to increase the data size. */
264 extern void (*__after_morecore_hook
) (void);
266 /* Number of extra blocks to get each time we ask for more core.
267 This reduces the frequency of calling `(*__morecore)'. */
268 extern size_t __malloc_extra_blocks
;
270 /* Nonzero if `malloc' has been called and done its initialization. */
271 extern int __malloc_initialized
;
272 /* Function called to initialize malloc data structures. */
273 extern int __malloc_initialize (void);
275 /* Hooks for debugging versions. */
276 extern void (*__malloc_initialize_hook
) (void);
277 extern void (*__free_hook
) (void *ptr
);
278 extern void *(*__malloc_hook
) (size_t size
);
279 extern void *(*__realloc_hook
) (void *ptr
, size_t size
);
280 extern void *(*__memalign_hook
) (size_t size
, size_t alignment
);
282 /* Return values for `mprobe': these are the kinds of inconsistencies that
283 `mcheck' enables detection of. */
286 MCHECK_DISABLED
= -1, /* Consistency checking is not turned on. */
287 MCHECK_OK
, /* Block is fine. */
288 MCHECK_FREE
, /* Block freed twice. */
289 MCHECK_HEAD
, /* Memory before the block was clobbered. */
290 MCHECK_TAIL
/* Memory after the block was clobbered. */
293 /* Activate a standard collection of debugging hooks. This must be called
294 before `malloc' is ever called. ABORTFUNC is called with an error code
295 (see enum above) when an inconsistency is detected. If ABORTFUNC is
296 null, the standard function prints on stderr and then calls `abort'. */
297 extern int mcheck (void (*abortfunc
) (enum mcheck_status
));
299 /* Check for aberrations in a particular malloc'd block. You must have
300 called `mcheck' already. These are the same checks that `mcheck' does
301 when you free or reallocate a block. */
302 extern enum mcheck_status
mprobe (void *ptr
);
304 /* Activate a standard collection of tracing hooks. */
305 extern void mtrace (void);
306 extern void muntrace (void);
308 /* Statistics available to the user. */
311 size_t bytes_total
; /* Total size of the heap. */
312 size_t chunks_used
; /* Chunks allocated by the user. */
313 size_t bytes_used
; /* Byte total of user-allocated chunks. */
314 size_t chunks_free
; /* Chunks in the free list. */
315 size_t bytes_free
; /* Byte total of chunks in the free list. */
318 /* Pick up the current statistics. */
319 extern struct mstats
mstats (void);
321 /* Call WARNFUN with a warning message when memory usage is high. */
322 extern void memory_warnings (void *start
, void (*warnfun
) (const char *));
330 /* Memory allocator `malloc'.
331 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
332 Written May 1989 by Mike Haertel.
334 This library is free software; you can redistribute it and/or
335 modify it under the terms of the GNU General Public License as
336 published by the Free Software Foundation; either version 2 of the
337 License, or (at your option) any later version.
339 This library is distributed in the hope that it will be useful,
340 but WITHOUT ANY WARRANTY; without even the implied warranty of
341 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
342 General Public License for more details.
344 You should have received a copy of the GNU General Public
345 License along with this library. If not, see <http://www.gnu.org/licenses/>.
347 The author may be reached (Email) at the address mike@ai.mit.edu,
348 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Function used to obtain more core from the system; successive
   calls are expected to return contiguous pieces of memory (see the
   declaration earlier in this file).  Defaults to __default_morecore.  */
352 void *(*__morecore
) (ptrdiff_t size
) = __default_morecore
;
354 #ifndef HYBRID_MALLOC
356 /* Debugging hook for `malloc'. */
357 void *(*__malloc_hook
) (size_t size
);
359 /* Pointer to the base of the first block. */
362 /* Block information table. Allocated with align/__free (not malloc/free). */
363 malloc_info
*_heapinfo
;
365 /* Search index in the info table. */
368 /* Limit of valid info table indices. */
371 /* Free lists for each fragment size. */
372 struct list _fraghead
[BLOCKLOG
];
374 /* Instrumentation. */
380 /* Are you experienced? */
/* Nonzero once __malloc_initialize has run.  */
381 int __malloc_initialized
;
/* Extra blocks requested from the system per __morecore call, to
   reduce the number of calls (see the extern declaration above).  */
383 size_t __malloc_extra_blocks
;
/* Debugging hooks run at initialization time and after each
   __morecore call, respectively.  */
385 void (*__malloc_initialize_hook
) (void);
386 void (*__after_morecore_hook
) (void);
/* HYBRID_MALLOC build keeps its own file-local fragment free lists.  */
390 static struct list _fraghead
[BLOCKLOG
];
392 #endif /* HYBRID_MALLOC */
394 /* Number of info entries. */
395 static size_t heapsize
;
397 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
399 /* Some code for hunting a bug writing into _heapinfo.
401 Call this macro with argument PROT non-zero to protect internal
402 malloc state against writing to it, call it with a zero argument to
403 make it readable and writable.
405 Note that this only works if BLOCKSIZE == page size, which is
406 the case on the i386. */
408 #include <sys/types.h>
409 #include <sys/mman.h>
/* Debugging aids (GC_MALLOC_CHECK && GC_PROTECT_MALLOC_STATE only):
   remember which region of malloc state was last mprotect-ed so a
   relocated _heapinfo table can be made writable again.  */
411 static int state_protected_p
;
412 static size_t last_state_size
;
413 static malloc_info
*last_heapinfo
;
/* Make the internal malloc state (_heapinfo) read-only when
   PROTECT_P is nonzero, readable and writable when it is zero.
   Per the comment above, this only works when BLOCKSIZE equals the
   page size.  */
416 protect_malloc_state (int protect_p
)
418 /* If _heapinfo has been relocated, make sure its old location
419 isn't left read-only; it will be reused by malloc. */
420 if (_heapinfo
!= last_heapinfo
422 && state_protected_p
)
423 mprotect (last_heapinfo
, last_state_size
, PROT_READ
| PROT_WRITE
);
/* Record the region we are about to (un)protect so the check above
   can undo it even after _heapinfo moves.  */
425 last_state_size
= _heaplimit
* sizeof *_heapinfo
;
426 last_heapinfo
= _heapinfo
;
/* Only change protection when the requested state differs.  */
428 if (protect_p
!= state_protected_p
)
430 state_protected_p
= protect_p
;
431 if (mprotect (_heapinfo
, last_state_size
,
432 protect_p
? PROT_READ
: PROT_READ
| PROT_WRITE
) != 0)
437 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
440 #define PROTECT_MALLOC_STATE(PROT) /* empty */
444 /* Aligned allocation. */
451 /* align accepts an unsigned argument, but __morecore accepts a
452 signed one. This could lead to trouble if SIZE overflows the
453 ptrdiff_t type accepted by __morecore. We just punt in that
454 case, since they are requesting a ludicrous amount anyway. */
455 if (PTRDIFF_MAX
< size
)
458 result
= (*__morecore
) (size
);
459 adj
= (uintptr_t) result
% BLOCKSIZE
;
462 adj
= BLOCKSIZE
- adj
;
464 result
= (char *) result
+ adj
;
467 if (__after_morecore_hook
)
468 (*__after_morecore_hook
) ();
473 /* Get SIZE bytes, if we can get them starting at END.
474 Return the address of the space we got.
475 If we cannot get space at END, fail and return 0. */
477 get_contiguous_space (ptrdiff_t size
, void *position
)
482 before
= (*__morecore
) (0);
483 /* If we can tell in advance that the break is at the wrong place,
485 if (before
!= position
)
488 /* Allocate SIZE bytes and get the address of them. */
489 after
= (*__morecore
) (size
);
493 /* It was not contiguous--reject it. */
494 if (after
!= position
)
496 (*__morecore
) (- size
);
504 /* This is called when `_heapinfo' and `heapsize' have just
505 been set to describe a new info table. Set up the table
506 to describe itself and account for it in the statistics. */
508 register_heapinfo (void)
510 size_t block
, blocks
;
/* Block index where the info table itself starts, and how many
   whole blocks it occupies.  */
512 block
= BLOCK (_heapinfo
);
513 blocks
= BLOCKIFY (heapsize
* sizeof (malloc_info
));
515 /* Account for the _heapinfo block itself in the statistics. */
516 _bytes_used
+= blocks
* BLOCKSIZE
;
519 /* Describe the heapinfo block itself in the heapinfo. */
/* type == 0 marks a large (multiblock) object; info.size is its
   length in blocks (see the busy-block comments earlier).  */
520 _heapinfo
[block
].busy
.type
= 0;
521 _heapinfo
[block
].busy
.info
.size
= blocks
;
522 /* Leave back-pointers for malloc_find_object_address. */
524 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
/* Locks guarding the allocator's heap state and the _aligned_blocks
   list, plus the flag that says whether locking is in effect (set by
   malloc_enable_thread below).  */
528 pthread_mutex_t _malloc_mutex
= PTHREAD_MUTEX_INITIALIZER
;
529 pthread_mutex_t _aligned_blocks_mutex
= PTHREAD_MUTEX_INITIALIZER
;
530 int _malloc_thread_enabled_p
;
/* Handlers registered with pthread_atfork (see malloc_enable_thread):
   prepare acquires the allocator locks before fork, and the parent
   and child handlers release them afterwards, so the child never
   inherits a held malloc mutex.  (Only the aligned-blocks lock calls
   are visible here; the heap-lock calls are presumably adjacent.)  */
533 malloc_atfork_handler_prepare (void)
536 LOCK_ALIGNED_BLOCKS ();
540 malloc_atfork_handler_parent (void)
542 UNLOCK_ALIGNED_BLOCKS ();
547 malloc_atfork_handler_child (void)
549 UNLOCK_ALIGNED_BLOCKS ();
553 /* Set up mutexes and make malloc etc. thread-safe. */
555 malloc_enable_thread (void)
/* Already enabled: nothing more to do.  */
557 if (_malloc_thread_enabled_p
)
560 /* Some pthread implementations call malloc for statically
561 initialized mutexes when they are used first. To avoid such a
562 situation, we initialize mutexes here while their use is
563 disabled in malloc etc. */
564 pthread_mutex_init (&_malloc_mutex
, NULL
);
565 pthread_mutex_init (&_aligned_blocks_mutex
, NULL
);
/* Keep the locks consistent across fork (handlers defined above).  */
566 pthread_atfork (malloc_atfork_handler_prepare
,
567 malloc_atfork_handler_parent
,
568 malloc_atfork_handler_child
);
/* From now on the LOCK/UNLOCK macros really lock.  */
569 _malloc_thread_enabled_p
= 1;
571 #endif /* USE_PTHREAD */
/* One-time initialization of the allocator: run the user hook,
   allocate and zero the initial _heapinfo table, and record the heap
   base and limit.  Called from __malloc_initialize below.  */
574 malloc_initialize_1 (void)
/* Give a debugging/tracing client a chance to install hooks first.  */
580 if (__malloc_initialize_hook
)
581 (*__malloc_initialize_hook
) ();
/* Initial info table covers HEAP bytes of heap.  */
583 heapsize
= HEAP
/ BLOCKSIZE
;
584 _heapinfo
= align (heapsize
* sizeof (malloc_info
));
585 if (_heapinfo
== NULL
)
587 memset (_heapinfo
, 0, heapsize
* sizeof (malloc_info
));
/* Entry 0 anchors the circular free list; start with it empty.  */
588 _heapinfo
[0].free
.size
= 0;
589 _heapinfo
[0].free
.next
= _heapinfo
[0].free
.prev
= 0;
/* The heap begins at the info table itself; BLOCK()/ADDRESS()
   translate relative to _heapbase.  */
591 _heapbase
= (char *) _heapinfo
;
592 _heaplimit
= BLOCK (_heapbase
+ heapsize
* sizeof (malloc_info
));
/* Make the table describe itself and update the statistics.  */
594 register_heapinfo ();
596 __malloc_initialized
= 1;
597 PROTECT_MALLOC_STATE (1);
601 /* Set everything up and remember that we have.
602 main will call malloc which calls this function. That is before any threads
603 or signal handlers has been set up, so we don't need thread protection. */
605 __malloc_initialize (void)
/* Idempotent: a second call is a no-op.  */
607 if (__malloc_initialized
)
610 malloc_initialize_1 ();
/* Nonzero on success (malloc_initialize_1 sets the flag).  */
612 return __malloc_initialized
;
615 static int morecore_recursing
;
617 /* Get neatly aligned memory, initializing or
618 growing the heap info table as necessary. */
620 morecore_nolock (size_t size
)
623 malloc_info
*newinfo
, *oldinfo
;
626 if (morecore_recursing
)
627 /* Avoid recursion. The caller will know how to handle a null return. */
630 result
= align (size
);
634 PROTECT_MALLOC_STATE (0);
636 /* Check if we need to grow the info table. */
637 if ((size_t) BLOCK ((char *) result
+ size
) > heapsize
)
639 /* Calculate the new _heapinfo table size. We do not account for the
640 added blocks in the table itself, as we hope to place them in
641 existing free space, which is already covered by part of the
646 while ((size_t) BLOCK ((char *) result
+ size
) > newsize
);
648 /* We must not reuse existing core for the new info table when called
649 from realloc in the case of growing a large block, because the
650 block being grown is momentarily marked as free. In this case
651 _heaplimit is zero so we know not to reuse space for internal
655 /* First try to allocate the new info table in core we already
656 have, in the usual way using realloc. If realloc cannot
657 extend it in place or relocate it to existing sufficient core,
658 we will get called again, and the code above will notice the
659 `morecore_recursing' flag and return null. */
660 int save
= errno
; /* Don't want to clobber errno with ENOMEM. */
661 morecore_recursing
= 1;
662 newinfo
= _realloc_internal_nolock (_heapinfo
,
663 newsize
* sizeof (malloc_info
));
664 morecore_recursing
= 0;
669 /* We found some space in core, and realloc has put the old
670 table's blocks on the free list. Now zero the new part
671 of the table and install the new table location. */
672 memset (&newinfo
[heapsize
], 0,
673 (newsize
- heapsize
) * sizeof (malloc_info
));
680 /* Allocate new space for the malloc info table. */
683 newinfo
= align (newsize
* sizeof (malloc_info
));
688 (*__morecore
) (-size
);
692 /* Is it big enough to record status for its own space?
694 if ((size_t) BLOCK ((char *) newinfo
695 + newsize
* sizeof (malloc_info
))
699 /* Must try again. First give back most of what we just got. */
700 (*__morecore
) (- newsize
* sizeof (malloc_info
));
704 /* Copy the old table to the beginning of the new,
705 and zero the rest of the new table. */
706 memcpy (newinfo
, _heapinfo
, heapsize
* sizeof (malloc_info
));
707 memset (&newinfo
[heapsize
], 0,
708 (newsize
- heapsize
) * sizeof (malloc_info
));
713 register_heapinfo ();
715 /* Reset _heaplimit so _free_internal never decides
716 it can relocate or resize the info table. */
718 _free_internal_nolock (oldinfo
);
719 PROTECT_MALLOC_STATE (0);
721 /* The new heap limit includes the new table just allocated. */
722 _heaplimit
= BLOCK ((char *) newinfo
+ heapsize
* sizeof (malloc_info
));
727 _heaplimit
= BLOCK ((char *) result
+ size
);
731 /* Allocate memory from the heap. */
733 _malloc_internal_nolock (size_t size
)
736 size_t block
, blocks
, lastblocks
, start
;
740 /* ANSI C allows `malloc (0)' to either return NULL, or to return a
741 valid address you can realloc and free (though not dereference).
743 It turns out that some extant code (sunrpc, at least Ultrix's version)
744 expects `malloc (0)' to return non-NULL and breaks otherwise.
752 PROTECT_MALLOC_STATE (0);
754 if (size
< sizeof (struct list
))
755 size
= sizeof (struct list
);
757 /* Determine the allocation policy based on the request size. */
758 if (size
<= BLOCKSIZE
/ 2)
760 /* Small allocation to receive a fragment of a block.
761 Determine the logarithm to base two of the fragment size. */
762 register size_t log
= 1;
764 while ((size
/= 2) != 0)
767 /* Look in the fragment lists for a
768 free fragment of the desired size. */
769 next
= _fraghead
[log
].next
;
772 /* There are free fragments of this size.
773 Pop a fragment out of the fragment list and return it.
774 Update the block's nfree and first counters. */
776 next
->prev
->next
= next
->next
;
777 if (next
->next
!= NULL
)
778 next
->next
->prev
= next
->prev
;
779 block
= BLOCK (result
);
780 if (--_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
781 _heapinfo
[block
].busy
.info
.frag
.first
=
782 (uintptr_t) next
->next
% BLOCKSIZE
>> log
;
784 /* Update the statistics. */
786 _bytes_used
+= 1 << log
;
788 _bytes_free
-= 1 << log
;
792 /* No free fragments of the desired size, so get a new block
793 and break it into fragments, returning the first. */
794 #ifdef GC_MALLOC_CHECK
795 result
= _malloc_internal_nolock (BLOCKSIZE
);
796 PROTECT_MALLOC_STATE (0);
797 #elif defined (USE_PTHREAD)
798 result
= _malloc_internal_nolock (BLOCKSIZE
);
800 result
= malloc (BLOCKSIZE
);
804 PROTECT_MALLOC_STATE (1);
808 /* Link all fragments but the first into the free list. */
809 next
= (struct list
*) ((char *) result
+ (1 << log
));
811 next
->prev
= &_fraghead
[log
];
812 _fraghead
[log
].next
= next
;
814 for (i
= 2; i
< (size_t) (BLOCKSIZE
>> log
); ++i
)
816 next
= (struct list
*) ((char *) result
+ (i
<< log
));
817 next
->next
= _fraghead
[log
].next
;
818 next
->prev
= &_fraghead
[log
];
819 next
->prev
->next
= next
;
820 next
->next
->prev
= next
;
823 /* Initialize the nfree and first counters for this block. */
824 block
= BLOCK (result
);
825 _heapinfo
[block
].busy
.type
= log
;
826 _heapinfo
[block
].busy
.info
.frag
.nfree
= i
- 1;
827 _heapinfo
[block
].busy
.info
.frag
.first
= i
- 1;
829 _chunks_free
+= (BLOCKSIZE
>> log
) - 1;
830 _bytes_free
+= BLOCKSIZE
- (1 << log
);
831 _bytes_used
-= BLOCKSIZE
- (1 << log
);
836 /* Large allocation to receive one or more blocks.
837 Search the free list in a circle starting at the last place visited.
838 If we loop completely around without finding a large enough
839 space we will have to get more memory from the system. */
840 blocks
= BLOCKIFY (size
);
841 start
= block
= _heapindex
;
842 while (_heapinfo
[block
].free
.size
< blocks
)
844 block
= _heapinfo
[block
].free
.next
;
847 /* Need to get more from the system. Get a little extra. */
848 size_t wantblocks
= blocks
+ __malloc_extra_blocks
;
849 block
= _heapinfo
[0].free
.prev
;
850 lastblocks
= _heapinfo
[block
].free
.size
;
851 /* Check to see if the new core will be contiguous with the
852 final free block; if so we don't need to get as much. */
853 if (_heaplimit
!= 0 && block
+ lastblocks
== _heaplimit
&&
854 /* We can't do this if we will have to make the heap info
855 table bigger to accommodate the new space. */
856 block
+ wantblocks
<= heapsize
&&
857 get_contiguous_space ((wantblocks
- lastblocks
) * BLOCKSIZE
,
858 ADDRESS (block
+ lastblocks
)))
860 /* We got it contiguously. Which block we are extending
861 (the `final free block' referred to above) might have
862 changed, if it got combined with a freed info table. */
863 block
= _heapinfo
[0].free
.prev
;
864 _heapinfo
[block
].free
.size
+= (wantblocks
- lastblocks
);
865 _bytes_free
+= (wantblocks
- lastblocks
) * BLOCKSIZE
;
866 _heaplimit
+= wantblocks
- lastblocks
;
869 result
= morecore_nolock (wantblocks
* BLOCKSIZE
);
872 block
= BLOCK (result
);
873 /* Put the new block at the end of the free list. */
874 _heapinfo
[block
].free
.size
= wantblocks
;
875 _heapinfo
[block
].free
.prev
= _heapinfo
[0].free
.prev
;
876 _heapinfo
[block
].free
.next
= 0;
877 _heapinfo
[0].free
.prev
= block
;
878 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
= block
;
880 /* Now loop to use some of that block for this allocation. */
884 /* At this point we have found a suitable free list entry.
885 Figure out how to remove what we need from the list. */
886 result
= ADDRESS (block
);
887 if (_heapinfo
[block
].free
.size
> blocks
)
889 /* The block we found has a bit left over,
890 so relink the tail end back into the free list. */
891 _heapinfo
[block
+ blocks
].free
.size
892 = _heapinfo
[block
].free
.size
- blocks
;
893 _heapinfo
[block
+ blocks
].free
.next
894 = _heapinfo
[block
].free
.next
;
895 _heapinfo
[block
+ blocks
].free
.prev
896 = _heapinfo
[block
].free
.prev
;
897 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
898 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
899 = _heapindex
= block
+ blocks
;
903 /* The block exactly matches our requirements,
904 so just remove it from the list. */
905 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
906 = _heapinfo
[block
].free
.prev
;
907 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
908 = _heapindex
= _heapinfo
[block
].free
.next
;
912 _heapinfo
[block
].busy
.type
= 0;
913 _heapinfo
[block
].busy
.info
.size
= blocks
;
915 _bytes_used
+= blocks
* BLOCKSIZE
;
916 _bytes_free
-= blocks
* BLOCKSIZE
;
918 /* Mark all the blocks of the object just allocated except for the
919 first with a negative number so you can find the first block by
920 adding that adjustment. */
922 _heapinfo
[block
+ blocks
].busy
.info
.size
= -blocks
;
925 PROTECT_MALLOC_STATE (1);
931 _malloc_internal (size_t size
)
936 result
= _malloc_internal_nolock (size
);
945 void *(*hook
) (size_t);
947 if (!__malloc_initialized
&& !__malloc_initialize ())
950 /* Copy the value of __malloc_hook to an automatic variable in case
951 __malloc_hook is modified in another thread between its
952 NULL-check and the use.
954 Note: Strictly speaking, this is not a right solution. We should
955 use mutexes to access non-read-only variables that are shared
956 among multiple threads. We just leave it for compatibility with
957 glibc malloc (i.e., assignments to __malloc_hook) for now. */
958 hook
= __malloc_hook
;
959 return (hook
!= NULL
? *hook
: _malloc_internal
) (size
);
962 #if !(defined (_LIBC) || defined (HYBRID_MALLOC))
964 /* On some ANSI C systems, some libc functions call _malloc, _free
965 and _realloc. Make them use the GNU functions. */
967 extern void *_malloc (size_t);
968 extern void _free (void *);
969 extern void *_realloc (void *, size_t);
/* Thin wrapper: _malloc simply forwards to malloc.  */
972 _malloc (size_t size
)
974 return malloc (size
);
/* Thin wrapper: _realloc simply forwards to realloc.
   (The corresponding _free wrapper is defined nearby.)  */
984 _realloc (void *ptr
, size_t size
)
986 return realloc (ptr
, size
);
990 /* Free a block of memory allocated by `malloc'.
991 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
992 Written May 1989 by Mike Haertel.
994 This library is free software; you can redistribute it and/or
995 modify it under the terms of the GNU General Public License as
996 published by the Free Software Foundation; either version 2 of the
997 License, or (at your option) any later version.
999 This library is distributed in the hope that it will be useful,
1000 but WITHOUT ANY WARRANTY; without even the implied warranty of
1001 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1002 General Public License for more details.
1004 You should have received a copy of the GNU General Public
1005 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1007 The author may be reached (Email) at the address mike@ai.mit.edu,
1008 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1011 #ifndef HYBRID_MALLOC
1012 /* Debugging hook for free. */
1013 void (*__free_hook
) (void *__ptr
);
1015 /* List of blocks allocated by aligned_alloc. */
1016 struct alignlist
*_aligned_blocks
= NULL
;
1019 /* Return memory to the heap.
1020 Like `_free_internal' but don't lock mutex. */
1022 _free_internal_nolock (void *ptr
)
1025 size_t block
, blocks
;
1027 struct list
*prev
, *next
;
1029 const size_t lesscore_threshold
1030 /* Threshold of free space at which we will return some to the system. */
1031 = FINAL_FREE_BLOCKS
+ 2 * __malloc_extra_blocks
;
1033 register struct alignlist
*l
;
1038 PROTECT_MALLOC_STATE (0);
1040 LOCK_ALIGNED_BLOCKS ();
1041 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1042 if (l
->aligned
== ptr
)
1044 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1048 UNLOCK_ALIGNED_BLOCKS ();
1050 block
= BLOCK (ptr
);
1052 type
= _heapinfo
[block
].busy
.type
;
1056 /* Get as many statistics as early as we can. */
1058 _bytes_used
-= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1059 _bytes_free
+= _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
;
1061 /* Find the free cluster previous to this one in the free list.
1062 Start searching at the last block referenced; this may benefit
1063 programs with locality of allocation. */
1067 i
= _heapinfo
[i
].free
.prev
;
1071 i
= _heapinfo
[i
].free
.next
;
1072 while (i
> 0 && i
< block
);
1073 i
= _heapinfo
[i
].free
.prev
;
1076 /* Determine how to link this block into the free list. */
1077 if (block
== i
+ _heapinfo
[i
].free
.size
)
1079 /* Coalesce this block with its predecessor. */
1080 _heapinfo
[i
].free
.size
+= _heapinfo
[block
].busy
.info
.size
;
1085 /* Really link this block back into the free list. */
1086 _heapinfo
[block
].free
.size
= _heapinfo
[block
].busy
.info
.size
;
1087 _heapinfo
[block
].free
.next
= _heapinfo
[i
].free
.next
;
1088 _heapinfo
[block
].free
.prev
= i
;
1089 _heapinfo
[i
].free
.next
= block
;
1090 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1094 /* Now that the block is linked in, see if we can coalesce it
1095 with its successor (by deleting its successor from the list
1096 and adding in its size). */
1097 if (block
+ _heapinfo
[block
].free
.size
== _heapinfo
[block
].free
.next
)
1099 _heapinfo
[block
].free
.size
1100 += _heapinfo
[_heapinfo
[block
].free
.next
].free
.size
;
1101 _heapinfo
[block
].free
.next
1102 = _heapinfo
[_heapinfo
[block
].free
.next
].free
.next
;
1103 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
= block
;
1107 /* How many trailing free blocks are there now? */
1108 blocks
= _heapinfo
[block
].free
.size
;
1110 /* Where is the current end of accessible core? */
1111 curbrk
= (*__morecore
) (0);
1113 if (_heaplimit
!= 0 && curbrk
== ADDRESS (_heaplimit
))
1115 /* The end of the malloc heap is at the end of accessible core.
1116 It's possible that moving _heapinfo will allow us to
1117 return some space to the system. */
1119 size_t info_block
= BLOCK (_heapinfo
);
1120 size_t info_blocks
= _heapinfo
[info_block
].busy
.info
.size
;
1121 size_t prev_block
= _heapinfo
[block
].free
.prev
;
1122 size_t prev_blocks
= _heapinfo
[prev_block
].free
.size
;
1123 size_t next_block
= _heapinfo
[block
].free
.next
;
1124 size_t next_blocks
= _heapinfo
[next_block
].free
.size
;
1126 if (/* Win if this block being freed is last in core, the info table
1127 is just before it, the previous free block is just before the
1128 info table, and the two free blocks together form a useful
1129 amount to return to the system. */
1130 (block
+ blocks
== _heaplimit
&&
1131 info_block
+ info_blocks
== block
&&
1132 prev_block
!= 0 && prev_block
+ prev_blocks
== info_block
&&
1133 blocks
+ prev_blocks
>= lesscore_threshold
) ||
1134 /* Nope, not the case. We can also win if this block being
1135 freed is just before the info table, and the table extends
1136 to the end of core or is followed only by a free block,
1137 and the total free space is worth returning to the system. */
1138 (block
+ blocks
== info_block
&&
1139 ((info_block
+ info_blocks
== _heaplimit
&&
1140 blocks
>= lesscore_threshold
) ||
1141 (info_block
+ info_blocks
== next_block
&&
1142 next_block
+ next_blocks
== _heaplimit
&&
1143 blocks
+ next_blocks
>= lesscore_threshold
)))
1146 malloc_info
*newinfo
;
1147 size_t oldlimit
= _heaplimit
;
1149 /* Free the old info table, clearing _heaplimit to avoid
1150 recursion into this code. We don't want to return the
1151 table's blocks to the system before we have copied them to
1152 the new location. */
1154 _free_internal_nolock (_heapinfo
);
1155 _heaplimit
= oldlimit
;
1157 /* Tell malloc to search from the beginning of the heap for
1158 free blocks, so it doesn't reuse the ones just freed. */
1161 /* Allocate new space for the info table and move its data. */
1162 newinfo
= _malloc_internal_nolock (info_blocks
* BLOCKSIZE
);
1163 PROTECT_MALLOC_STATE (0);
1164 memmove (newinfo
, _heapinfo
, info_blocks
* BLOCKSIZE
);
1165 _heapinfo
= newinfo
;
1167 /* We should now have coalesced the free block with the
1168 blocks freed from the old info table. Examine the entire
1169 trailing free block to decide below whether to return some
1171 block
= _heapinfo
[0].free
.prev
;
1172 blocks
= _heapinfo
[block
].free
.size
;
1175 /* Now see if we can return stuff to the system. */
1176 if (block
+ blocks
== _heaplimit
&& blocks
>= lesscore_threshold
)
1178 register size_t bytes
= blocks
* BLOCKSIZE
;
1179 _heaplimit
-= blocks
;
1180 (*__morecore
) (-bytes
);
1181 _heapinfo
[_heapinfo
[block
].free
.prev
].free
.next
1182 = _heapinfo
[block
].free
.next
;
1183 _heapinfo
[_heapinfo
[block
].free
.next
].free
.prev
1184 = _heapinfo
[block
].free
.prev
;
1185 block
= _heapinfo
[block
].free
.prev
;
1187 _bytes_free
-= bytes
;
1191 /* Set the next search to begin at this block. */
1196 /* Do some of the statistics. */
1198 _bytes_used
-= 1 << type
;
1200 _bytes_free
+= 1 << type
;
1202 /* Get the address of the first free fragment in this block. */
1203 prev
= (struct list
*) ((char *) ADDRESS (block
) +
1204 (_heapinfo
[block
].busy
.info
.frag
.first
<< type
));
1206 if (_heapinfo
[block
].busy
.info
.frag
.nfree
== (BLOCKSIZE
>> type
) - 1)
1208 /* If all fragments of this block are free, remove them
1209 from the fragment list and free the whole block. */
1211 for (i
= 1; i
< (size_t) (BLOCKSIZE
>> type
); ++i
)
1213 prev
->prev
->next
= next
;
1215 next
->prev
= prev
->prev
;
1216 _heapinfo
[block
].busy
.type
= 0;
1217 _heapinfo
[block
].busy
.info
.size
= 1;
1219 /* Keep the statistics accurate. */
1221 _bytes_used
+= BLOCKSIZE
;
1222 _chunks_free
-= BLOCKSIZE
>> type
;
1223 _bytes_free
-= BLOCKSIZE
;
1225 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1226 _free_internal_nolock (ADDRESS (block
));
1228 free (ADDRESS (block
));
1231 else if (_heapinfo
[block
].busy
.info
.frag
.nfree
!= 0)
1233 /* If some fragments of this block are free, link this
1234 fragment into the fragment list after the first free
1235 fragment of this block. */
1237 next
->next
= prev
->next
;
1240 if (next
->next
!= NULL
)
1241 next
->next
->prev
= next
;
1242 ++_heapinfo
[block
].busy
.info
.frag
.nfree
;
1246 /* No fragments of this block are free, so link this
1247 fragment into the fragment list and announce that
1248 it is the first free fragment of this block. */
1250 _heapinfo
[block
].busy
.info
.frag
.nfree
= 1;
1251 _heapinfo
[block
].busy
.info
.frag
.first
=
1252 (uintptr_t) ptr
% BLOCKSIZE
>> type
;
1253 prev
->next
= _fraghead
[type
].next
;
1254 prev
->prev
= &_fraghead
[type
];
1255 prev
->prev
->next
= prev
;
1256 if (prev
->next
!= NULL
)
1257 prev
->next
->prev
= prev
;
1262 PROTECT_MALLOC_STATE (1);
/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (void *ptr)
{
  /* Serialize access to the heap data structures, then delegate to
     the unlocked worker.  */
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1275 /* Return memory to the heap. */
1280 void (*hook
) (void *) = __free_hook
;
1285 _free_internal (ptr
);
1288 #ifndef HYBRID_MALLOC
1289 /* Define the `cfree' alias for `free'. */
1291 weak_alias (free
, cfree
)
1300 /* Change the size of a block allocated by `malloc'.
1301 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1302 Written May 1989 by Mike Haertel.
1304 This library is free software; you can redistribute it and/or
1305 modify it under the terms of the GNU General Public License as
1306 published by the Free Software Foundation; either version 2 of the
1307 License, or (at your option) any later version.
1309 This library is distributed in the hope that it will be useful,
1310 but WITHOUT ANY WARRANTY; without even the implied warranty of
1311 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1312 General Public License for more details.
1314 You should have received a copy of the GNU General Public
1315 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1317 The author may be reached (Email) at the address mike@ai.mit.edu,
1318 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1321 #define min(a, b) ((a) < (b) ? (a) : (b))
1324 #ifndef HYBRID_MALLOC
1325 /* Debugging hook for realloc. */
1326 void *(*__realloc_hook
) (void *ptr
, size_t size
);
1329 /* Resize the given region to the new size, returning a pointer
1330 to the (possibly moved) region. This is optimized for speed;
1331 some benchmarks seem to indicate that greater compactness is
1332 achieved by unconditionally allocating and copying to a
1333 new region. This module has incestuous knowledge of the
1334 internals of both free and malloc. */
1336 _realloc_internal_nolock (void *ptr
, size_t size
)
1340 size_t block
, blocks
, oldlimit
;
1344 _free_internal_nolock (ptr
);
1345 return _malloc_internal_nolock (0);
1347 else if (ptr
== NULL
)
1348 return _malloc_internal_nolock (size
);
1350 block
= BLOCK (ptr
);
1352 PROTECT_MALLOC_STATE (0);
1354 type
= _heapinfo
[block
].busy
.type
;
1358 /* Maybe reallocate a large block to a small fragment. */
1359 if (size
<= BLOCKSIZE
/ 2)
1361 result
= _malloc_internal_nolock (size
);
1364 memcpy (result
, ptr
, size
);
1365 _free_internal_nolock (ptr
);
1370 /* The new size is a large allocation as well;
1371 see if we can hold it in place. */
1372 blocks
= BLOCKIFY (size
);
1373 if (blocks
< _heapinfo
[block
].busy
.info
.size
)
1375 /* The new size is smaller; return
1376 excess memory to the free list. */
1377 _heapinfo
[block
+ blocks
].busy
.type
= 0;
1378 _heapinfo
[block
+ blocks
].busy
.info
.size
1379 = _heapinfo
[block
].busy
.info
.size
- blocks
;
1380 _heapinfo
[block
].busy
.info
.size
= blocks
;
1381 /* We have just created a new chunk by splitting a chunk in two.
1382 Now we will free this chunk; increment the statistics counter
1383 so it doesn't become wrong when _free_internal decrements it. */
1385 _free_internal_nolock (ADDRESS (block
+ blocks
));
1388 else if (blocks
== _heapinfo
[block
].busy
.info
.size
)
1389 /* No size change necessary. */
1393 /* Won't fit, so allocate a new region that will.
1394 Free the old region first in case there is sufficient
1395 adjacent free space to grow without moving. */
1396 blocks
= _heapinfo
[block
].busy
.info
.size
;
1397 /* Prevent free from actually returning memory to the system. */
1398 oldlimit
= _heaplimit
;
1400 _free_internal_nolock (ptr
);
1401 result
= _malloc_internal_nolock (size
);
1402 PROTECT_MALLOC_STATE (0);
1403 if (_heaplimit
== 0)
1404 _heaplimit
= oldlimit
;
1407 /* Now we're really in trouble. We have to unfree
1408 the thing we just freed. Unfortunately it might
1409 have been coalesced with its neighbors. */
1410 if (_heapindex
== block
)
1411 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1415 = _malloc_internal_nolock ((block
- _heapindex
) * BLOCKSIZE
);
1416 (void) _malloc_internal_nolock (blocks
* BLOCKSIZE
);
1417 _free_internal_nolock (previous
);
1422 memmove (result
, ptr
, blocks
* BLOCKSIZE
);
1427 /* Old size is a fragment; type is logarithm
1428 to base two of the fragment size. */
1429 if (size
> (size_t) (1 << (type
- 1)) &&
1430 size
<= (size_t) (1 << type
))
1431 /* The new size is the same kind of fragment. */
1435 /* The new size is different; allocate a new space,
1436 and copy the lesser of the new size and the old. */
1437 result
= _malloc_internal_nolock (size
);
1440 memcpy (result
, ptr
, min (size
, (size_t) 1 << type
));
1441 _free_internal_nolock (ptr
);
1446 PROTECT_MALLOC_STATE (1);
1452 _realloc_internal (void *ptr
, size_t size
)
1457 result
= _realloc_internal_nolock (ptr
, size
);
1464 realloc (void *ptr
, size_t size
)
1466 void *(*hook
) (void *, size_t);
1468 if (!__malloc_initialized
&& !__malloc_initialize ())
1471 hook
= __realloc_hook
;
1472 return (hook
!= NULL
? *hook
: _realloc_internal
) (ptr
, size
);
1474 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1476 This library is free software; you can redistribute it and/or
1477 modify it under the terms of the GNU General Public License as
1478 published by the Free Software Foundation; either version 2 of the
1479 License, or (at your option) any later version.
1481 This library is distributed in the hope that it will be useful,
1482 but WITHOUT ANY WARRANTY; without even the implied warranty of
1483 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1484 General Public License for more details.
1486 You should have received a copy of the GNU General Public
1487 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1489 The author may be reached (Email) at the address mike@ai.mit.edu,
1490 or (US mail) as Mike Haertel c/o Free Software Foundation. */
/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
void *
calloc (size_t nmemb, size_t size)
{
  void *result;
  size_t bytes = nmemb * size;

  /* Reject multiplications that overflowed size_t.  */
  if (size != 0 && bytes / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  result = malloc (bytes);
  if (result)
    return memset (result, 0, bytes);
  return result;
}
1511 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
1512 This file is part of the GNU C Library.
1514 The GNU C Library is free software; you can redistribute it and/or modify
1515 it under the terms of the GNU General Public License as published by
1516 the Free Software Foundation; either version 2, or (at your option)
1519 The GNU C Library is distributed in the hope that it will be useful,
1520 but WITHOUT ANY WARRANTY; without even the implied warranty of
1521 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1522 GNU General Public License for more details.
1524 You should have received a copy of the GNU General Public License
1525 along with the GNU C Library. If not, see <http://www.gnu.org/licenses/>. */
1527 /* uClibc defines __GNU_LIBRARY__, but it is not completely
1529 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
1531 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
1532 /* It is best not to declare this and cast its result on foreign operating
1533 systems with potentially hostile include files. */
1535 extern void *__sbrk (ptrdiff_t increment
);
1536 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#ifdef HYBRID_MALLOC
  /* Before dumping, grow the private bss arena instead of the
     system break.  */
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  if (result == (void *) -1)
    return NULL;
  return result;
}
1556 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
1558 This library is free software; you can redistribute it and/or
1559 modify it under the terms of the GNU General Public License as
1560 published by the Free Software Foundation; either version 2 of the
1561 License, or (at your option) any later version.
1563 This library is distributed in the hope that it will be useful,
1564 but WITHOUT ANY WARRANTY; without even the implied warranty of
1565 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1566 General Public License for more details.
1568 You should have received a copy of the GNU General Public
1569 License along with this library. If not, see <http://www.gnu.org/licenses/>. */
1571 #ifndef HYBRID_MALLOC
1572 void *(*__memalign_hook
) (size_t size
, size_t alignment
);
1576 aligned_alloc (size_t alignment
, size_t size
)
1579 size_t adj
, lastadj
;
1580 void *(*hook
) (size_t, size_t) = __memalign_hook
;
1583 return (*hook
) (alignment
, size
);
1585 /* Allocate a block with enough extra space to pad the block with up to
1586 (ALIGNMENT - 1) bytes if necessary. */
1587 if (- size
< alignment
)
1592 result
= malloc (size
+ alignment
- 1);
1596 /* Figure out how much we will need to pad this particular block
1597 to achieve the required alignment. */
1598 adj
= alignment
- (uintptr_t) result
% alignment
;
1599 if (adj
== alignment
)
1602 if (adj
!= alignment
- 1)
1606 /* Reallocate the block with only as much excess as it
1609 result
= malloc (size
+ adj
);
1610 if (result
== NULL
) /* Impossible unless interrupted. */
1614 adj
= alignment
- (uintptr_t) result
% alignment
;
1615 if (adj
== alignment
)
1617 /* It's conceivable we might have been so unlucky as to get
1618 a different block with weaker alignment. If so, this
1619 block is too short to contain SIZE after alignment
1620 correction. So we must try again and get another block,
1622 } while (adj
> lastadj
);
1627 /* Record this block in the list of aligned blocks, so that `free'
1628 can identify the pointer it is passed, which will be in the middle
1629 of an allocated block. */
1631 struct alignlist
*l
;
1632 LOCK_ALIGNED_BLOCKS ();
1633 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1634 if (l
->aligned
== NULL
)
1635 /* This slot is free. Use it. */
1639 l
= malloc (sizeof *l
);
1642 l
->next
= _aligned_blocks
;
1643 _aligned_blocks
= l
;
1649 result
= l
->aligned
= (char *) result
+ adj
;
1651 UNLOCK_ALIGNED_BLOCKS ();
1662 /* Note that memalign and posix_memalign are not used in Emacs. */
1663 #ifndef HYBRID_MALLOC
/* An obsolete alias for aligned_alloc, for any old libraries that use
   this alias.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
/* If HYBRID_MALLOC is defined, we may want to use the system
   posix_memalign below.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* POSIX requires the alignment to be a power-of-two multiple of
     sizeof (void *).  */
  if (alignment == 0
      || alignment % sizeof (void *) != 0
      || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = aligned_alloc (alignment, size);
  if (mem == NULL)
    return ENOMEM;

  *memptr = mem;

  return 0;
}
1695 /* Allocate memory on a page boundary.
1696 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
1698 This library is free software; you can redistribute it and/or
1699 modify it under the terms of the GNU General Public License as
1700 published by the Free Software Foundation; either version 2 of the
1701 License, or (at your option) any later version.
1703 This library is distributed in the hope that it will be useful,
1704 but WITHOUT ANY WARRANTY; without even the implied warranty of
1705 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1706 General Public License for more details.
1708 You should have received a copy of the GNU General Public
1709 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1711 The author may be reached (Email) at the address mike@ai.mit.edu,
1712 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1714 #ifndef HYBRID_MALLOC
1715 /* Allocate SIZE bytes on a page boundary. */
1716 extern void *valloc (size_t);
1718 #if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1719 # include "getpagesize.h"
1720 #elif !defined getpagesize
1721 extern int getpagesize (void);
/* Cached system page size; filled in lazily on first call.  */
static size_t pagesize;

/* Allocate SIZE bytes on a page boundary.  */
void *
valloc (size_t size)
{
  if (pagesize == 0)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
1734 #endif /* HYBRID_MALLOC */
1739 #undef aligned_alloc
1742 #ifdef HYBRID_MALLOC
1743 /* Declare system malloc and friends. */
1744 extern void *malloc (size_t size
);
1745 extern void *realloc (void *ptr
, size_t size
);
1746 extern void *calloc (size_t nmemb
, size_t size
);
1747 extern void free (void *ptr
);
1748 #ifdef HAVE_ALIGNED_ALLOC
1749 extern void *aligned_alloc (size_t alignment
, size_t size
);
1750 #elif defined HAVE_POSIX_MEMALIGN
1751 extern int posix_memalign (void **memptr
, size_t alignment
, size_t size
);
1754 /* See the comments near the beginning of this file for explanations
1755 of the following functions. */
1758 hybrid_malloc (size_t size
)
1761 return malloc (size
);
1762 return gmalloc (size
);
1766 hybrid_calloc (size_t nmemb
, size_t size
)
1769 return calloc (nmemb
, size
);
1770 return gcalloc (nmemb
, size
);
1774 hybrid_free (void *ptr
)
1778 else if (!ALLOCATED_BEFORE_DUMPING (ptr
))
1780 /* Otherwise the dumped emacs is trying to free something allocated
1781 before dumping; do nothing. */
1785 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
1787 hybrid_aligned_alloc (size_t alignment
, size_t size
)
1790 return galigned_alloc (alignment
, size
);
1791 /* The following is copied from alloc.c */
1792 #ifdef HAVE_ALIGNED_ALLOC
1793 return aligned_alloc (alignment
, size
);
1794 #else /* HAVE_POSIX_MEMALIGN */
1796 return posix_memalign (&p
, alignment
, size
) == 0 ? p
: 0;
1802 hybrid_realloc (void *ptr
, size_t size
)
1806 size_t block
, oldsize
;
1809 return grealloc (ptr
, size
);
1810 if (!ALLOCATED_BEFORE_DUMPING (ptr
))
1811 return realloc (ptr
, size
);
1813 /* The dumped emacs is trying to realloc storage allocated before
1814 dumping. We just malloc new space and copy the data. */
1815 if (size
== 0 || ptr
== NULL
)
1816 return malloc (size
);
1817 block
= ((char *) ptr
- _heapbase
) / BLOCKSIZE
+ 1;
1818 type
= _heapinfo
[block
].busy
.type
;
1820 type
== 0 ? _heapinfo
[block
].busy
.info
.size
* BLOCKSIZE
1821 : (size_t) 1 << type
;
1822 result
= malloc (size
);
1824 return memcpy (result
, ptr
, min (oldsize
, size
));
1828 #ifdef HYBRID_GET_CURRENT_DIR_NAME
1829 /* Defined in sysdep.c. */
1830 char *gget_current_dir_name (void);
1833 hybrid_get_current_dir_name (void)
1836 return get_current_dir_name ();
1837 return gget_current_dir_name ();
1841 #else /* ! HYBRID_MALLOC */
1844 malloc (size_t size
)
1846 return gmalloc (size
);
1850 calloc (size_t nmemb
, size_t size
)
1852 return gcalloc (nmemb
, size
);
1862 aligned_alloc (size_t alignment
, size_t size
)
1864 return galigned_alloc (alignment
, size
);
1868 realloc (void *ptr
, size_t size
)
1870 return grealloc (ptr
, size
);
1873 #endif /* HYBRID_MALLOC */
1877 /* Standard debugging hooks for `malloc'.
1878 Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
1879 Written May 1989 by Mike Haertel.
1881 This library is free software; you can redistribute it and/or
1882 modify it under the terms of the GNU General Public License as
1883 published by the Free Software Foundation; either version 2 of the
1884 License, or (at your option) any later version.
1886 This library is distributed in the hope that it will be useful,
1887 but WITHOUT ANY WARRANTY; without even the implied warranty of
1888 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1889 General Public License for more details.
1891 You should have received a copy of the GNU General Public
1892 License along with this library. If not, see <http://www.gnu.org/licenses/>.
1894 The author may be reached (Email) at the address mike@ai.mit.edu,
1895 or (US mail) as Mike Haertel c/o Free Software Foundation. */
1899 /* Old hook values. */
1900 static void (*old_free_hook
) (void *ptr
);
1901 static void *(*old_malloc_hook
) (size_t size
);
1902 static void *(*old_realloc_hook
) (void *ptr
, size_t size
);
1904 /* Function to call when something awful happens. */
1905 static void (*abortfunc
) (enum mcheck_status
);
1907 /* Arbitrary magical numbers. */
1908 #define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1909 #define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
1910 #define MAGICBYTE ((char) 0xd7)
1911 #define MALLOCFLOOD ((char) 0x93)
1912 #define FREEFLOOD ((char) 0x95)
1916 size_t size
; /* Exact size requested by user. */
1917 size_t magic
; /* Magic number to check header integrity. */
1920 static enum mcheck_status
1921 checkhdr (const struct hdr
*hdr
)
1923 enum mcheck_status status
;
1927 status
= MCHECK_HEAD
;
1930 status
= MCHECK_FREE
;
1933 if (((char *) &hdr
[1])[hdr
->size
] != MAGICBYTE
)
1934 status
= MCHECK_TAIL
;
1939 if (status
!= MCHECK_OK
)
1940 (*abortfunc
) (status
);
1945 freehook (void *ptr
)
1951 struct alignlist
*l
;
1953 /* If the block was allocated by aligned_alloc, its real pointer
1954 to free is recorded in _aligned_blocks; find that. */
1955 PROTECT_MALLOC_STATE (0);
1956 LOCK_ALIGNED_BLOCKS ();
1957 for (l
= _aligned_blocks
; l
!= NULL
; l
= l
->next
)
1958 if (l
->aligned
== ptr
)
1960 l
->aligned
= NULL
; /* Mark the slot in the list as free. */
1964 UNLOCK_ALIGNED_BLOCKS ();
1965 PROTECT_MALLOC_STATE (1);
1967 hdr
= ((struct hdr
*) ptr
) - 1;
1969 hdr
->magic
= MAGICFREE
;
1970 memset (ptr
, FREEFLOOD
, hdr
->size
);
1975 __free_hook
= old_free_hook
;
1977 __free_hook
= freehook
;
1981 mallochook (size_t size
)
1985 __malloc_hook
= old_malloc_hook
;
1986 hdr
= malloc (sizeof *hdr
+ size
+ 1);
1987 __malloc_hook
= mallochook
;
1992 hdr
->magic
= MAGICWORD
;
1993 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
1994 return memset (hdr
+ 1, MALLOCFLOOD
, size
);
1998 reallochook (void *ptr
, size_t size
)
2000 struct hdr
*hdr
= NULL
;
2005 hdr
= ((struct hdr
*) ptr
) - 1;
2010 memset ((char *) ptr
+ size
, FREEFLOOD
, osize
- size
);
2013 __free_hook
= old_free_hook
;
2014 __malloc_hook
= old_malloc_hook
;
2015 __realloc_hook
= old_realloc_hook
;
2016 hdr
= realloc (hdr
, sizeof *hdr
+ size
+ 1);
2017 __free_hook
= freehook
;
2018 __malloc_hook
= mallochook
;
2019 __realloc_hook
= reallochook
;
2024 hdr
->magic
= MAGICWORD
;
2025 ((char *) &hdr
[1])[size
] = MAGICBYTE
;
2027 memset ((char *) (hdr
+ 1) + osize
, MALLOCFLOOD
, size
- osize
);
2032 mabort (enum mcheck_status status
)
2038 msg
= "memory is consistent, library is buggy";
2041 msg
= "memory clobbered before allocated block";
2044 msg
= "memory clobbered past end of allocated block";
2047 msg
= "block freed twice";
2050 msg
= "bogus mcheck_status, library is buggy";
2053 #ifdef __GNU_LIBRARY__
2056 fprintf (stderr
, "mcheck: %s\n", msg
);
2066 static int mcheck_used
= 0;
2069 mcheck (void (*func
) (enum mcheck_status
))
2071 abortfunc
= (func
!= NULL
) ? func
: &mabort
;
2073 /* These hooks may not be safely inserted if malloc is already in use. */
2074 if (!__malloc_initialized
&& !mcheck_used
)
2076 old_free_hook
= __free_hook
;
2077 __free_hook
= freehook
;
2078 old_malloc_hook
= __malloc_hook
;
2079 __malloc_hook
= mallochook
;
2080 old_realloc_hook
= __realloc_hook
;
2081 __realloc_hook
= reallochook
;
2085 return mcheck_used
? 0 : -1;
2091 return mcheck_used
? checkhdr (ptr
) : MCHECK_DISABLED
;
2094 #endif /* GC_MCHECK */