1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
4 This file is part of GNU Emacs.
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
29 #include "lisp.h" /* Needed for VALBITS. */
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
38 typedef void *POINTER
;
45 typedef char *POINTER
;
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER
;
53 typedef unsigned long SIZE
;
55 /* Declared in dispnew.c, this version doesn't screw up if regions
57 extern void safe_bcopy ();
59 extern int __malloc_extra_blocks
;
66 typedef void *POINTER
;
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
73 #define bzero(x, len) memset (x, 0, len)
75 #endif /* not emacs */
77 #include "getpagesize.h"
79 #define NIL ((POINTER) 0)
81 /* A flag to indicate whether we have initialized ralloc yet. For
82 Emacs's sake, please do not make this local to malloc_init; on some
83 machines, the dumping procedure makes all static variables
84 read-only. On these machines, the word static is #defined to be
85 the empty string, meaning that r_alloc_initialized becomes an
86 automatic variable, and loses its value each time Emacs is started up. */
87 static int r_alloc_initialized
= 0;
89 static void r_alloc_init ();
91 /* Declarations for working with the malloc, ralloc, and system breaks. */
93 /* Function to set the real break value. */
94 static POINTER (*real_morecore
) ();
96 /* The break value, as seen by malloc. */
97 static POINTER virtual_break_value
;
99 /* The address of the end of the last data in use by ralloc,
100 including relocatable blocs as well as malloc data. */
101 static POINTER break_value
;
103 /* This is the size of a page. We round memory requests to this boundary. */
104 static int page_size
;
106 /* Whenever we get memory from the system, get this many extra bytes. This
107 must be a multiple of page_size. */
108 static int extra_bytes
;
110 /* Macros for rounding. Note that rounding to any value is possible
111 by changing the definition of PAGE. */
112 #define PAGE (getpagesize ())
113 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
114 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
116 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
118 #define MEM_ALIGN sizeof(double)
119 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
122 /* Data structures of heaps and blocs. */
124 /* The relocatable objects, or blocs, and the malloc data
125 both reside within one or more heaps.
126 Each heap contains malloc data, running from `start' to `bloc_start',
127 and relocatable objects, running from `bloc_start' to `free'.
129 Relocatable objects may relocate within the same heap
130 or may move into another heap; the heaps themselves may grow
133 We try to make just one heap and make it larger as necessary.
134 But sometimes we can't do that, because we can't get contiguous
135 space to add onto the heap. When that happens, we start a new heap. */
141 /* Start of memory range of this heap. */
143 /* End of memory range of this heap. */
145 /* Start of relocatable data in this heap. */
147 /* Start of unused space in this heap. */
149 /* First bloc in this heap. */
150 struct bp
*first_bloc
;
151 /* Last bloc in this heap. */
152 struct bp
*last_bloc
;
155 #define NIL_HEAP ((heap_ptr) 0)
156 #define HEAP_PTR_SIZE (sizeof (struct heap))
158 /* This is the first heap object.
159 If we need additional heap objects, each one resides at the beginning of
160 the space it covers. */
161 static struct heap heap_base
;
163 /* Head and tail of the list of heaps. */
164 static heap_ptr first_heap
, last_heap
;
166 /* These structures are allocated in the malloc arena.
167 The linked list is kept in order of increasing '.data' members.
168 The data blocks abut each other; if b->next is non-nil, then
169 b->data + b->size == b->next->data. */
177 POINTER new_data
; /* temporarily used for relocation */
178 /* Heap this bloc is in. */
182 #define NIL_BLOC ((bloc_ptr) 0)
183 #define BLOC_PTR_SIZE (sizeof (struct bp))
185 /* Head and tail of the list of relocatable blocs. */
186 static bloc_ptr first_bloc
, last_bloc
;
189 /* Functions to get and return memory from the system. */
191 /* Find the heap that ADDRESS falls within. */
199 for (heap
= last_heap
; heap
; heap
= heap
->prev
)
201 if (heap
->start
<= address
&& address
<= heap
->end
)
208 /* Find SIZE bytes of space in a heap.
209 Try to get them at ADDRESS (which must fall within some heap's range)
210 if we can get that many within one heap.
212 If enough space is not presently available in our reserve, this means
213 getting more page-aligned space from the system. If the returned space
214 is not contiguous to the last heap, allocate a new heap, and append it
216 obtain does not try to keep track of whether space is in use
217 or not in use. It just returns the address of SIZE bytes that
218 fall within a single heap. If you call obtain twice in a row
219 with the same arguments, you typically get the same value.
220 to the heap list. It's the caller's responsibility to keep
221 track of what space is in use.
223 Return the address of the space if all went well, or zero if we couldn't
224 allocate the memory. */
227 obtain (address
, size
)
232 SIZE already_available
;
234 /* Find the heap that ADDRESS falls within. */
235 for (heap
= last_heap
; heap
; heap
= heap
->prev
)
237 if (heap
->start
<= address
&& address
<= heap
->end
)
244 /* If we can't fit SIZE bytes in that heap,
245 try successive later heaps. */
246 while (heap
&& address
+ size
> heap
->end
)
249 if (heap
== NIL_HEAP
)
251 address
= heap
->bloc_start
;
254 /* If we can't fit them within any existing heap,
256 if (heap
== NIL_HEAP
)
258 POINTER
new = (*real_morecore
)(0);
261 already_available
= (char *)last_heap
->end
- (char *)address
;
263 if (new != last_heap
->end
)
265 /* Someone else called sbrk. Make a new heap. */
267 heap_ptr new_heap
= (heap_ptr
) MEM_ROUNDUP (new);
268 POINTER bloc_start
= (POINTER
) MEM_ROUNDUP ((POINTER
)(new_heap
+ 1));
270 if ((*real_morecore
) (bloc_start
- new) != new)
273 new_heap
->start
= new;
274 new_heap
->end
= bloc_start
;
275 new_heap
->bloc_start
= bloc_start
;
276 new_heap
->free
= bloc_start
;
277 new_heap
->next
= NIL_HEAP
;
278 new_heap
->prev
= last_heap
;
279 new_heap
->first_bloc
= NIL_BLOC
;
280 new_heap
->last_bloc
= NIL_BLOC
;
281 last_heap
->next
= new_heap
;
282 last_heap
= new_heap
;
284 address
= bloc_start
;
285 already_available
= 0;
288 /* Add space to the last heap (which we may have just created).
289 Get some extra, so we can come here less often. */
291 get
= size
+ extra_bytes
- already_available
;
292 get
= (char *) ROUNDUP ((char *)last_heap
->end
+ get
)
293 - (char *) last_heap
->end
;
295 if ((*real_morecore
) (get
) != last_heap
->end
)
298 last_heap
->end
+= get
;
304 /* Return unused heap space to the system
305 if there is a lot of unused space now.
306 This can make the last heap smaller;
307 it can also eliminate the last heap entirely. */
315 /* Add the amount of space beyond break_value
316 in all heaps which extend beyond break_value at all. */
318 for (h
= last_heap
; h
&& break_value
< h
->end
; h
= h
->prev
)
320 excess
+= (char *) h
->end
- (char *) ((break_value
< h
->bloc_start
)
321 ? h
->bloc_start
: break_value
);
324 if (excess
> extra_bytes
* 2 && (*real_morecore
) (0) == last_heap
->end
)
326 /* Keep extra_bytes worth of empty space.
327 And don't free anything unless we can free at least extra_bytes. */
328 excess
-= extra_bytes
;
330 if ((char *)last_heap
->end
- (char *)last_heap
->bloc_start
<= excess
)
332 /* This heap should have no blocs in it. */
333 if (last_heap
->first_bloc
!= NIL_BLOC
334 || last_heap
->last_bloc
!= NIL_BLOC
)
337 /* Return the last heap, with its header, to the system. */
338 excess
= (char *)last_heap
->end
- (char *)last_heap
->start
;
339 last_heap
= last_heap
->prev
;
340 last_heap
->next
= NIL_HEAP
;
344 excess
= (char *) last_heap
->end
345 - (char *) ROUNDUP ((char *)last_heap
->end
- excess
);
346 last_heap
->end
-= excess
;
349 if ((*real_morecore
) (- excess
) == 0)
354 /* Return the total size in use by relocating allocator,
355 above where malloc gets space. */
358 r_alloc_size_in_use ()
360 return break_value
- virtual_break_value
;
363 /* The meat - allocating, freeing, and relocating blocs. */
365 /* Find the bloc referenced by the address in PTR. Returns a pointer
372 register bloc_ptr p
= first_bloc
;
374 while (p
!= NIL_BLOC
)
376 if (p
->variable
== ptr
&& p
->data
== *ptr
)
385 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
386 Returns a pointer to the new bloc, or zero if we couldn't allocate
387 memory for the new block. */
393 register bloc_ptr new_bloc
;
394 register heap_ptr heap
;
396 if (! (new_bloc
= (bloc_ptr
) malloc (BLOC_PTR_SIZE
))
397 || ! (new_bloc
->data
= obtain (break_value
, size
)))
405 break_value
= new_bloc
->data
+ size
;
407 new_bloc
->size
= size
;
408 new_bloc
->next
= NIL_BLOC
;
409 new_bloc
->variable
= (POINTER
*) NIL
;
410 new_bloc
->new_data
= 0;
412 /* Record in the heap that this space is in use. */
413 heap
= find_heap (new_bloc
->data
);
414 heap
->free
= break_value
;
416 /* Maintain the correspondence between heaps and blocs. */
417 new_bloc
->heap
= heap
;
418 heap
->last_bloc
= new_bloc
;
419 if (heap
->first_bloc
== NIL_BLOC
)
420 heap
->first_bloc
= new_bloc
;
422 /* Put this bloc on the doubly-linked list of blocs. */
425 new_bloc
->prev
= last_bloc
;
426 last_bloc
->next
= new_bloc
;
427 last_bloc
= new_bloc
;
431 first_bloc
= last_bloc
= new_bloc
;
432 new_bloc
->prev
= NIL_BLOC
;
438 /* Calculate new locations of blocs in the list beginning with BLOC,
439 relocating it to start at ADDRESS, in heap HEAP. If enough space is
440 not presently available in our reserve, call obtain for
443 Store the new location of each bloc in its new_data field.
444 Do not touch the contents of blocs or break_value. */
447 relocate_blocs (bloc
, heap
, address
)
452 register bloc_ptr b
= bloc
;
456 /* If bloc B won't fit within HEAP,
457 move to the next heap and try again. */
458 while (heap
&& address
+ b
->size
> heap
->end
)
461 if (heap
== NIL_HEAP
)
463 address
= heap
->bloc_start
;
466 /* If BLOC won't fit in any heap,
467 get enough new space to hold BLOC and all following blocs. */
468 if (heap
== NIL_HEAP
)
470 register bloc_ptr tb
= b
;
473 /* Add up the size of all the following blocs. */
474 while (tb
!= NIL_BLOC
)
480 /* Get that space. */
481 address
= obtain (address
, s
);
488 /* Record the new address of this bloc
489 and update where the next bloc can start. */
490 b
->new_data
= address
;
498 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
499 This is necessary if we put the memory of space of BLOC
500 before that of BEFORE. */
503 reorder_bloc (bloc
, before
)
504 bloc_ptr bloc
, before
;
508 /* Splice BLOC out from where it is. */
517 /* Splice it in before BEFORE. */
528 /* Update the records of which heaps contain which blocs, starting
529 with heap HEAP and bloc BLOC. */
532 update_heap_bloc_correspondence (bloc
, heap
)
538 /* Initialize HEAP's status to reflect blocs before BLOC. */
539 if (bloc
!= NIL_BLOC
&& bloc
->prev
!= NIL_BLOC
&& bloc
->prev
->heap
== heap
)
541 /* The previous bloc is in HEAP. */
542 heap
->last_bloc
= bloc
->prev
;
543 heap
->free
= bloc
->prev
->data
+ bloc
->prev
->size
;
547 /* HEAP contains no blocs before BLOC. */
548 heap
->first_bloc
= NIL_BLOC
;
549 heap
->last_bloc
= NIL_BLOC
;
550 heap
->free
= heap
->bloc_start
;
553 /* Advance through blocs one by one. */
554 for (b
= bloc
; b
!= NIL_BLOC
; b
= b
->next
)
556 /* Advance through heaps, marking them empty,
557 till we get to the one that B is in. */
560 if (heap
->bloc_start
<= b
->data
&& b
->data
<= heap
->end
)
563 /* We know HEAP is not null now,
564 because there has to be space for bloc B. */
565 heap
->first_bloc
= NIL_BLOC
;
566 heap
->last_bloc
= NIL_BLOC
;
567 heap
->free
= heap
->bloc_start
;
570 /* Update HEAP's status for bloc B. */
571 heap
->free
= b
->data
+ b
->size
;
573 if (heap
->first_bloc
== NIL_BLOC
)
574 heap
->first_bloc
= b
;
576 /* Record that B is in HEAP. */
580 /* If there are any remaining heaps and no blocs left,
581 mark those heaps as empty. */
585 heap
->first_bloc
= NIL_BLOC
;
586 heap
->last_bloc
= NIL_BLOC
;
587 heap
->free
= heap
->bloc_start
;
592 /* Resize BLOC to SIZE bytes. This relocates the blocs
593 that come after BLOC in memory. */
596 resize_bloc (bloc
, size
)
605 if (bloc
== NIL_BLOC
|| size
== bloc
->size
)
608 for (heap
= first_heap
; heap
!= NIL_HEAP
; heap
= heap
->next
)
610 if (heap
->bloc_start
<= bloc
->data
&& bloc
->data
<= heap
->end
)
614 if (heap
== NIL_HEAP
)
617 old_size
= bloc
->size
;
620 /* Note that bloc could be moved into the previous heap. */
621 address
= (bloc
->prev
? bloc
->prev
->data
+ bloc
->prev
->size
622 : first_heap
->bloc_start
);
625 if (heap
->bloc_start
<= address
&& address
<= heap
->end
)
630 if (! relocate_blocs (bloc
, heap
, address
))
632 bloc
->size
= old_size
;
638 for (b
= last_bloc
; b
!= bloc
; b
= b
->prev
)
640 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
641 *b
->variable
= b
->data
= b
->new_data
;
643 safe_bcopy (bloc
->data
, bloc
->new_data
, old_size
);
644 bzero (bloc
->new_data
+ old_size
, size
- old_size
);
645 *bloc
->variable
= bloc
->data
= bloc
->new_data
;
649 for (b
= bloc
; b
!= NIL_BLOC
; b
= b
->next
)
651 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
652 *b
->variable
= b
->data
= b
->new_data
;
656 update_heap_bloc_correspondence (bloc
, heap
);
658 break_value
= (last_bloc
? last_bloc
->data
+ last_bloc
->size
659 : first_heap
->bloc_start
);
663 /* Free BLOC from the chain of blocs, relocating any blocs above it.
664 This may return space to the system. */
670 heap_ptr heap
= bloc
->heap
;
672 resize_bloc (bloc
, 0);
674 if (bloc
== first_bloc
&& bloc
== last_bloc
)
676 first_bloc
= last_bloc
= NIL_BLOC
;
678 else if (bloc
== last_bloc
)
680 last_bloc
= bloc
->prev
;
681 last_bloc
->next
= NIL_BLOC
;
683 else if (bloc
== first_bloc
)
685 first_bloc
= bloc
->next
;
686 first_bloc
->prev
= NIL_BLOC
;
690 bloc
->next
->prev
= bloc
->prev
;
691 bloc
->prev
->next
= bloc
->next
;
694 /* Update the records of which blocs are in HEAP. */
695 if (heap
->first_bloc
== bloc
)
697 if (bloc
->next
!= 0 && bloc
->next
->heap
== heap
)
698 heap
->first_bloc
= bloc
->next
;
700 heap
->first_bloc
= heap
->last_bloc
= NIL_BLOC
;
702 if (heap
->last_bloc
== bloc
)
704 if (bloc
->prev
!= 0 && bloc
->prev
->heap
== heap
)
705 heap
->last_bloc
= bloc
->prev
;
707 heap
->first_bloc
= heap
->last_bloc
= NIL_BLOC
;
714 /* Interface routines. */
716 static int use_relocatable_buffers
;
717 static int r_alloc_freeze_level
;
719 /* Obtain SIZE bytes of storage from the free pool, or the system, as
720 necessary. If relocatable blocs are in use, this means relocating
721 them. This function gets plugged into the GNU malloc's __morecore
724 We provide hysteresis, never relocating by less than extra_bytes.
726 If we're out of memory, we should return zero, to imitate the other
727 __morecore hook values - in particular, __default_morecore in the
728 GNU malloc package. */
737 if (! r_alloc_initialized
)
740 if (! use_relocatable_buffers
)
741 return (*real_morecore
) (size
);
744 return virtual_break_value
;
748 /* Allocate a page-aligned space. GNU malloc would reclaim an
749 extra space if we passed an unaligned one. But we could
750 not always find a space which is contiguous to the previous. */
751 POINTER new_bloc_start
;
752 heap_ptr h
= first_heap
;
753 SIZE get
= ROUNDUP (size
);
755 address
= (POINTER
) ROUNDUP (virtual_break_value
);
757 /* Search the list upward for a heap which is large enough. */
758 while ((char *) h
->end
< (char *) MEM_ROUNDUP ((char *)address
+ get
))
763 address
= (POINTER
) ROUNDUP (h
->start
);
766 /* If not found, obtain more space. */
769 get
+= extra_bytes
+ page_size
;
771 if (r_alloc_freeze_level
> 0 || ! obtain (address
, get
))
774 if (first_heap
== last_heap
)
775 address
= (POINTER
) ROUNDUP (virtual_break_value
);
777 address
= (POINTER
) ROUNDUP (last_heap
->start
);
781 new_bloc_start
= (POINTER
) MEM_ROUNDUP ((char *)address
+ get
);
783 if (first_heap
->bloc_start
< new_bloc_start
)
785 /* Move all blocs upward. */
786 if (r_alloc_freeze_level
> 0
787 || ! relocate_blocs (first_bloc
, h
, new_bloc_start
))
790 /* Note that (POINTER)(h+1) <= new_bloc_start since
791 get >= page_size, so the following does not destroy the heap
793 for (b
= last_bloc
; b
!= NIL_BLOC
; b
= b
->prev
)
795 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
796 *b
->variable
= b
->data
= b
->new_data
;
799 h
->bloc_start
= new_bloc_start
;
801 update_heap_bloc_correspondence (first_bloc
, h
);
806 /* Give up managing heaps below the one the new
807 virtual_break_value points to. */
808 first_heap
->prev
= NIL_HEAP
;
809 first_heap
->next
= h
->next
;
810 first_heap
->start
= h
->start
;
811 first_heap
->end
= h
->end
;
812 first_heap
->free
= h
->free
;
813 first_heap
->first_bloc
= h
->first_bloc
;
814 first_heap
->last_bloc
= h
->last_bloc
;
815 first_heap
->bloc_start
= h
->bloc_start
;
817 if (first_heap
->next
)
818 first_heap
->next
->prev
= first_heap
;
820 last_heap
= first_heap
;
823 bzero (address
, size
);
827 SIZE excess
= (char *)first_heap
->bloc_start
828 - ((char *)virtual_break_value
+ size
);
830 address
= virtual_break_value
;
832 if (r_alloc_freeze_level
== 0 && excess
> 2 * extra_bytes
)
834 excess
-= extra_bytes
;
835 first_heap
->bloc_start
836 = (POINTER
) MEM_ROUNDUP ((char *)first_heap
->bloc_start
- excess
);
838 relocate_blocs (first_bloc
, first_heap
, first_heap
->bloc_start
);
840 for (b
= first_bloc
; b
!= NIL_BLOC
; b
= b
->next
)
842 safe_bcopy (b
->data
, b
->new_data
, b
->size
);
843 *b
->variable
= b
->data
= b
->new_data
;
847 if ((char *)virtual_break_value
+ size
< (char *)first_heap
->start
)
849 /* We found an additional space below the first heap */
850 first_heap
->start
= (POINTER
) ((char *)virtual_break_value
+ size
);
854 virtual_break_value
= (POINTER
) ((char *)address
+ size
);
855 break_value
= (last_bloc
856 ? last_bloc
->data
+ last_bloc
->size
857 : first_heap
->bloc_start
);
864 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
865 the data is returned in *PTR. PTR is thus the address of some variable
866 which will use the data area.
868 If we can't allocate the necessary memory, set *PTR to zero, and
876 register bloc_ptr new_bloc
;
878 if (! r_alloc_initialized
)
881 new_bloc
= get_bloc (MEM_ROUNDUP (size
));
884 new_bloc
->variable
= ptr
;
885 *ptr
= new_bloc
->data
;
893 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
894 Store 0 in *PTR to show there's no block allocated. */
898 register POINTER
*ptr
;
900 register bloc_ptr dead_bloc
;
902 if (! r_alloc_initialized
)
905 dead_bloc
= find_bloc (ptr
);
906 if (dead_bloc
== NIL_BLOC
)
909 free_bloc (dead_bloc
);
913 refill_memory_reserve ();
917 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
918 Do this by shifting all blocks above this one up in memory, unless
919 SIZE is less than or equal to the current bloc size, in which case
922 Change *PTR to reflect the new bloc, and return this value.
924 If more memory cannot be allocated, then leave *PTR unchanged, and
928 r_re_alloc (ptr
, size
)
932 register bloc_ptr bloc
;
934 if (! r_alloc_initialized
)
937 bloc
= find_bloc (ptr
);
938 if (bloc
== NIL_BLOC
)
941 if (size
<= bloc
->size
)
942 /* Wouldn't it be useful to actually resize the bloc here? */
945 if (! resize_bloc (bloc
, MEM_ROUNDUP (size
)))
951 /* Disable relocations, after making room for at least SIZE bytes
952 of non-relocatable heap if possible. The relocatable blocs are
953 guaranteed to hold still until thawed, even if this means that
954 malloc must return a null pointer. */
957 r_alloc_freeze (size
)
960 if (! r_alloc_initialized
)
963 /* If already frozen, we can't make any more room, so don't try. */
964 if (r_alloc_freeze_level
> 0)
966 /* If we can't get the amount requested, half is better than nothing. */
967 while (size
> 0 && r_alloc_sbrk (size
) == 0)
969 ++r_alloc_freeze_level
;
971 r_alloc_sbrk (-size
);
977 if (--r_alloc_freeze_level
< 0)
981 /* The hook `malloc' uses for the function which gets more space
983 extern POINTER (*__morecore
) ();
985 /* Initialize various things for memory allocation. */
990 if (r_alloc_initialized
)
993 r_alloc_initialized
= 1;
994 real_morecore
= __morecore
;
995 __morecore
= r_alloc_sbrk
;
997 first_heap
= last_heap
= &heap_base
;
998 first_heap
->next
= first_heap
->prev
= NIL_HEAP
;
999 first_heap
->start
= first_heap
->bloc_start
1000 = virtual_break_value
= break_value
= (*real_morecore
) (0);
1001 if (break_value
== NIL
)
1005 extra_bytes
= ROUNDUP (50000);
1007 /* Give GNU malloc's morecore some hysteresis
1008 so that we move all the relocatable blocks much less often. */
1009 __malloc_extra_blocks
= 64;
1011 first_heap
->end
= (POINTER
) ROUNDUP (first_heap
->start
);
1013 /* The extra call to real_morecore guarantees that the end of the
1014 address space is a multiple of page_size, even if page_size is
1015 not really the page size of the system running the binary in
1016 which page_size is stored. This allows a binary to be built on a
1017 system with one page size and run on a system with a smaller page
1019 (*real_morecore
) (first_heap
->end
- first_heap
->start
);
1021 /* Clear the rest of the last page; this memory is in our address space
1022 even though it is after the sbrk value. */
1023 /* Doubly true, with the additional call that explicitly adds the
1024 rest of that page to the address space. */
1025 bzero (first_heap
->start
, first_heap
->end
- first_heap
->start
);
1026 virtual_break_value
= break_value
= first_heap
->bloc_start
= first_heap
->end
;
1027 use_relocatable_buffers
= 1;
1039 if (!r_alloc_initialized
)
1042 assert (first_heap
);
1043 assert (last_heap
->end
<= (POINTER
) sbrk (0));
1044 assert ((POINTER
) first_heap
< first_heap
->start
);
1045 assert (first_heap
->start
<= virtual_break_value
);
1046 assert (virtual_break_value
<= first_heap
->end
);
1048 for (h
= first_heap
; h
; h
= h
->next
)
1050 assert (h
->prev
== ph
);
1051 assert ((POINTER
) ROUNDUP (h
->end
) == h
->end
);
1052 assert ((POINTER
) MEM_ROUNDUP (h
->start
) == h
->start
);
1053 assert ((POINTER
) MEM_ROUNDUP (h
->bloc_start
) == h
->bloc_start
);
1054 assert (h
->start
<= h
->bloc_start
&& h
->bloc_start
<= h
->end
);
1058 assert (ph
->end
< h
->start
);
1059 assert (h
->start
<= (POINTER
)h
&& (POINTER
)(h
+1) <= h
->bloc_start
);
1062 if (h
->bloc_start
<= break_value
&& break_value
<= h
->end
)
1069 assert (last_heap
== ph
);
1071 for (b
= first_bloc
; b
; b
= b
->next
)
1073 assert (b
->prev
== pb
);
1074 assert ((POINTER
) MEM_ROUNDUP (b
->data
) == b
->data
);
1075 assert ((SIZE
) MEM_ROUNDUP (b
->size
) == b
->size
);
1078 for (h
= first_heap
; h
; h
= h
->next
)
1080 if (h
->bloc_start
<= b
->data
&& b
->data
+ b
->size
<= h
->end
)
1087 if (pb
&& pb
->data
+ pb
->size
!= b
->data
)
1089 assert (ph
&& b
->data
== h
->bloc_start
);
1092 if (ph
->bloc_start
<= pb
->data
1093 && pb
->data
+ pb
->size
<= ph
->end
)
1095 assert (pb
->data
+ pb
->size
+ b
->size
> ph
->end
);
1100 assert (ph
->bloc_start
+ b
->size
> ph
->end
);
1108 assert (last_bloc
== pb
);
1111 assert (last_bloc
->data
+ last_bloc
->size
== break_value
);
1113 assert (first_heap
->bloc_start
== break_value
);