/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to has a
   size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else
extern int __malloc_extra_blocks;
#endif

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
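
/* A worked example of these macros, assuming an illustrative 4096-byte
   page and an 8-byte double (neither value is guaranteed; both depend
   on the machine):

     ROUNDUP (5000)       => 8192  (next multiple of page_size)
     ALIGNED (8192)       => 1     (8192 sits on a page boundary)
     ROUND_TO_PAGE (5000) => 4096  (previous multiple of page_size)
     MEM_ROUNDUP (13)     => 16    (next multiple of MEM_ALIGN)

   All four rely on the rounding quantum being a power of two, since
   they mask bits rather than divide.  */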
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
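
/* An illustrative sketch of one heap's layout under this scheme
   (addresses grow to the right; the region sizes are arbitrary):

     start          bloc_start                  free           end
       |  malloc data  |  relocatable blocs (data)  |  unused   |

   Everything below bloc_start belongs to malloc; everything from
   bloc_start up to free is managed by this allocator.  */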

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen, which is very inefficient.  */
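
/* A minimal sketch, not compiled in, of how the two invariants just
   stated could be checked by walking the bloc list (the types it uses
   are defined just below; r_alloc_check at the end of this file does a
   much more thorough job when DEBUG is defined).  */
#if 0
static void
check_bloc_invariants ()
{
  bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC; b = b->next)
    {
      /* The data blocks abut: each bloc ends where its successor starts.  */
      if (b->next != NIL_BLOC && b->data + b->size != b->next->data)
	abort ();
      /* A freed-but-uncollected bloc may exist only while frozen.  */
      if (b->variable == (POINTER *) NIL && r_alloc_freeze_level == 0)
	abort ();
    }
}
#endif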

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}
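
/* An illustrative figure for the hysteresis above: with a 4096-byte
   page, extra_bytes is ROUNDUP (50000) = 53248, so a request for
   10000 bytes when the last heap already has, say, 2000 spare bytes
   asks the system for roughly

     get = 10000 + 53248 - 2000 = 61248

   bytes (then rounded so last_heap->end stays page-aligned).  The
   surplus lets several subsequent requests succeed without calling
   real_morecore at all.  */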

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end += excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero (bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim extra
	 space if we passed it an unaligned one.  But we could not
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER) (h + 1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could
   be done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
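
/* A minimal usage sketch, not compiled in: the caller hands r_alloc the
   address of its own pointer variable and must always access the data
   through that variable, because any later call into this allocator may
   relocate the bloc and silently update the pointer.  */
#if 0
static POINTER example_buffer;

static void
example_usage ()
{
  if (r_alloc (&example_buffer, 1024) == 0)
    abort ();			/* Allocation failed.  */

  example_buffer[0] = 'x';	/* Access data via the registered variable.  */

  if (r_re_alloc (&example_buffer, 2048) == 0)
    abort ();			/* Could not grow; *ptr is unchanged.  */

  r_alloc_free (&example_buffer);	/* Also stores 0 in example_buffer.  */
}
#endif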

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   the bloc is either left alone or shrunk in place (when shrinking
   would free at least a page).

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and bcopy are done only once.  Afterwards, all unreferenced
     blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
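
/* A sketch, not compiled in, of how freeze and thaw pair up: while the
   freeze level is positive, bloc data addresses hold still, so raw
   pointers into relocatable storage stay valid until the thaw.  */
#if 0
static void
example_freeze ()
{
  r_alloc_freeze (0);	/* Pin every bloc in place.  */
  /* ... hand bloc data to code that keeps raw pointers ...  */
  r_alloc_thaw ();	/* Unpin; blocs freed while frozen are collected.  */
}
#endif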

\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

#ifdef DOUG_LEA_MALLOC
  mallopt (M_TOP_PAD, 64 * 4096);
#else
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}
#endif

#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */