1 /* Block-relocating memory allocator.
2 Copyright (C) 1993, 1995 Free Software Foundation, Inc.
3
4 This file is part of GNU Emacs.
5
6 GNU Emacs is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
9 any later version.
10
11 GNU Emacs is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GNU Emacs; see the file COPYING. If not, write to
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
19
20 /* NOTES:
21
22 Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
23 rather than all of them. This means allowing for a possible
24 hole between the first bloc and the end of malloc storage. */
25
26 #ifdef emacs
27
28 #include <config.h>
29 #include "lisp.h" /* Needed for VALBITS. */
30
31 #undef NULL
32
33 /* The important properties of this type are that 1) it's a pointer, and
34 2) arithmetic on it should work as if the size of the object pointed
35 to has a size of 1. */
36 #if 0 /* Arithmetic on void* is a GCC extension. */
37 #ifdef __STDC__
38 typedef void *POINTER;
39 #else
40
41 #ifdef HAVE_CONFIG_H
42 #include "config.h"
43 #endif
44
45 typedef char *POINTER;
46
47 #endif
48 #endif /* 0 */
49
50 /* Unconditionally use char * for this. */
51 typedef char *POINTER;
52
53 typedef unsigned long SIZE;
54
55 /* Declared in dispnew.c, this version doesn't screw up if regions
56 overlap. */
57 extern void safe_bcopy ();
58
59 extern int __malloc_extra_blocks;
60
61 #else /* not emacs */
62
63 #include <stddef.h>
64
65 typedef size_t SIZE;
66 typedef void *POINTER;
67
68 #include <unistd.h>
69 #include <malloc.h>
70 #include <string.h>
71
72 #define safe_bcopy(x, y, z) memmove (y, x, z)
73 #define bzero(x, len) memset (x, 0, len)
74
75 #endif /* not emacs */
76
77 #include "getpagesize.h"
78
79 #define NIL ((POINTER) 0)
80
81 /* A flag to indicate whether we have initialized ralloc yet. For
82 Emacs's sake, please do not make this local to malloc_init; on some
83 machines, the dumping procedure makes all static variables
84 read-only. On these machines, the word static is #defined to be
85 the empty string, meaning that r_alloc_initialized becomes an
86 automatic variable, and loses its value each time Emacs is started up. */
87 static int r_alloc_initialized = 0;
88
89 static void r_alloc_init ();
90 \f
91 /* Declarations for working with the malloc, ralloc, and system breaks. */
92
93 /* Function to set the real break value. */
94 static POINTER (*real_morecore) ();
95
96 /* The break value, as seen by malloc. */
97 static POINTER virtual_break_value;
98
99 /* The address of the end of the last data in use by ralloc,
100 including relocatable blocs as well as malloc data. */
101 static POINTER break_value;
102
103 /* This is the size of a page. We round memory requests to this boundary. */
104 static int page_size;
105
106 /* Whenever we get memory from the system, get this many extra bytes. This
107 must be a multiple of page_size. */
108 static int extra_bytes;
109
110 /* Macros for rounding. Note that rounding to any value is possible
111 by changing the definition of PAGE. */
112 #define PAGE (getpagesize ())
113 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
114 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
115 & ~(page_size - 1))
116 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
117
118 #define MEM_ALIGN sizeof(double)
119 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
120 & ~(MEM_ALIGN - 1))
121 \f
122 /* Data structures of heaps and blocs. */
123
124 /* The relocatable objects, or blocs, and the malloc data
125 both reside within one or more heaps.
126 Each heap contains malloc data, running from `start' to `bloc_start',
127 and relocatable objects, running from `bloc_start' to `free'.
128
129 Relocatable objects may relocate within the same heap
130 or may move into another heap; the heaps themselves may grow
131 but they never move.
132
133 We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
135 space to add onto the heap. When that happens, we start a new heap. */
136
typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap
     (malloc data occupies `start' .. `bloc_start').  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;
154
155 #define NIL_HEAP ((heap_ptr) 0)
156 #define HEAP_PTR_SIZE (sizeof (struct heap))
157
158 /* This is the first heap object.
159 If we need additional heap objects, each one resides at the beginning of
160 the space it covers. */
161 static struct heap heap_base;
162
163 /* Head and tail of the list of heaps. */
164 static heap_ptr first_heap, last_heap;
165
166 /* These structures are allocated in the malloc arena.
167 The linked list is kept in order of increasing '.data' members.
168 The data blocks abut each other; if b->next is non-nil, then
169 b->data + b->size == b->next->data. */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  /* Address of the user's pointer variable that refers to this bloc's
     data; updated (*variable = data) whenever the bloc is relocated.  */
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;
181
182 #define NIL_BLOC ((bloc_ptr) 0)
183 #define BLOC_PTR_SIZE (sizeof (struct bp))
184
185 /* Head and tail of the list of relocatable blocs. */
186 static bloc_ptr first_bloc, last_bloc;
187
188 \f
189 /* Functions to get and return memory from the system. */
190
191 /* Find the heap that ADDRESS falls within. */
192
193 static heap_ptr
194 find_heap (address)
195 POINTER address;
196 {
197 heap_ptr heap;
198
199 for (heap = last_heap; heap; heap = heap->prev)
200 {
201 if (heap->start <= address && address <= heap->end)
202 return heap;
203 }
204
205 return NIL_HEAP;
206 }
207
/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory. */
225
226 static POINTER
227 obtain (address, size)
228 POINTER address;
229 SIZE size;
230 {
231 heap_ptr heap;
232 SIZE already_available;
233
234 /* Find the heap that ADDRESS falls within. */
235 for (heap = last_heap; heap; heap = heap->prev)
236 {
237 if (heap->start <= address && address <= heap->end)
238 break;
239 }
240
241 if (! heap)
242 abort ();
243
244 /* If we can't fit SIZE bytes in that heap,
245 try successive later heaps. */
246 while (heap && address + size > heap->end)
247 {
248 heap = heap->next;
249 if (heap == NIL_HEAP)
250 break;
251 address = heap->bloc_start;
252 }
253
254 /* If we can't fit them within any existing heap,
255 get more space. */
256 if (heap == NIL_HEAP)
257 {
258 POINTER new = (*real_morecore)(0);
259 SIZE get;
260
261 already_available = (char *)last_heap->end - (char *)address;
262
263 if (new != last_heap->end)
264 {
265 /* Someone else called sbrk. Make a new heap. */
266
267 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
268 POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));
269
270 if ((*real_morecore) (bloc_start - new) != new)
271 return 0;
272
273 new_heap->start = new;
274 new_heap->end = bloc_start;
275 new_heap->bloc_start = bloc_start;
276 new_heap->free = bloc_start;
277 new_heap->next = NIL_HEAP;
278 new_heap->prev = last_heap;
279 new_heap->first_bloc = NIL_BLOC;
280 new_heap->last_bloc = NIL_BLOC;
281 last_heap->next = new_heap;
282 last_heap = new_heap;
283
284 address = bloc_start;
285 already_available = 0;
286 }
287
288 /* Add space to the last heap (which we may have just created).
289 Get some extra, so we can come here less often. */
290
291 get = size + extra_bytes - already_available;
292 get = (char *) ROUNDUP ((char *)last_heap->end + get)
293 - (char *) last_heap->end;
294
295 if ((*real_morecore) (get) != last_heap->end)
296 return 0;
297
298 last_heap->end += get;
299 }
300
301 return address;
302 }
303
304 /* Return unused heap space to the system
305 if there is a lot of unused space now.
306 This can make the last heap smaller;
307 it can also eliminate the last heap entirely. */
308
static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  /* Only give memory back if there is a lot of it, and only if nobody
     else has moved the break since we last did.  */
  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* The entire last heap is unused.
	     This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  /* Shrink the last heap, keeping its end page-aligned.  */
	  excess = (char *) last_heap->end
			- (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      /* Actually lower the break.  */
      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
353
354 /* Return the total size in use by relocating allocator,
355 above where malloc gets space. */
356
357 long
358 r_alloc_size_in_use ()
359 {
360 return break_value - virtual_break_value;
361 }
362 \f
363 /* The meat - allocating, freeing, and relocating blocs. */
364
365 /* Find the bloc referenced by the address in PTR. Returns a pointer
366 to that block. */
367
368 static bloc_ptr
369 find_bloc (ptr)
370 POINTER *ptr;
371 {
372 register bloc_ptr p = first_bloc;
373
374 while (p != NIL_BLOC)
375 {
376 if (p->variable == ptr && p->data == *ptr)
377 return p;
378
379 p = p->next;
380 }
381
382 return p;
383 }
384
385 /* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
386 Returns a pointer to the new bloc, or zero if we couldn't allocate
387 memory for the new block. */
388
static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  /* The bloc header comes from the malloc arena; the data space comes
     from the heaps via obtain.  Fail cleanly if either allocation fails.  */
  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  /* The new data sits at the old break; advance the break past it.  */
  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;	/* Caller fills this in.  */
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs, at the end.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
437 \f
438 /* Calculate new locations of blocs in the list beginning with BLOC,
439 relocating it to start at ADDRESS, in heap HEAP. If enough space is
440 not presently available in our reserve, call obtain for
441 more space.
442
443 Store the new location of each bloc in its new_data field.
444 Do not touch the contents of blocs or break_value. */
445
static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  Note that
	     sizes are the current b->size values; resize_bloc updates
	     the bloc's size before calling us.  */
	  while (tb != NIL_BLOC)
	    {
	      s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}
497
498 /* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
499 This is necessary if we put the memory of space of BLOC
500 before that of BEFORE. */
501
502 static void
503 reorder_bloc (bloc, before)
504 bloc_ptr bloc, before;
505 {
506 bloc_ptr prev, next;
507
508 /* Splice BLOC out from where it is. */
509 prev = bloc->prev;
510 next = bloc->next;
511
512 if (prev)
513 prev->next = next;
514 if (next)
515 next->prev = prev;
516
517 /* Splice it in before BEFORE. */
518 prev = before->prev;
519
520 if (prev)
521 prev->next = bloc;
522 bloc->prev = prev;
523
524 before->prev = bloc;
525 bloc->next = before;
526 }
527 \f
528 /* Update the records of which heaps contain which blocs, starting
529 with heap HEAP and bloc BLOC. */
530
static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  (HEAP is the heap holding the last
     bloc processed above, or the heap passed in if BLOC was nil.)  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
591 \f
592 /* Resize BLOC to SIZE bytes. This relocates the blocs
593 that come after BLOC in memory. */
594
static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* Nothing to do for a nil bloc or an unchanged size.  */
  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  /* Find the heap containing BLOC's data.  */
  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  /* Set the new size before relocating, so relocate_blocs lays out
     BLOC and its successors using the new size.  */
  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      /* Relocation failed; restore the old size and report failure.  */
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      /* Growing: move the following blocs first (highest first),
	 then BLOC itself, and zero the newly added tail.
	 safe_bcopy tolerates overlapping regions.  */
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      /* Shrinking: move BLOC and its successors in list order.  */
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
662 \f
663 /* Free BLOC from the chain of blocs, relocating any blocs above it.
664 This may return space to the system. */
665
static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  /* NOTE(review): HEAP is captured before resize_bloc runs; this
     assumes the bloc's heap does not change while shrinking to 0 —
     verify against resize_bloc's relocation behavior.  */
  heap_ptr heap = bloc->heap;

  /* Shrink to nothing, sliding all later blocs down.  */
  resize_bloc (bloc, 0);

  /* Unlink BLOC from the global bloc list.  */
  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  /* Possibly give freed space back to the system, then discard the
     bloc header.  */
  relinquish ();
  free (bloc);
}
713 \f
714 /* Interface routines. */
715
716 static int use_relocatable_buffers;
717 static int r_alloc_freeze_level;
718
719 /* Obtain SIZE bytes of storage from the free pool, or the system, as
720 necessary. If relocatable blocs are in use, this means relocating
721 them. This function gets plugged into the GNU malloc's __morecore
722 hook.
723
724 We provide hysteresis, never relocating by less than extra_bytes.
725
726 If we're out of memory, we should return zero, to imitate the other
727 __morecore hook values - in particular, __default_morecore in the
728 GNU malloc package. */
729
POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If relocation is disabled, behave exactly like the real morecore.  */
  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  /* A zero request just reports the current (virtual) break.  */
  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  /* While frozen, blocs may not move, so we cannot grow.  */
	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* Move all blocs upward.  */
	  if (r_alloc_freeze_level > 0
	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}

      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      /* Hand malloc zeroed memory, like fresh sbrk pages.  */
      bzero (address, size);
    }
  else /* size < 0 */
    {
      /* Space now unused between the requested new break and the start
	 of relocatable data.  */
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  /* Keep extra_bytes of hysteresis, then slide all blocs down.  */
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
863
864 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
865 the data is returned in *PTR. PTR is thus the address of some variable
866 which will use the data area.
867
868 If we can't allocate the necessary memory, set *PTR to zero, and
869 return zero. */
870
871 POINTER
872 r_alloc (ptr, size)
873 POINTER *ptr;
874 SIZE size;
875 {
876 register bloc_ptr new_bloc;
877
878 if (! r_alloc_initialized)
879 r_alloc_init ();
880
881 new_bloc = get_bloc (MEM_ROUNDUP (size));
882 if (new_bloc)
883 {
884 new_bloc->variable = ptr;
885 *ptr = new_bloc->data;
886 }
887 else
888 *ptr = 0;
889
890 return *ptr;
891 }
892
893 /* Free a bloc of relocatable storage whose data is pointed to by PTR.
894 Store 0 in *PTR to show there's no block allocated. */
895
896 void
897 r_alloc_free (ptr)
898 register POINTER *ptr;
899 {
900 register bloc_ptr dead_bloc;
901
902 if (! r_alloc_initialized)
903 r_alloc_init ();
904
905 dead_bloc = find_bloc (ptr);
906 if (dead_bloc == NIL_BLOC)
907 abort ();
908
909 free_bloc (dead_bloc);
910 *ptr = 0;
911
912 #ifdef emacs
913 refill_memory_reserve ();
914 #endif
915 }
916
917 /* Given a pointer at address PTR to relocatable data, resize it to SIZE.
918 Do this by shifting all blocks above this one up in memory, unless
919 SIZE is less than or equal to the current bloc size, in which case
920 do nothing.
921
922 Change *PTR to reflect the new bloc, and return this value.
923
924 If more memory cannot be allocated, then leave *PTR unchanged, and
925 return zero. */
926
927 POINTER
928 r_re_alloc (ptr, size)
929 POINTER *ptr;
930 SIZE size;
931 {
932 register bloc_ptr bloc;
933
934 if (! r_alloc_initialized)
935 r_alloc_init ();
936
937 bloc = find_bloc (ptr);
938 if (bloc == NIL_BLOC)
939 abort ();
940
941 if (size <= bloc->size)
942 /* Wouldn't it be useful to actually resize the bloc here? */
943 return *ptr;
944
945 if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
946 return 0;
947
948 return *ptr;
949 }
950
951 /* Disable relocations, after making room for at least SIZE bytes
952 of non-relocatable heap if possible. The relocatable blocs are
953 guaranteed to hold still until thawed, even if this means that
954 malloc must return a null pointer. */
955
956 void
957 r_alloc_freeze (size)
958 long size;
959 {
960 if (! r_alloc_initialized)
961 r_alloc_init ();
962
963 /* If already frozen, we can't make any more room, so don't try. */
964 if (r_alloc_freeze_level > 0)
965 size = 0;
966 /* If we can't get the amount requested, half is better than nothing. */
967 while (size > 0 && r_alloc_sbrk (size) == 0)
968 size /= 2;
969 ++r_alloc_freeze_level;
970 if (size > 0)
971 r_alloc_sbrk (-size);
972 }
973
974 void
975 r_alloc_thaw ()
976 {
977 if (--r_alloc_freeze_level < 0)
978 abort ();
979 }
980 \f
981 /* The hook `malloc' uses for the function which gets more space
982 from the system. */
983 extern POINTER (*__morecore) ();
984
985 /* Initialize various things for memory allocation. */
986
static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  /* Interpose ourselves as malloc's morecore hook, keeping the
     real one for getting actual memory from the system.  */
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
1029 #ifdef DEBUG
1030 #include <assert.h>
1031
/* Consistency check of the heap and bloc data structures (DEBUG only):
   verify list linkage, alignment, containment of blocs in heaps, and
   agreement of break_value with the last bloc.  */
void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  /* Walk the heap list forward, checking each heap against its
     predecessor PH.  */
  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  /* Walk the bloc list, checking alignment and that each bloc fits
     entirely inside some heap.  */
  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      /* If B does not abut its predecessor, B must start a new heap,
	 and the gap must be because PB's heap had no room for B.  */
      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
1115 #endif /* DEBUG */