/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */

#undef NULL

/* The important properties of this type are that 1) it's a pointer, and
   2) arithmetic on it should work as if the object pointed to
   has a size of 1.  */
#if 0 /* Arithmetic on void* is a GCC extension.  */
#ifdef __STDC__
typedef void *POINTER;
#else

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

typedef char *POINTER;

#endif
#endif /* 0 */

/* Unconditionally use char * for this.  */
typedef char *POINTER;

typedef unsigned long SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */
extern void safe_bcopy ();

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>
#include <string.h>

#define safe_bcopy(x, y, z) memmove (y, x, z)
#define bzero(x, len) memset (x, 0, len)

#endif /* not emacs */

#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started up.  */
static int r_alloc_initialized = 0;

static void r_alloc_init ();
\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
static POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) ((addr) & ~(page_size - 1))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
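
/* Worked example of the rounding macros (illustrative only, assuming a
   4096-byte page and an 8-byte double, neither of which is guaranteed):

     ROUNDUP (5000)	  == (5000 + 4095) & ~4095 == 8192
     ROUND_TO_PAGE (5000) == 5000 & ~4095	   == 4096
     MEM_ROUNDUP (13)	  == (13 + 7) & ~7	   == 16

   ROUNDUP and ROUND_TO_PAGE bracket an address between the enclosing
   page boundaries; MEM_ROUNDUP keeps bloc addresses and sizes aligned
   strictly enough for any C object.  */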
\f
/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
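
/* An illustrative picture of one heap (a sketch drawn from the comment
   above, not from the original sources; `end' may exceed `free' by
   space already obtained from the system but not yet handed out):

     start	       bloc_start		free	    end
       |		   |			  |	     |
       v		   v			  v	     v
       +-------------------+---------------------+----------+
       |    malloc data    | relocatable blocs	  | (unused) |
       +-------------------+---------------------+----------+
*/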

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.  */
typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  /* Heap this bloc is in.  */
  struct heap *heap;
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
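
/* Example of the abutment invariant above (hypothetical addresses):
   if first_bloc->data == (POINTER) 0x10000 and first_bloc->size == 0x100,
   then first_bloc->next->data == (POINTER) 0x10100.  Resizing or freeing
   a bloc therefore slides every later bloc to close or open the gap.  */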

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) (bloc_start - new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end += get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  int excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end -= excess;
	}

      if ((*real_morecore) (- excess) == 0)
	abort ();
    }
}
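
/* Illustrative numbers for the hysteresis above (assuming 4096-byte
   pages, so extra_bytes == ROUNDUP (50000) == 53248): relinquish gives
   nothing back until more than 2 * 53248 bytes sit unused beyond
   break_value, and even then it keeps extra_bytes in reserve, so small
   oscillations in demand never turn into sbrk traffic.  */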

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return break_value - virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that block.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new block.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      if (new_bloc)
	free (new_bloc);

      return 0;
    }

  break_value = new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && address + b->size > heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      s += tb->size;
	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      address += b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? bloc->prev->data + bloc->prev->size
	     : first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
      safe_bcopy (bloc->data, bloc->new_data, old_size);
      bzero (bloc->new_data + old_size, size - old_size);
      *bloc->variable = bloc->data = bloc->new_data;
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  safe_bcopy (b->data, b->new_data, b->size);
	  *b->variable = b->data = b->new_data;
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

static int use_relocatable_buffers;
static int r_alloc_freeze_level;

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we could
	 not always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (r_alloc_freeze_level > 0 || ! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* Move all blocs upward.  */
	  if (r_alloc_freeze_level > 0
	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}

      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
		    - ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? last_bloc->data + last_bloc->size
		 : first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
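
/* Illustrative call flow (a sketch of the intended plumbing, assuming
   GNU malloc is in use): when malloc needs core it calls the
   __morecore hook, which r_alloc_init points at r_alloc_sbrk; on a
   positive SIZE the blocs above virtual_break_value slide upward to
   make room, and malloc sees an ordinary sbrk-like return value.  */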

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no block allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort ();

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocks above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort ();

  if (size <= bloc->size)
    /* Wouldn't it be useful to actually resize the bloc here?  */
    return *ptr;

  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
    return 0;

  return *ptr;
}
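
/* A minimal usage sketch of the interface above (illustrative only;
   `buffer' is a hypothetical caller variable, not part of this file):

     static POINTER buffer;

     if (r_alloc (&buffer, 1024) == 0)
       ...handle allocation failure...
     ...use buffer, re-fetching it after any allocation, since the
	bloc records &buffer in its `variable' field and rewrites
	buffer whenever the data relocates...
     r_re_alloc (&buffer, 2048);
     r_alloc_free (&buffer);	 -- also stores 0 in buffer

   The essential contract is that the caller always reaches the data
   through the registered variable, never through a saved copy.  */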

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (--r_alloc_freeze_level < 0)
    abort ();
}
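
/* Freeze/thaw usage note (illustrative): calls must nest, e.g.

     r_alloc_freeze (16384);	-- pre-grow the fixed heap, then pin blocs
     ...code that keeps raw pointers into bloc data...
     r_alloc_thaw ();

   While frozen, r_alloc_sbrk refuses any request that would relocate
   blocs, so malloc may fail where it otherwise would not.  */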
\f
/* The hook `malloc' uses for the function which gets more space
   from the system.  */
extern POINTER (*__morecore) ();

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;

  r_alloc_initialized = 1;
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  page_size = PAGE;
  extra_bytes = ROUNDUP (50000);

  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) (first_heap->end - first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start, first_heap->end - first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
  use_relocatable_buffers = 1;
}
#ifdef DEBUG
#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}
#endif /* DEBUG */