/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
                 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

/* Declared in dispnew.c, this version doesn't screw up if regions
   overlap.  */

extern void safe_bcopy ();

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt ();
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#define safe_bcopy(x, y, z)	memmove (y, x, z)
#define bzero(x, len)		memset (x, 0, len)

#endif	/* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init ();

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) ();

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof(double)
#define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
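
/* A worked example, purely illustrative: assuming a page_size of
   4096, ROUNDUP (5000) yields 8192, ALIGNED (8192) holds, and
   ROUND_TO_PAGE (8200) yields 8192; with sizeof (double) == 8,
   MEM_ROUNDUP (13) yields 16.  Any power-of-two page size behaves
   analogously.  */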

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) ();
#endif


\f
/***********************************************************************
		    Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
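
/* To make the layout concrete: a heap H keeps its malloc data in
   [H->start, H->bloc_start) and its relocatable blocs in
   [H->bloc_start, H->free), so the four pointers are totally
   ordered.  A hypothetical consistency check, illustrative only and
   not compiled, would look like this: */
#if 0
static void
check_heap_invariant (h)
     heap_ptr h;
{
  if (! ((char *) h->start <= (char *) h->bloc_start
	 && (char *) h->bloc_start <= (char *) h->free
	 && (char *) h->free <= (char *) h->end))
    abort ();
}
#endif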

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed bloc, which has not yet
   been collected.  Such elements may only appear while
   r_alloc_freeze_level > 0, and will be freed when the arena is thawed.
   Currently these blocs are not reusable while the arena is frozen,
   which is very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
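
/* Illustrative only: the abutment invariant described above can be
   checked with a sketch like the following (not compiled).  Within
   one heap each bloc continues its predecessor; a gap is legal only
   where a bloc begins at the next heap's bloc_start.  */
#if 0
static void
check_bloc_abutment ()
{
  bloc_ptr b;

  for (b = first_bloc; b && b->next; b = b->next)
    if ((char *) b->data + b->size != (char *) b->next->data
	&& b->next->data != b->next->heap->bloc_start)
      abort ();
}
#endif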

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (address)
     POINTER address;
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (address, size)
     POINTER address;
     SIZE size;
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore)(0);
      SIZE get;

      already_available = (char *)last_heap->end - (char *)address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *)last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}
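
/* A numeric illustration with assumed values: on a system with
   4096-byte pages (so extra_bytes == 53248 after r_alloc_init), if
   ADDRESS lies 1000 bytes below last_heap->end and SIZE is 10000,
   then get = 10000 + 53248 - 1000 = 62248, which the ROUNDUP step
   then pads so that the new last_heap->end remains page-aligned.  */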

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish ()
{
  register heap_ptr h;
  long excess = 0;

  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *)last_heap->end - (char *)last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *)last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use ()
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (ptr)
     POINTER *ptr;
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting this, but
	 callers that always expect a bloc to be returned should abort
	 if one isn't to avoid a memory corruption bug that is
	 difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size)
     SIZE size;
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc, heap, address)
     bloc_ptr bloc;
     heap_ptr heap;
     POINTER address;
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Reorder the bloc BLOC to go before bloc BEFORE in the doubly linked list.
   This is necessary if we put the memory space of BLOC
   before that of BEFORE.  */

static void
reorder_bloc (bloc, before)
     bloc_ptr bloc, before;
{
  bloc_ptr prev, next;

  /* Splice BLOC out from where it is.  */
  prev = bloc->prev;
  next = bloc->next;

  if (prev)
    prev->next = next;
  if (next)
    next->prev = prev;

  /* Splice it in before BEFORE.  */
  prev = before->prev;

  if (prev)
    prev->next = bloc;
  bloc->prev = prev;

  before->prev = bloc;
  bloc->next = before;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc, heap)
     bloc_ptr bloc;
     heap_ptr heap;
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc, size)
     bloc_ptr bloc;
     SIZE size;
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  safe_bcopy (bloc->data, bloc->new_data, old_size);
	  bzero ((char *) bloc->new_data + old_size, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc)
     bloc_ptr bloc;
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (size)
     long size;
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed an unaligned one.  But we cannot
	 always find a space which is contiguous to the previous one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER)(h+1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      bzero (address, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *)first_heap->bloc_start
	- ((char *)virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      safe_bcopy (b->data, b->new_data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *)virtual_break_value + size < (char *)first_heap->start)
	{
	  /* We found an additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *)virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *)address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
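
/* An illustrative call sequence, with hypothetical sizes: once
   r_alloc_init has plugged this function into __morecore, GNU
   malloc's (*__morecore) (16384) relocates blocs upward as needed,
   zeroes the 16384 fresh bytes, and returns the old (page-rounded)
   virtual break; (*__morecore) (0) simply reports the current break,
   and a negative argument gives storage back, possibly letting
   relinquish shrink the last heap.  */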

/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (ptr)
     register POINTER *ptr;
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}
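
/* Illustrative usage sketch, not compiled: the caller hands r_alloc
   the address of its own pointer variable, which ralloc then updates
   whenever the bloc relocates.  The handler name is hypothetical.  */
#if 0
static void
example_use ()
{
  POINTER text;

  if (r_alloc (&text, 4096) == 0)
    memory_full ();		/* hypothetical out-of-memory handler */

  /* Use text here, re-reading the variable after any allocation,
     since relocation may have moved the data.  */

  r_alloc_free (&text);		/* also stores 0 in text */
}
#endif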

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (ptr, size)
     POINTER *ptr;
     SIZE size;
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (size)
     long size;
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw ()
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the resize
     and bcopy is done only once.  Afterwards, all unreferenced blocs are
     already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
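
/* Illustrative usage sketch (not compiled): code that must hand a
   bloc's address to a routine which may allocate can pin everything
   with a freeze/thaw pair; the function here is hypothetical.  */
#if 0
static void
example_frozen_section ()
{
  r_alloc_freeze (0);		/* request no extra fixed room */
  /* Pointers into relocatable blocs stay valid in here, though
     allocations may now fail rather than relocate.  */
  r_alloc_thaw ();		/* blocs freed meanwhile are collected */
}
#endif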

#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit ()
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check ()
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (old, new)
     POINTER *old, *new;
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by *NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
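
/* An illustrative call pattern (variable names hypothetical): after
   buffer-swap-text exchanges the text pointers a_text and b_text of
   two buffers, each bloc's record is repointed at the variable that
   now holds its data:

     r_alloc_reset_variable (&a_text, &b_text);
     r_alloc_reset_variable (&b_text, &a_text);

   Each OLD argument must still be the variable the bloc was
   registered with, or the check above aborts.  */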

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init ()
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  bzero (first_heap->start,
	 (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}

/* arch-tag: 6a524a15-faff-44c8-95d4-a5da6f55110f
   (do not change this comment) */