/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000, 2001, 2002, 2003, 2004,
     2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#ifdef emacs

#include <config.h>
#include <setjmp.h>
#include "lisp.h"		/* Needed for VALBITS.  */
#include "blockinput.h"

#include <unistd.h>

typedef POINTER_TYPE *POINTER;
typedef size_t SIZE;

#ifdef DOUG_LEA_MALLOC
#define M_TOP_PAD -2
extern int mallopt (int, int);
#else /* not DOUG_LEA_MALLOC */
#ifndef SYSTEM_MALLOC
extern size_t __malloc_extra_blocks;
#endif /* not SYSTEM_MALLOC */
#endif /* not DOUG_LEA_MALLOC */

#else /* not emacs */

#include <stddef.h>

typedef size_t SIZE;
typedef void *POINTER;

#include <unistd.h>
#include <malloc.h>

#endif /* not emacs */


#include "getpagesize.h"

#define NIL ((POINTER) 0)

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);

\f
/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
POINTER (*real_morecore) (long int);

/* The break value, as seen by malloc.  */
static POINTER virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static POINTER break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
		       & ~(page_size - 1))
#define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
			   & ~(MEM_ALIGN - 1))
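
/* Worked example of the macros above (illustrative only; it assumes a
   4096-byte page and an 8-byte double, neither of which is guaranteed):

     ROUNDUP (5000)    => 8192  (next multiple of page_size)
     ALIGNED (0x2000)  => 1     (0x2000 is page-aligned)
     MEM_ROUNDUP (13)  => 16    (next multiple of MEM_ALIGN)
     MEM_ROUNDUP (16)  => 16    (already aligned, unchanged)  */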

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifndef SYSTEM_MALLOC
extern POINTER (*__morecore) (long int);
#endif


\f
/***********************************************************************
		    Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  POINTER start;
  /* End of memory range of this heap.  */
  POINTER end;
  /* Start of relocatable data in this heap.  */
  POINTER bloc_start;
  /* Start of unused space in this heap.  */
  POINTER free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)
#define HEAP_PTR_SIZE (sizeof (struct heap))
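
/* Illustrative picture of one heap (a sketch of the invariants above,
   not additional state):

     start            bloc_start              free              end
       |  malloc data     |  relocatable blocs  |    unused       |
       v                  v                     v                 v
       +------------------+---------------------+-----------------+

   `free' never exceeds `end'.  Growing a heap moves `end' upward via
   real_morecore; relocation only shuffles the region between
   `bloc_start' and `free'.  */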

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NIL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs
   are not reusable while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  POINTER *variable;
  POINTER data;
  SIZE size;
  POINTER new_data;		/* temporarily used for relocation */
  struct heap *heap;		/* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))
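
/* Sketch of the chain invariant stated above (illustrative only):

     [ b->data, b->data + b->size ) [ b->next->data, ... )

   i.e. the data areas abut, so relocating the chain amounts to sliding
   contiguous regions.  Each bloc's `variable' field points at the
   client variable that holds b->data, which is how the allocator can
   update that variable whenever the bloc moves.  */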

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;

\f
/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (POINTER address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use
   or not in use.  It just returns the address of SIZE bytes that
   fall within a single heap.  If you call obtain twice in a row
   with the same arguments, you typically get the same value.
   It's the caller's responsibility to keep track of what space
   is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static POINTER
obtain (POINTER address, SIZE size)
{
  heap_ptr heap;
  SIZE already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
	break;
    }

  if (! heap)
    abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
	break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      POINTER new = (*real_morecore) (0);
      SIZE get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
	{
	  /* Someone else called sbrk.  Make a new heap.  */

	  heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
	  POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER) (new_heap + 1));

	  if ((*real_morecore) ((char *) bloc_start - (char *) new) != new)
	    return 0;

	  new_heap->start = new;
	  new_heap->end = bloc_start;
	  new_heap->bloc_start = bloc_start;
	  new_heap->free = bloc_start;
	  new_heap->next = NIL_HEAP;
	  new_heap->prev = last_heap;
	  new_heap->first_bloc = NIL_BLOC;
	  new_heap->last_bloc = NIL_BLOC;
	  last_heap->next = new_heap;
	  last_heap = new_heap;

	  address = bloc_start;
	  already_available = 0;
	}

      /* Add space to the last heap (which we may have just created).
	 Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) ROUNDUP ((char *) last_heap->end + get)
	- (char *) last_heap->end;

      if ((*real_morecore) (get) != last_heap->end)
	return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  long excess = 0;

  /* Add up the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
					    ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
	 And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
	{
	  /* This heap should have no blocs in it.  */
	  if (last_heap->first_bloc != NIL_BLOC
	      || last_heap->last_bloc != NIL_BLOC)
	    abort ();

	  /* Return the last heap, with its header, to the system.  */
	  excess = (char *) last_heap->end - (char *) last_heap->start;
	  last_heap = last_heap->prev;
	  last_heap->next = NIL_HEAP;
	}
      else
	{
	  excess = (char *) last_heap->end
	    - (char *) ROUNDUP ((char *) last_heap->end - excess);
	  last_heap->end = (char *) last_heap->end - excess;
	}

      if ((*real_morecore) (- excess) == 0)
	{
	  /* If the system didn't want that much memory back, adjust
	     the end of the last heap to reflect that.  This can occur
	     if break_value is still within the original data segment.  */
	  last_heap->end = (char *) last_heap->end + excess;
	  /* Make sure that the result of the adjustment is accurate.
	     It should be, for the else clause above; the other case,
	     which returns the entire last heap to the system, seems
	     unlikely to trigger this mode of failure.  */
	  if (last_heap->end != (*real_morecore) (0))
	    abort ();
	}
    }
}

/* Return the total size in use by the relocating allocator,
   above where malloc gets space.  */

long
r_alloc_size_in_use (void)
{
  return (char *) break_value - (char *) virtual_break_value;
}
\f
/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Returns a pointer
   to that bloc.  */

static bloc_ptr
find_bloc (POINTER *ptr)
{
  register bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
	 Don't abort here, as callers might be expecting a NIL return;
	 but any caller that always expects a bloc to be returned
	 should itself abort if one isn't, to avoid a memory-corruption
	 bug that is difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
	return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Returns a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (SIZE size)
{
  register bloc_ptr new_bloc;
  register heap_ptr heap;

  if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = (POINTER *) NIL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}
\f
/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, POINTER address)
{
  register bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
	 move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
	{
	  heap = heap->next;
	  if (heap == NIL_HEAP)
	    break;
	  address = heap->bloc_start;
	}

      /* If BLOC won't fit in any heap,
	 get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
	{
	  register bloc_ptr tb = b;
	  register SIZE s = 0;

	  /* Add up the size of all the following blocs.  */
	  while (tb != NIL_BLOC)
	    {
	      if (tb->variable)
		s += tb->size;

	      tb = tb->next;
	    }

	  /* Get that space.  */
	  address = obtain (address, s);
	  if (address == 0)
	    return 0;

	  heap = last_heap;
	}

      /* Record the new address of this bloc
	 and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
	address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}
\f
/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
	 till we get to the one that B is in.  */
      while (heap)
	{
	  if (heap->bloc_start <= b->data && b->data <= heap->end)
	    break;
	  heap = heap->next;
	  /* We know HEAP is not null now,
	     because there has to be space for bloc B.  */
	  heap->first_bloc = NIL_BLOC;
	  heap->last_bloc = NIL_BLOC;
	  heap->free = heap->bloc_start;
	}

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
	heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}
\f
/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, SIZE size)
{
  register bloc_ptr b;
  heap_ptr heap;
  POINTER address;
  SIZE old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
	break;
    }

  if (heap == NIL_HEAP)
    abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
	     : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
	break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
      if (!bloc->variable)
	{
	  bloc->size = 0;
	  bloc->data = bloc->new_data;
	}
      else
	{
	  memmove (bloc->new_data, bloc->data, old_size);
	  memset ((char *) bloc->new_data + old_size, 0, size - old_size);
	  *bloc->variable = bloc->data = bloc->new_data;
	}
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
	{
	  if (!b->variable)
	    {
	      b->size = 0;
	      b->data = b->new_data;
	    }
	  else
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  return 1;
}
\f
/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;

  if (r_alloc_freeze_level)
    {
      bloc->variable = (POINTER *) NIL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Update the records of which blocs are in HEAP.  */
  if (heap->first_bloc == bloc)
    {
      if (bloc->next != 0 && bloc->next->heap == heap)
	heap->first_bloc = bloc->next;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }
  if (heap->last_bloc == bloc)
    {
      if (bloc->prev != 0 && bloc->prev->heap == heap)
	heap->last_bloc = bloc->prev;
      else
	heap->first_bloc = heap->last_bloc = NIL_BLOC;
    }

  relinquish ();
  free (bloc);
}
\f
/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

POINTER
r_alloc_sbrk (long int size)
{
  register bloc_ptr b;
  POINTER address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (! use_relocatable_buffers)
    return (*real_morecore) (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
	 extra space if we passed it an unaligned one.  But we could
	 not always find a space which is contiguous to the previous
	 one.  */
      POINTER new_bloc_start;
      heap_ptr h = first_heap;
      SIZE get = ROUNDUP (size);

      address = (POINTER) ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
	{
	  h = h->next;
	  if (h == NIL_HEAP)
	    break;
	  address = (POINTER) ROUNDUP (h->start);
	}

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
	{
	  get += extra_bytes + page_size;

	  if (! obtain (address, get))
	    return 0;

	  if (first_heap == last_heap)
	    address = (POINTER) ROUNDUP (virtual_break_value);
	  else
	    address = (POINTER) ROUNDUP (last_heap->start);
	  h = last_heap;
	}

      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
	{
	  /* This is no clean solution - no idea how to do it better.  */
	  if (r_alloc_freeze_level)
	    return NIL;

	  /* There is a bug here: if the above obtain call succeeded, but the
	     relocate_blocs call below does not succeed, we need to free
	     the memory that we got with obtain.  */

	  /* Move all blocs upward.  */
	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	    return 0;

	  /* Note that (POINTER) (h + 1) <= new_bloc_start since
	     get >= page_size, so the following does not destroy the heap
	     header.  */
	  for (b = last_bloc; b != NIL_BLOC; b = b->prev)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }

	  h->bloc_start = new_bloc_start;

	  update_heap_bloc_correspondence (first_bloc, h);
	}
      if (h != first_heap)
	{
	  /* Give up managing heaps below the one the new
	     virtual_break_value points to.  */
	  first_heap->prev = NIL_HEAP;
	  first_heap->next = h->next;
	  first_heap->start = h->start;
	  first_heap->end = h->end;
	  first_heap->free = h->free;
	  first_heap->first_bloc = h->first_bloc;
	  first_heap->last_bloc = h->last_bloc;
	  first_heap->bloc_start = h->bloc_start;

	  if (first_heap->next)
	    first_heap->next->prev = first_heap;
	  else
	    last_heap = first_heap;
	}

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      SIZE excess = (char *) first_heap->bloc_start
	- ((char *) virtual_break_value + size);

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
	{
	  excess -= extra_bytes;
	  first_heap->bloc_start
	    = (POINTER) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

	  relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

	  for (b = first_bloc; b != NIL_BLOC; b = b->next)
	    {
	      memmove (b->new_data, b->data, b->size);
	      *b->variable = b->data = b->new_data;
	    }
	}

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
	{
	  /* We found additional space below the first heap.  */
	  first_heap->start = (POINTER) ((char *) virtual_break_value + size);
	}
    }

  virtual_break_value = (POINTER) ((char *) address + size);
  break_value = (last_bloc
		 ? (char *) last_bloc->data + last_bloc->size
		 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}
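
#if 0
/* Illustrative sketch, never compiled: the sbrk-like contract that
   r_alloc_sbrk honors as a __morecore hook.  The helper below is
   hypothetical and not part of ralloc; the semantics assumed here are
   that a positive SIZE grows the break, a negative SIZE shrinks it,
   and zero merely queries it.  */
static void
morecore_contract_sketch (void)
{
  POINTER before = r_alloc_sbrk (0);	/* Query the virtual break.  */
  POINTER space = r_alloc_sbrk (4096);	/* Grow; start of new space, or 0.  */

  if (space != 0)
    r_alloc_sbrk (-4096);		/* Shrink back, like sbrk (-n).  */
  (void) before;
}
#endif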


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

POINTER
r_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}
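
#if 0
/* Illustrative sketch, never compiled; the helper is hypothetical.
   It shows the calling convention: the caller passes the address of
   the very variable that will hold the data pointer, because the
   allocator rewrites that variable whenever the bloc relocates, so
   caching the pointer elsewhere across a possible relocation is
   unsafe.  */
static void
r_alloc_usage_sketch (void)
{
  POINTER buf;				/* The registered variable.  */

  if (r_alloc (&buf, 1024) == 0)
    return;				/* Allocation failed.  */

  ((char *) buf)[0] = 'x';		/* Always access through buf.  */

  r_alloc_free (&buf);			/* Frees the bloc, zeroes buf.  */
}
#endif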

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (register POINTER *ptr)
{
  register bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    abort (); /* Double free?  PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

#ifdef emacs
  refill_memory_reserve ();
#endif
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Growing requires shifting all blocs above this one upward in memory;
   shrinking does nothing unless at least page_size bytes would be
   recovered, in which case the bloc is resized downward.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

POINTER
r_re_alloc (POINTER *ptr, SIZE size)
{
  register bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    abort (); /* Already freed?  PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
	  && r_alloc_freeze_level == 0)
	{
	  resize_bloc (bloc, MEM_ROUNDUP (size));
	  /* Never mind if this fails, just do nothing...  */
	  /* It *should* be infallible!  */
	}
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
	{
	  bloc_ptr new_bloc;
	  new_bloc = get_bloc (MEM_ROUNDUP (size));
	  if (new_bloc)
	    {
	      new_bloc->variable = ptr;
	      *ptr = new_bloc->data;
	      bloc->variable = (POINTER *) NIL;
	    }
	  else
	    return NIL;
	}
      else
	{
	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
	    return NIL;
	}
    }
  return *ptr;
}
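
#if 0
/* Illustrative sketch, never compiled (the helper is hypothetical):
   growing a relocatable buffer.  The registered variable is updated in
   place, so the caller re-reads it after the call rather than caching
   the old value; on failure *PTR and the bloc are left intact.  */
static void
r_re_alloc_usage_sketch (void)
{
  POINTER buf;

  if (r_alloc (&buf, 1024) == 0)
    return;

  if (r_re_alloc (&buf, 4096) == 0)
    {
      r_alloc_free (&buf);		/* Old 1024-byte bloc still valid.  */
      return;
    }

  /* buf now points at at least 4096 usable bytes.  */
  r_alloc_free (&buf);
}
#endif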

/* Disable relocations, after making room for at least SIZE bytes
   of non-relocatable heap if possible.  The relocatable blocs are
   guaranteed to hold still until thawed, even if this means that
   malloc must return a null pointer.  */

void
r_alloc_freeze (long int size)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  /* If already frozen, we can't make any more room, so don't try.  */
  if (r_alloc_freeze_level > 0)
    size = 0;
  /* If we can't get the amount requested, half is better than nothing.  */
  while (size > 0 && r_alloc_sbrk (size) == 0)
    size /= 2;
  ++r_alloc_freeze_level;
  if (size > 0)
    r_alloc_sbrk (-size);
}

void
r_alloc_thaw (void)
{
  if (! r_alloc_initialized)
    r_alloc_init ();

  if (--r_alloc_freeze_level < 0)
    abort ();

  /* This frees all unused blocs.  It is not too inefficient, as the
     resize and memcpy are done only once.  Afterwards, all
     unreferenced blocs are already shrunk to zero size.  */
  if (!r_alloc_freeze_level)
    {
      bloc_ptr *b = &first_bloc;
      while (*b)
	if (!(*b)->variable)
	  free_bloc (*b);
	else
	  b = &(*b)->next;
    }
}
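
#if 0
/* Illustrative sketch, never compiled (the helper is hypothetical):
   bracketing code that holds a raw pointer into relocatable data.
   Freezing pins every bloc in place; the matching thaw re-enables
   relocation and collects blocs freed in the interim.  The calls
   nest, since freeze and thaw maintain a level counter.  */
static void
freeze_thaw_sketch (POINTER *buf)
{
  r_alloc_freeze (0);		/* Pin blocs; ask for no extra room.  */
  /* ... hand *buf to code that must not observe it moving ...  */
  r_alloc_thaw ();
  (void) buf;
}
#endif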


#if defined (emacs) && defined (DOUG_LEA_MALLOC)

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* emacs && DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (POINTER) sbrk (0));
  assert ((POINTER) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((POINTER) ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
	 the heap start has any sort of alignment.
	 Perhaps it should.  */
      assert ((POINTER) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
	{
	  assert (ph->end < h->start);
	  assert (h->start <= (POINTER) h && (POINTER) (h + 1) <= h->bloc_start);
	}

      if (h->bloc_start <= break_value && break_value <= h->end)
	found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((POINTER) MEM_ROUNDUP (b->data) == b->data);
      assert ((SIZE) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
	{
	  if (h->bloc_start <= b->data && b->data + b->size <= h->end)
	    break;
	  ph = h;
	}

      assert (h);

      if (pb && pb->data + pb->size != b->data)
	{
	  assert (ph && b->data == h->bloc_start);
	  while (ph)
	    {
	      if (ph->bloc_start <= pb->data
		  && pb->data + pb->size <= ph->end)
		{
		  assert (pb->data + pb->size + b->size > ph->end);
		  break;
		}
	      else
		{
		  assert (ph->bloc_start + b->size > ph->end);
		}
	      ph = ph->prev;
	    }
	}
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the bloc's record of which variable points to its data,
   changing it from OLD to NEW.  Used by buffer-swap-text in Emacs to
   restore consistency after it swaps the buffer text between two
   buffer objects.  The OLD pointer is checked to ensure that memory
   corruption does not occur due to misuse.  */
void
r_alloc_reset_variable (POINTER *old, POINTER *new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
	break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    abort (); /* Already freed?  OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}
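
#if 0
/* Illustrative sketch, never compiled (the helper is hypothetical):
   repairing the bloc records after two registered variables exchange
   their data pointers, along the lines of what buffer-swap-text needs.
   Each call re-binds the bloc that now underlies *NEW from OLD to NEW.  */
static void
swap_and_reset_sketch (POINTER *a, POINTER *b)
{
  POINTER tmp = *a;
  *a = *b;
  *b = tmp;			/* The variables now hold swapped data.  */

  r_alloc_reset_variable (a, b);	/* Bloc now under *b belongs to b.  */
  r_alloc_reset_variable (b, a);	/* Bloc now under *a belongs to a.  */
}
#endif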

\f
/***********************************************************************
			    Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#ifndef SYSTEM_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = (*real_morecore) (0);
  if (break_value == NIL)
    abort ();

  extra_bytes = ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  BLOCK_INPUT;
  mallopt (M_TOP_PAD, 64 * 4096);
  UNBLOCK_INPUT;
#else
#ifndef SYSTEM_MALLOC
  /* Give GNU malloc's morecore some hysteresis
     so that we move all the relocatable blocks much less often.  */
  __malloc_extra_blocks = 64;
#endif
#endif

#ifndef SYSTEM_MALLOC
  first_heap->end = (POINTER) ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  (*real_morecore) ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
	  (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}