/* Storage allocation and gc for GNU Emacs Lisp interpreter.
- Copyright (C) 1985, 86, 88, 93, 94, 95, 97, 98, 1999, 2000, 2001, 2002, 2003
+ Copyright (C) 1985,86,88,93,94,95,97,98,1999,2000,01,02,03,2004
Free Software Foundation, Inc.
This file is part of GNU Emacs.
#include <config.h>
#include <stdio.h>
+#include <limits.h> /* For CHAR_BIT. */
#ifdef ALLOC_DEBUG
#undef INLINE
#include <signal.h>
-/* GC_MALLOC_CHECK defined means perform validity checks of malloc'd
- memory. Can do this only if using gmalloc.c. */
-
-#if defined SYSTEM_MALLOC || defined DOUG_LEA_MALLOC
-#undef GC_MALLOC_CHECK
-#endif
-
/* This file is part of the core Lisp implementation, and thus must
deal with the real data structures. If the Lisp implementation is
replaced, this file likely will not be used. */
#include "syssignal.h"
#include <setjmp.h>
+/* GC_MALLOC_CHECK defined means perform validity checks of malloc'd
+ memory. Can do this only if using gmalloc.c. */
+
+#if defined SYSTEM_MALLOC || defined DOUG_LEA_MALLOC
+#undef GC_MALLOC_CHECK
+#endif
+
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#else
/* Mark, unmark, query mark bit of a Lisp string. S must be a pointer
to a struct Lisp_String. */
-#define MARK_STRING(S) ((S)->size |= MARKBIT)
-#define UNMARK_STRING(S) ((S)->size &= ~MARKBIT)
-#define STRING_MARKED_P(S) ((S)->size & MARKBIT)
+#define MARK_STRING(S) ((S)->size |= ARRAY_MARK_FLAG)
+#define UNMARK_STRING(S) ((S)->size &= ~ARRAY_MARK_FLAG)
+#define STRING_MARKED_P(S) ((S)->size & ARRAY_MARK_FLAG)
#define VECTOR_MARK(V) ((V)->size |= ARRAY_MARK_FLAG)
#define VECTOR_UNMARK(V) ((V)->size &= ~ARRAY_MARK_FLAG)
strings. */
#define GC_STRING_BYTES(S) (STRING_BYTES (S))
-#define GC_STRING_CHARS(S) ((S)->size & ~MARKBIT)
+#define GC_STRING_CHARS(S) ((S)->size & ~ARRAY_MARK_FLAG)
/* Number of bytes of consing done since the last gc. */
#ifndef HAVE_SHM
-/* Force it into data space! */
+/* Force it into data space! Initialize it to a nonzero value;
+ otherwise some compilers put it into BSS. */
-EMACS_INT pure[PURESIZE / sizeof (EMACS_INT)] = {0,};
+EMACS_INT pure[PURESIZE / sizeof (EMACS_INT)] = {1,};
#define PUREBEG (char *) pure
#else /* HAVE_SHM */
static void mark_buffer P_ ((Lisp_Object));
extern void mark_kboards P_ ((void));
+extern void mark_ttys P_ ((void));
static void gc_sweep P_ ((void));
static void mark_glyph_matrix P_ ((struct glyph_matrix *));
static void mark_face_cache P_ ((struct face_cache *));
struct gcpro *gcprolist;
-/* Addresses of staticpro'd variables. */
+/* Addresses of staticpro'd variables. Initialize it to a nonzero
+ value; otherwise some compilers put it into BSS. */
#define NSTATICS 1280
-Lisp_Object *staticvec[NSTATICS] = {0};
+Lisp_Object *staticvec[NSTATICS] = {&Vpurify_flag};
/* Index of next unused slot in staticvec. */
/* Value is SZ rounded up to the next multiple of ALIGNMENT.
ALIGNMENT must be a power of 2. */
-#define ALIGN(SZ, ALIGNMENT) \
- (((SZ) + (ALIGNMENT) - 1) & ~((ALIGNMENT) - 1))
+#define ALIGN(ptr, ALIGNMENT) \
+ ((POINTER_TYPE *) ((((EMACS_UINT)(ptr)) + (ALIGNMENT) - 1) \
+ & ~((ALIGNMENT) - 1)))
\f
}
-/* Like free but block interrupt input.. */
+/* Like free but block interrupt input. */
void
xfree (block)
val = (void *) malloc (nbytes);
+#ifndef USE_LSB_TAG
/* If the memory just allocated cannot be addressed thru a Lisp
object's pointer, and it needs to be,
that's equivalent to running out of memory. */
val = 0;
}
}
+#endif
#if GC_MARK_STACK && !defined GC_MALLOC_CHECK
if (val && type != MEM_TYPE_NON_LISP)
UNBLOCK_INPUT;
}
+/* Allocation of aligned blocks of memory to store Lisp data.
+   The entry point is lisp_align_malloc, which returns blocks of at
+   most BLOCK_BYTES bytes, each aligned on a BLOCK_ALIGN boundary.  */
+
+
+/* BLOCK_ALIGN has to be a power of 2.  */
+#define BLOCK_ALIGN (1 << 10)
+
+/* Padding to leave at the end of a malloc'd block.  This is to give
+   malloc a chance to minimize the amount of memory wasted to alignment.
+   It should be tuned to the particular malloc library used.
+   On glibc-2.3.2, malloc never tries to align, so a padding of 0 is best.
+   posix_memalign on the other hand would ideally prefer a value of 4
+   because otherwise, there's 1020 bytes wasted between consecutive
+   ablocks.  But testing shows that those 1020 bytes will most of the
+   time be efficiently used by malloc to place other objects, so a
+   value of 0 is still preferable unless you have a lot of cons&floats
+   and virtually nothing else.  */
+#define BLOCK_PADDING 0
+
+/* Usable payload size of one ablock: the whole block minus the
+   trailing `abase' pointer and the padding.
+   NOTE(review): `struct aligned_block' is not declared anywhere in
+   sight; every struct pointer has the same size, so this works, but
+   `struct ablock *' would name the actual member type -- confirm.  */
+#define BLOCK_BYTES \
+  (BLOCK_ALIGN - sizeof (struct aligned_block *) - BLOCK_PADDING)
+
+/* Internal data structures and constants.  */
+
+/* Number of ablocks carved out of each chunk obtained from malloc.  */
+#define ABLOCKS_SIZE 16
+
+/* An aligned block of memory.  */
+struct ablock
+{
+  union
+  {
+    char payload[BLOCK_BYTES];
+    struct ablock *next_free;
+  } x;
+  /* `abase' is the aligned base of the ablocks.
+     It is overloaded to hold the virtual `busy' field that counts
+     the number of used ablocks in the parent ablocks.
+     The first ablock has the `busy' field, the others have the `abase'
+     field.  To tell the difference, we assume that pointers will have
+     integer values larger than 2 * ABLOCKS_SIZE.  The lowest bit of
+     `busy' is used to tell whether the real base of the parent ablocks
+     is `abase' (if not, the word before the first ablock holds a
+     pointer to the real base).  */
+  struct ablocks *abase;
+  /* The padding of all but the last ablock is unused.  The padding of
+     the last ablock in an ablocks is not allocated.  */
+#if BLOCK_PADDING
+  char padding[BLOCK_PADDING];
+#endif
+};
+
+/* A bunch of consecutive aligned blocks.  */
+struct ablocks
+{
+  struct ablock blocks[ABLOCKS_SIZE];
+};
+
+/* Size of the block requested from malloc or memalign.  */
+#define ABLOCKS_BYTES (sizeof (struct ablocks) - BLOCK_PADDING)
+
+/* Aligned base of the ablocks containing BLOCK.  A small integer
+   value of `abase' means BLOCK is the first ablock, which carries the
+   virtual `busy' count instead; in that case BLOCK itself is the
+   base.  */
+#define ABLOCK_ABASE(block) \
+  (((unsigned long) (block)->abase) <= (1 + 2 * ABLOCKS_SIZE) \
+   ? (struct ablocks *)(block) \
+   : (block)->abase)
+
+/* Virtual `busy' field: twice the number of used ablocks, plus a low
+   bit recording whether the malloc'd base was already aligned.  */
+#define ABLOCKS_BUSY(abase) ((abase)->blocks[0].abase)
+
+/* Pointer to the (not necessarily aligned) malloc block.  */
+#ifdef HAVE_POSIX_MEMALIGN
+#define ABLOCKS_BASE(abase) (abase)
+#else
+#define ABLOCKS_BASE(abase) \
+  (1 & (long) ABLOCKS_BUSY (abase) ? abase : ((void**)abase)[-1])
+#endif
+
+/* The free list of currently unused ablocks.  */
+static struct ablock *free_ablock;
+
+/* Allocate an aligned block of nbytes.
+   Alignment is on a multiple of BLOCK_ALIGN and `nbytes' has to be
+   smaller or equal to BLOCK_BYTES.  */
+static POINTER_TYPE *
+lisp_align_malloc (nbytes, type)
+     size_t nbytes;
+     enum mem_type type;
+{
+  void *base, *val;
+  struct ablocks *abase;
+
+  eassert (nbytes <= BLOCK_BYTES);
+
+  BLOCK_INPUT;
+
+#ifdef GC_MALLOC_CHECK
+  allocated_mem_type = type;
+#endif
+
+  /* Refill the free list with a fresh bunch of ablocks if needed.  */
+  if (!free_ablock)
+    {
+      int i;
+      EMACS_INT aligned; /* int gets warning casting to 64-bit pointer.  */
+
+#ifdef DOUG_LEA_MALLOC
+      /* Prevent mmap'ing the chunk.  Lisp data may not be mmap'ed
+         because mapped region contents are not preserved in
+         a dumped Emacs.  */
+      mallopt (M_MMAP_MAX, 0);
+#endif
+
+#ifdef HAVE_POSIX_MEMALIGN
+      {
+        /* posix_memalign returns memory already aligned on
+           BLOCK_ALIGN, so the aligned base equals the real base.  */
+        int err = posix_memalign (&base, BLOCK_ALIGN, ABLOCKS_BYTES);
+        abase = err ? (base = NULL) : base;
+      }
+#else
+      base = malloc (ABLOCKS_BYTES);
+      abase = ALIGN (base, BLOCK_ALIGN);
+      if (base == 0)
+        {
+          UNBLOCK_INPUT;
+          memory_full ();
+        }
+#endif
+
+      aligned = (base == abase);
+      if (!aligned)
+        /* Stash the real malloc'd base in the word just before the
+           first ablock, so ABLOCKS_BASE can recover it for free.  */
+        ((void**)abase)[-1] = base;
+
+#ifdef DOUG_LEA_MALLOC
+      /* Back to a reasonable maximum of mmap'ed areas.  */
+      mallopt (M_MMAP_MAX, MMAP_MAX_AREAS);
+#endif
+
+#ifndef USE_LSB_TAG
+      /* If the memory just allocated cannot be addressed thru a Lisp
+         object's pointer, and it needs to be, that's equivalent to
+         running out of memory.  */
+      if (type != MEM_TYPE_NON_LISP)
+        {
+          Lisp_Object tem;
+          char *end = (char *) base + ABLOCKS_BYTES - 1;
+          XSETCONS (tem, end);
+          if ((char *) XCONS (tem) != end)
+            {
+              lisp_malloc_loser = base;
+              free (base);
+              UNBLOCK_INPUT;
+              memory_full ();
+            }
+        }
+#endif
+
+      /* Initialize the blocks and put them on the free list.
+         If `base' was not properly aligned, we can't use the last block.  */
+      for (i = 0; i < (aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1); i++)
+        {
+          abase->blocks[i].abase = abase;
+          abase->blocks[i].x.next_free = free_ablock;
+          free_ablock = &abase->blocks[i];
+        }
+      /* No block is used yet: `busy' is just the alignment bit.  */
+      ABLOCKS_BUSY (abase) = (struct ablocks *) (long) aligned;
+
+      eassert (0 == ((EMACS_UINT)abase) % BLOCK_ALIGN);
+      eassert (ABLOCK_ABASE (&abase->blocks[3]) == abase); /* 3 is arbitrary */
+      eassert (ABLOCK_ABASE (&abase->blocks[0]) == abase);
+      eassert (ABLOCKS_BASE (abase) == base);
+      eassert (aligned == (long) ABLOCKS_BUSY (abase));
+    }
+
+  abase = ABLOCK_ABASE (free_ablock);
+  /* Count one more used block; adding 2 keeps the low alignment bit
+     of `busy' intact.  */
+  ABLOCKS_BUSY (abase) = (struct ablocks *) (2 + (long) ABLOCKS_BUSY (abase));
+  val = free_ablock;
+  free_ablock = free_ablock->x.next_free;
+
+#if GC_MARK_STACK && !defined GC_MALLOC_CHECK
+  if (val && type != MEM_TYPE_NON_LISP)
+    mem_insert (val, (char *) val + nbytes, type);
+#endif
+
+  UNBLOCK_INPUT;
+  if (!val && nbytes)
+    memory_full ();
+
+  eassert (0 == ((EMACS_UINT)val) % BLOCK_ALIGN);
+  return val;
+}
+
+/* Return BLOCK, previously obtained from lisp_align_malloc, to the
+   free list; release the whole underlying ablocks chunk back to
+   malloc once all of its blocks are free.  */
+static void
+lisp_align_free (block)
+     POINTER_TYPE *block;
+{
+  struct ablock *ablock = block;
+  struct ablocks *abase = ABLOCK_ABASE (ablock);
+
+  BLOCK_INPUT;
+#if GC_MARK_STACK && !defined GC_MALLOC_CHECK
+  mem_delete (mem_find (block));
+#endif
+  /* Put on free list.  */
+  ablock->x.next_free = free_ablock;
+  free_ablock = ablock;
+  /* Update busy count: subtracting 2 keeps the alignment bit.  */
+  ABLOCKS_BUSY (abase) = (struct ablocks *) (-2 + (long) ABLOCKS_BUSY (abase));
+
+  if (2 > (long) ABLOCKS_BUSY (abase))
+    { /* All the blocks are free; only the alignment bit remains.  */
+      int i = 0, aligned = (long) ABLOCKS_BUSY (abase);
+      struct ablock **tem = &free_ablock;
+      struct ablock *atop = &abase->blocks[aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1];
+
+      /* Unlink from the free list every ablock that belongs to this
+         chunk, counting them as we go.  */
+      while (*tem)
+        {
+          if (*tem >= (struct ablock *) abase && *tem < atop)
+            {
+              i++;
+              *tem = (*tem)->x.next_free;
+            }
+          else
+            tem = &(*tem)->x.next_free;
+        }
+      eassert ((aligned & 1) == aligned);
+      eassert (i == (aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1));
+      free (ABLOCKS_BASE (abase));
+    }
+  UNBLOCK_INPUT;
+}
/* Return a new buffer structure allocated from the heap with
a call to lisp_malloc. */
struct interval_block
{
- struct interval_block *next;
+ /* Place `intervals' first, to preserve alignment. */
struct interval intervals[INTERVAL_BLOCK_SIZE];
+ struct interval_block *next;
};
/* Current interval block. Its `next' pointer points to older
static void
init_intervals ()
{
-  interval_block
-    = (struct interval_block *) lisp_malloc (sizeof *interval_block,
-					     MEM_TYPE_NON_LISP);
-  interval_block->next = 0;
-  bzero ((char *) interval_block->intervals, sizeof interval_block->intervals);
-  interval_block_index = 0;
+  interval_block = NULL;
+  /* Force alloc of new interval_block on first use.  */
+  interval_block_index = INTERVAL_BLOCK_SIZE;
   interval_free_list = 0;
-  n_interval_blocks = 1;
+  n_interval_blocks = 0;
}
{
eassert (!i->gcmarkbit); /* Intervals are never shared. */
i->gcmarkbit = 1;
- mark_object (&i->plist);
+ mark_object (i->plist);
}
/* Number of Lisp strings in a string_block structure. The 1020 is
1024 minus malloc overhead. */
-#define STRINGS_IN_STRING_BLOCK \
+#define STRING_BLOCK_SIZE \
((1020 - sizeof (struct string_block *)) / sizeof (struct Lisp_String))
/* Structure describing a block from which Lisp_String structures
struct string_block
{
+ /* Place `strings' first, to preserve alignment. */
+ struct Lisp_String strings[STRING_BLOCK_SIZE];
struct string_block *next;
- struct Lisp_String strings[STRINGS_IN_STRING_BLOCK];
};
/* Head and tail of the list of sblock structures holding Lisp string
string_bytes (s)
struct Lisp_String *s;
{
- int nbytes = (s->size_byte < 0 ? s->size & ~MARKBIT : s->size_byte);
+ int nbytes = (s->size_byte < 0 ? s->size & ~ARRAY_MARK_FLAG : s->size_byte);
if (!PURE_POINTER_P (s)
&& s->data
&& nbytes != SDATA_NBYTES (SDATA_OF_STRING (s)))
string_blocks = b;
++n_string_blocks;
- for (i = STRINGS_IN_STRING_BLOCK - 1; i >= 0; --i)
+ for (i = STRING_BLOCK_SIZE - 1; i >= 0; --i)
{
s = b->strings + i;
NEXT_FREE_LISP_STRING (s) = string_free_list;
string_free_list = s;
}
- total_free_strings += STRINGS_IN_STRING_BLOCK;
+ total_free_strings += STRING_BLOCK_SIZE;
}
/* Pop a Lisp_String off the free-list. */
next = b->next;
- for (i = 0; i < STRINGS_IN_STRING_BLOCK; ++i)
+ for (i = 0; i < STRING_BLOCK_SIZE; ++i)
{
struct Lisp_String *s = b->strings + i;
/* Free blocks that contain free Lisp_Strings only, except
the first two of them. */
- if (nfree == STRINGS_IN_STRING_BLOCK
- && total_free_strings > STRINGS_IN_STRING_BLOCK)
+ if (nfree == STRING_BLOCK_SIZE
+ && total_free_strings > STRING_BLOCK_SIZE)
{
lisp_free (b);
--n_string_blocks;
CHECK_NATNUM (length);
- bits_per_value = sizeof (EMACS_INT) * BITS_PER_CHAR;
+ bits_per_value = sizeof (EMACS_INT) * BOOL_VECTOR_BITS_PER_CHAR;
length_in_elts = (XFASTINT (length) + bits_per_value - 1) / bits_per_value;
- length_in_chars = ((XFASTINT (length) + BITS_PER_CHAR - 1) / BITS_PER_CHAR);
+ length_in_chars = ((XFASTINT (length) + BOOL_VECTOR_BITS_PER_CHAR - 1)
+ / BOOL_VECTOR_BITS_PER_CHAR);
/* We must allocate one more elements than LENGTH_IN_ELTS for the
slot `size' of the struct Lisp_Bool_Vector. */
p->data[i] = real_init;
/* Clear the extraneous bits in the last byte. */
- if (XINT (length) != length_in_chars * BITS_PER_CHAR)
+ if (XINT (length) != length_in_chars * BOOL_VECTOR_BITS_PER_CHAR)
XBOOL_VECTOR (val)->data[length_in_chars - 1]
- &= (1 << (XINT (length) % BITS_PER_CHAR)) - 1;
+ &= (1 << (XINT (length) % BOOL_VECTOR_BITS_PER_CHAR)) - 1;
return val;
}
/* We store float cells inside of float_blocks, allocating a new
float_block with malloc whenever necessary. Float cells reclaimed
by GC are put on a free list to be reallocated before allocating
- any new float cells from the latest float_block.
+ any new float cells from the latest float_block. */
- Each float_block is just under 1020 bytes long, since malloc really
- allocates in units of powers of two and uses 4 bytes for its own
- overhead. */
+#define FLOAT_BLOCK_SIZE \
+ (((BLOCK_BYTES - sizeof (struct float_block *) \
+ /* The compiler might add padding at the end. */ \
+ - (sizeof (struct Lisp_Float) - sizeof (int))) * CHAR_BIT) \
+ / (sizeof (struct Lisp_Float) * CHAR_BIT + 1))
-#define FLOAT_BLOCK_SIZE \
- ((1020 - sizeof (struct float_block *)) / sizeof (struct Lisp_Float))
+#define GETMARKBIT(block,n) \
+ (((block)->gcmarkbits[(n) / (sizeof(int) * CHAR_BIT)] \
+ >> ((n) % (sizeof(int) * CHAR_BIT))) \
+ & 1)
+
+#define SETMARKBIT(block,n) \
+ (block)->gcmarkbits[(n) / (sizeof(int) * CHAR_BIT)] \
+ |= 1 << ((n) % (sizeof(int) * CHAR_BIT))
+
+#define UNSETMARKBIT(block,n) \
+ (block)->gcmarkbits[(n) / (sizeof(int) * CHAR_BIT)] \
+ &= ~(1 << ((n) % (sizeof(int) * CHAR_BIT)))
+
+#define FLOAT_BLOCK(fptr) \
+ ((struct float_block *)(((EMACS_UINT)(fptr)) & ~(BLOCK_ALIGN - 1)))
+
+#define FLOAT_INDEX(fptr) \
+ ((((EMACS_UINT)(fptr)) & (BLOCK_ALIGN - 1)) / sizeof (struct Lisp_Float))
struct float_block
{
- struct float_block *next;
+ /* Place `floats' at the beginning, to ease up FLOAT_INDEX's job. */
struct Lisp_Float floats[FLOAT_BLOCK_SIZE];
+ int gcmarkbits[1 + FLOAT_BLOCK_SIZE / (sizeof(int) * CHAR_BIT)];
+ struct float_block *next;
};
+#define FLOAT_MARKED_P(fptr) \
+ GETMARKBIT (FLOAT_BLOCK (fptr), FLOAT_INDEX ((fptr)))
+
+#define FLOAT_MARK(fptr) \
+ SETMARKBIT (FLOAT_BLOCK (fptr), FLOAT_INDEX ((fptr)))
+
+#define FLOAT_UNMARK(fptr) \
+ UNSETMARKBIT (FLOAT_BLOCK (fptr), FLOAT_INDEX ((fptr)))
+
/* Current float_block. */
struct float_block *float_block;
void
init_float ()
{
- float_block = (struct float_block *) lisp_malloc (sizeof *float_block,
- MEM_TYPE_FLOAT);
- float_block->next = 0;
- bzero ((char *) float_block->floats, sizeof float_block->floats);
- float_block_index = 0;
+ float_block = NULL;
+ float_block_index = FLOAT_BLOCK_SIZE; /* Force alloc of new float_block. */
float_free_list = 0;
- n_float_blocks = 1;
+ n_float_blocks = 0;
}
struct Lisp_Float *ptr;
{
*(struct Lisp_Float **)&ptr->data = float_free_list;
-#if GC_MARK_STACK
- ptr->type = Vdead;
-#endif
float_free_list = ptr;
}
{
register struct float_block *new;
- new = (struct float_block *) lisp_malloc (sizeof *new,
- MEM_TYPE_FLOAT);
+ new = (struct float_block *) lisp_align_malloc (sizeof *new,
+ MEM_TYPE_FLOAT);
new->next = float_block;
+ bzero ((char *) new->gcmarkbits, sizeof new->gcmarkbits);
float_block = new;
float_block_index = 0;
n_float_blocks++;
}
- XSETFLOAT (val, &float_block->floats[float_block_index++]);
+ XSETFLOAT (val, &float_block->floats[float_block_index]);
+ float_block_index++;
}
XFLOAT_DATA (val) = float_value;
- XSETFASTINT (XFLOAT (val)->type, 0); /* bug chasing -wsr */
+ eassert (!FLOAT_MARKED_P (XFLOAT (val)));
consing_since_gc += sizeof (struct Lisp_Float);
floats_consed++;
return val;
/* We store cons cells inside of cons_blocks, allocating a new
cons_block with malloc whenever necessary. Cons cells reclaimed by
GC are put on a free list to be reallocated before allocating
- any new cons cells from the latest cons_block.
-
- Each cons_block is just under 1020 bytes long,
- since malloc really allocates in units of powers of two
- and uses 4 bytes for its own overhead. */
+ any new cons cells from the latest cons_block. */
#define CONS_BLOCK_SIZE \
- ((1020 - sizeof (struct cons_block *)) / sizeof (struct Lisp_Cons))
+ (((BLOCK_BYTES - sizeof (struct cons_block *)) * CHAR_BIT) \
+ / (sizeof (struct Lisp_Cons) * CHAR_BIT + 1))
+
+#define CONS_BLOCK(fptr) \
+ ((struct cons_block *)(((EMACS_UINT)(fptr)) & ~(BLOCK_ALIGN - 1)))
+
+#define CONS_INDEX(fptr) \
+ ((((EMACS_UINT)(fptr)) & (BLOCK_ALIGN - 1)) / sizeof (struct Lisp_Cons))
struct cons_block
{
- struct cons_block *next;
+ /* Place `conses' at the beginning, to ease up CONS_INDEX's job. */
struct Lisp_Cons conses[CONS_BLOCK_SIZE];
+ int gcmarkbits[1 + CONS_BLOCK_SIZE / (sizeof(int) * CHAR_BIT)];
+ struct cons_block *next;
};
+#define CONS_MARKED_P(fptr) \
+ GETMARKBIT (CONS_BLOCK (fptr), CONS_INDEX ((fptr)))
+
+#define CONS_MARK(fptr) \
+ SETMARKBIT (CONS_BLOCK (fptr), CONS_INDEX ((fptr)))
+
+#define CONS_UNMARK(fptr) \
+ UNSETMARKBIT (CONS_BLOCK (fptr), CONS_INDEX ((fptr)))
+
/* Current cons_block. */
struct cons_block *cons_block;
void
init_cons ()
{
- cons_block = (struct cons_block *) lisp_malloc (sizeof *cons_block,
- MEM_TYPE_CONS);
- cons_block->next = 0;
- bzero ((char *) cons_block->conses, sizeof cons_block->conses);
- cons_block_index = 0;
+ cons_block = NULL;
+ cons_block_index = CONS_BLOCK_SIZE; /* Force alloc of new cons_block. */
cons_free_list = 0;
- n_cons_blocks = 1;
+ n_cons_blocks = 0;
}
if (cons_block_index == CONS_BLOCK_SIZE)
{
register struct cons_block *new;
- new = (struct cons_block *) lisp_malloc (sizeof *new,
- MEM_TYPE_CONS);
+ new = (struct cons_block *) lisp_align_malloc (sizeof *new,
+ MEM_TYPE_CONS);
+ bzero ((char *) new->gcmarkbits, sizeof new->gcmarkbits);
new->next = cons_block;
cons_block = new;
cons_block_index = 0;
n_cons_blocks++;
}
- XSETCONS (val, &cons_block->conses[cons_block_index++]);
+ XSETCONS (val, &cons_block->conses[cons_block_index]);
+ cons_block_index++;
}
XSETCAR (val, car);
XSETCDR (val, cdr);
+ eassert (!CONS_MARKED_P (XCONS (val)));
consing_since_gc += sizeof (struct Lisp_Cons);
cons_cells_consed++;
return val;
/* Prevent mmap'ing the chunk. Lisp data may not be mmap'ed
because mapped region contents are not preserved in
a dumped Emacs. */
+ BLOCK_INPUT;
mallopt (M_MMAP_MAX, 0);
+ UNBLOCK_INPUT;
#endif
nbytes = sizeof *p + (len - 1) * sizeof p->contents[0];
#ifdef DOUG_LEA_MALLOC
/* Back to a reasonable maximum of mmap'ed areas. */
+ BLOCK_INPUT;
mallopt (M_MMAP_MAX, MMAP_MAX_AREAS);
+ UNBLOCK_INPUT;
#endif
consing_since_gc += nbytes;
struct symbol_block
{
- struct symbol_block *next;
+ /* Place `symbols' first, to preserve alignment. */
struct Lisp_Symbol symbols[SYMBOL_BLOCK_SIZE];
+ struct symbol_block *next;
};
/* Current symbol block and index of first unused Lisp_Symbol
void
init_symbol ()
{
- symbol_block = (struct symbol_block *) lisp_malloc (sizeof *symbol_block,
- MEM_TYPE_SYMBOL);
- symbol_block->next = 0;
- bzero ((char *) symbol_block->symbols, sizeof symbol_block->symbols);
- symbol_block_index = 0;
+ symbol_block = NULL;
+ symbol_block_index = SYMBOL_BLOCK_SIZE;
symbol_free_list = 0;
- n_symbol_blocks = 1;
+ n_symbol_blocks = 0;
}
symbol_block_index = 0;
n_symbol_blocks++;
}
- XSETSYMBOL (val, &symbol_block->symbols[symbol_block_index++]);
+ XSETSYMBOL (val, &symbol_block->symbols[symbol_block_index]);
+ symbol_block_index++;
}
p = XSYMBOL (val);
struct marker_block
{
- struct marker_block *next;
+ /* Place `markers' first, to preserve alignment. */
union Lisp_Misc markers[MARKER_BLOCK_SIZE];
+ struct marker_block *next;
};
struct marker_block *marker_block;
void
init_marker ()
{
- marker_block = (struct marker_block *) lisp_malloc (sizeof *marker_block,
- MEM_TYPE_MISC);
- marker_block->next = 0;
- bzero ((char *) marker_block->markers, sizeof marker_block->markers);
- marker_block_index = 0;
+ marker_block = NULL;
+ marker_block_index = MARKER_BLOCK_SIZE;
marker_free_list = 0;
- n_marker_blocks = 1;
+ n_marker_blocks = 0;
}
/* Return a newly allocated Lisp_Misc object, with no substructure. */
marker_block_index = 0;
n_marker_blocks++;
}
- XSETMISC (val, &marker_block->markers[marker_block_index++]);
+ XSETMISC (val, &marker_block->markers[marker_block_index]);
+ marker_block_index++;
}
consing_since_gc += sizeof (union Lisp_Misc);
must not be on the free-list. */
return (offset >= 0
&& offset % sizeof b->strings[0] == 0
+ && offset < (STRING_BLOCK_SIZE * sizeof b->strings[0])
&& ((struct Lisp_String *) p)->data != NULL);
}
else
and not be on the free-list. */
return (offset >= 0
&& offset % sizeof b->conses[0] == 0
+ && offset < (CONS_BLOCK_SIZE * sizeof b->conses[0])
&& (b != cons_block
|| offset / sizeof b->conses[0] < cons_block_index)
&& !EQ (((struct Lisp_Cons *) p)->car, Vdead));
and not be on the free-list. */
return (offset >= 0
&& offset % sizeof b->symbols[0] == 0
+ && offset < (SYMBOL_BLOCK_SIZE * sizeof b->symbols[0])
&& (b != symbol_block
|| offset / sizeof b->symbols[0] < symbol_block_index)
&& !EQ (((struct Lisp_Symbol *) p)->function, Vdead));
struct float_block *b = (struct float_block *) m->start;
int offset = (char *) p - (char *) &b->floats[0];
- /* P must point to the start of a Lisp_Float, not be
- one of the unused cells in the current float block,
- and not be on the free-list. */
+ /* P must point to the start of a Lisp_Float and not be
+ one of the unused cells in the current float block. */
return (offset >= 0
&& offset % sizeof b->floats[0] == 0
+ && offset < (FLOAT_BLOCK_SIZE * sizeof b->floats[0])
&& (b != float_block
- || offset / sizeof b->floats[0] < float_block_index)
- && !EQ (((struct Lisp_Float *) p)->type, Vdead));
+ || offset / sizeof b->floats[0] < float_block_index));
}
else
return 0;
and not be on the free-list. */
return (offset >= 0
&& offset % sizeof b->markers[0] == 0
+ && offset < (MARKER_BLOCK_SIZE * sizeof b->markers[0])
&& (b != marker_block
|| offset / sizeof b->markers[0] < marker_block_index)
&& ((union Lisp_Misc *) p)->u_marker.type != Lisp_Misc_Free);
break;
case Lisp_Cons:
- mark_p = (live_cons_p (m, po)
- && !XMARKBIT (XCONS (obj)->car));
+ mark_p = (live_cons_p (m, po) && !CONS_MARKED_P (XCONS (obj)));
break;
case Lisp_Symbol:
break;
case Lisp_Float:
- mark_p = (live_float_p (m, po)
- && !XMARKBIT (XFLOAT (obj)->type));
+ mark_p = (live_float_p (m, po) && !FLOAT_MARKED_P (XFLOAT (obj)));
break;
case Lisp_Vectorlike:
zombies[nzombies] = obj;
++nzombies;
#endif
- mark_object (&obj);
+ mark_object (obj);
}
}
}
break;
case MEM_TYPE_CONS:
- if (live_cons_p (m, p)
- && !XMARKBIT (((struct Lisp_Cons *) p)->car))
+ if (live_cons_p (m, p) && !CONS_MARKED_P ((struct Lisp_Cons *) p))
XSETCONS (obj, p);
break;
break;
case MEM_TYPE_FLOAT:
- if (live_float_p (m, p)
- && !XMARKBIT (((struct Lisp_Float *) p)->type))
+ if (live_float_p (m, p) && !FLOAT_MARKED_P (p))
XSETFLOAT (obj, p);
break;
}
if (!GC_NILP (obj))
- mark_object (&obj);
+ mark_object (obj);
}
}
/* This trick flushes the register windows so that all the state of
the process is contained in the stack. */
- /* Fixme: Code in the Boehm GC sugests flushing (with `flushrs') is
+ /* Fixme: Code in the Boehm GC suggests flushing (with `flushrs') is
needed on ia64 too. See mach_dep.c, where it also says inline
assembler doesn't work with relevant proprietary compilers. */
#ifdef sparc
int type;
{
POINTER_TYPE *result;
+#ifdef USE_LSB_TAG
+ size_t alignment = (1 << GCTYPEBITS);
+#else
size_t alignment = sizeof (EMACS_INT);
/* Give Lisp_Floats an extra alignment. */
alignment = sizeof (struct Lisp_Float);
#endif
}
+#endif
again:
- result = (POINTER_TYPE *) ALIGN ((EMACS_UINT)purebeg + pure_bytes_used, alignment);
+ result = ALIGN (purebeg + pure_bytes_used, alignment);
pure_bytes_used = ((char *)result - (char *)purebeg) + size;
if (pure_bytes_used <= pure_size)
else if (COMPILEDP (obj) || VECTORP (obj))
{
register struct Lisp_Vector *vec;
- register int i, size;
+ register int i;
+ EMACS_INT size;
size = XVECTOR (obj)->size;
if (size & PSEUDOVECTOR_FLAG)
size &= PSEUDOVECTOR_SIZE_MASK;
- vec = XVECTOR (make_pure_vector ((EMACS_INT) size));
+ vec = XVECTOR (make_pure_vector (size));
for (i = 0; i < size; i++)
vec->contents[i] = Fpurecopy (XVECTOR (obj)->contents[i]);
if (COMPILEDP (obj))
DEFUN ("garbage-collect", Fgarbage_collect, Sgarbage_collect, 0, 0, "",
doc: /* Reclaim storage for Lisp objects no longer needed.
-Returns info on amount of space in use:
+Garbage collection happens automatically if you cons more than
+`gc-cons-threshold' bytes of Lisp data since previous garbage collection.
+`garbage-collect' normally returns a list with info on amount of space in use:
((USED-CONSES . FREE-CONSES) (USED-SYMS . FREE-SYMS)
(USED-MARKERS . FREE-MARKERS) USED-STRING-CHARS USED-VECTOR-SLOTS
(USED-FLOATS . FREE-FLOATS) (USED-INTERVALS . FREE-INTERVALS)
(USED-STRINGS . FREE-STRINGS))
-Garbage collection happens automatically if you cons more than
-`gc-cons-threshold' bytes of Lisp data since previous garbage collection. */)
+However, if there was overflow in pure space, `garbage-collect'
+returns nil, because real GC can't be done. */)
()
{
register struct specbinding *bind;
/* clear_marks (); */
- /* Mark all the special slots that serve as the roots of accessibility.
-
- Usually the special slots to mark are contained in particular structures.
- Then we know no slot is marked twice because the structures don't overlap.
- In some cases, the structures point to the slots to be marked.
- For these, we use MARKBIT to avoid double marking of the slot. */
+ /* Mark all the special slots that serve as the roots of accessibility. */
for (i = 0; i < staticidx; i++)
- mark_object (staticvec[i]);
+ mark_object (*staticvec[i]);
#if (GC_MARK_STACK == GC_MAKE_GCPROS_NOOPS \
|| GC_MARK_STACK == GC_MARK_STACK_CHECK_GCPROS)
register struct gcpro *tail;
for (tail = gcprolist; tail; tail = tail->next)
for (i = 0; i < tail->nvars; i++)
- if (!XMARKBIT (tail->var[i]))
- {
- /* Explicit casting prevents compiler warning about
- discarding the `volatile' qualifier. */
- mark_object ((Lisp_Object *)&tail->var[i]);
- XMARK (tail->var[i]);
- }
+ mark_object (tail->var[i]);
}
#endif
mark_byte_stack ();
for (bind = specpdl; bind != specpdl_ptr; bind++)
{
- /* These casts avoid a warning for discarding `volatile'. */
- mark_object ((Lisp_Object *) &bind->symbol);
- mark_object ((Lisp_Object *) &bind->old_value);
+ mark_object (bind->symbol);
+ mark_object (bind->old_value);
}
for (catch = catchlist; catch; catch = catch->next)
{
- mark_object (&catch->tag);
- mark_object (&catch->val);
+ mark_object (catch->tag);
+ mark_object (catch->val);
}
for (handler = handlerlist; handler; handler = handler->next)
{
- mark_object (&handler->handler);
- mark_object (&handler->var);
+ mark_object (handler->handler);
+ mark_object (handler->var);
}
for (backlist = backtrace_list; backlist; backlist = backlist->next)
{
- if (!XMARKBIT (*backlist->function))
- {
- mark_object (backlist->function);
- XMARK (*backlist->function);
- }
+ mark_object (*backlist->function);
+
if (backlist->nargs == UNEVALLED || backlist->nargs == MANY)
i = 0;
else
i = backlist->nargs - 1;
for (; i >= 0; i--)
- if (!XMARKBIT (backlist->args[i]))
- {
- mark_object (&backlist->args[i]);
- XMARK (backlist->args[i]);
- }
+ mark_object (backlist->args[i]);
}
mark_kboards ();
+ mark_ttys ();
/* Look thru every buffer's undo list
for elements that update markers that were not marked,
/* Clear the mark bits that we set in certain root slots. */
-#if (GC_MARK_STACK == GC_USE_GCPROS_AS_BEFORE \
- || GC_MARK_STACK == GC_USE_GCPROS_CHECK_ZOMBIES)
- {
- register struct gcpro *tail;
-
- for (tail = gcprolist; tail; tail = tail->next)
- for (i = 0; i < tail->nvars; i++)
- XUNMARK (tail->var[i]);
- }
-#endif
-
unmark_byte_stack ();
- for (backlist = backtrace_list; backlist; backlist = backlist->next)
- {
- XUNMARK (*backlist->function);
- if (backlist->nargs == UNEVALLED || backlist->nargs == MANY)
- i = 0;
- else
- i = backlist->nargs - 1;
- for (; i >= 0; i--)
- XUNMARK (backlist->args[i]);
- }
VECTOR_UNMARK (&buffer_defaults);
VECTOR_UNMARK (&buffer_local_symbols);
for (; glyph < end_glyph; ++glyph)
if (GC_STRINGP (glyph->object)
&& !STRING_MARKED_P (XSTRING (glyph->object)))
- mark_object (&glyph->object);
+ mark_object (glyph->object);
}
}
}
if (face)
{
for (j = 0; j < LFACE_VECTOR_SIZE; ++j)
- mark_object (&face->lface[j]);
+ mark_object (face->lface[j]);
}
}
}
mark_image (img)
struct image *img;
{
- mark_object (&img->spec);
+ mark_object (img->spec);
if (!NILP (img->data.lisp_val))
- mark_object (&img->data.lisp_val);
+ mark_object (img->data.lisp_val);
}
all the references contained in it. */
#define LAST_MARKED_SIZE 500
-Lisp_Object *last_marked[LAST_MARKED_SIZE];
+Lisp_Object last_marked[LAST_MARKED_SIZE];
int last_marked_index;
/* For debugging--call abort when we cdr down this many
int mark_object_loop_halt;
void
-mark_object (argptr)
- Lisp_Object *argptr;
+mark_object (arg)
+ Lisp_Object arg;
{
- Lisp_Object *objptr = argptr;
- register Lisp_Object obj;
+ register Lisp_Object obj = arg;
#ifdef GC_CHECK_MARKED_OBJECTS
void *po;
struct mem_node *m;
int cdr_count = 0;
loop:
- obj = *objptr;
- loop2:
- XUNMARK (obj);
if (PURE_POINTER_P (XPNTR (obj)))
return;
- last_marked[last_marked_index++] = objptr;
+ last_marked[last_marked_index++] = obj;
if (last_marked_index == LAST_MARKED_SIZE)
last_marked_index = 0;
for (i = 0; i < size; i++) /* and then mark its elements */
{
if (i != COMPILED_CONSTANTS)
- mark_object (&ptr->contents[i]);
+ mark_object (ptr->contents[i]);
}
- /* This cast should be unnecessary, but some Mips compiler complains
- (MIPS-ABI + SysVR4, DC/OSx, etc). */
- objptr = (Lisp_Object *) &ptr->contents[COMPILED_CONSTANTS];
+ obj = ptr->contents[COMPILED_CONSTANTS];
goto loop;
}
else if (GC_FRAMEP (obj))
VECTOR_MARK (ptr); /* Else mark it */
CHECK_LIVE (live_vector_p);
- mark_object (&ptr->name);
- mark_object (&ptr->icon_name);
- mark_object (&ptr->title);
- mark_object (&ptr->focus_frame);
- mark_object (&ptr->selected_window);
- mark_object (&ptr->minibuffer_window);
- mark_object (&ptr->param_alist);
- mark_object (&ptr->scroll_bars);
- mark_object (&ptr->condemned_scroll_bars);
- mark_object (&ptr->menu_bar_items);
- mark_object (&ptr->face_alist);
- mark_object (&ptr->menu_bar_vector);
- mark_object (&ptr->buffer_predicate);
- mark_object (&ptr->buffer_list);
- mark_object (&ptr->menu_bar_window);
- mark_object (&ptr->tool_bar_window);
+ mark_object (ptr->name);
+ mark_object (ptr->icon_name);
+ mark_object (ptr->title);
+ mark_object (ptr->focus_frame);
+ mark_object (ptr->selected_window);
+ mark_object (ptr->minibuffer_window);
+ mark_object (ptr->param_alist);
+ mark_object (ptr->scroll_bars);
+ mark_object (ptr->condemned_scroll_bars);
+ mark_object (ptr->menu_bar_items);
+ mark_object (ptr->face_alist);
+ mark_object (ptr->menu_bar_vector);
+ mark_object (ptr->buffer_predicate);
+ mark_object (ptr->buffer_list);
+ mark_object (ptr->menu_bar_window);
+ mark_object (ptr->tool_bar_window);
mark_face_cache (ptr->face_cache);
#ifdef HAVE_WINDOW_SYSTEM
mark_image_cache (ptr);
- mark_object (&ptr->tool_bar_items);
- mark_object (&ptr->desired_tool_bar_string);
- mark_object (&ptr->current_tool_bar_string);
+ mark_object (ptr->tool_bar_items);
+ mark_object (ptr->desired_tool_bar_string);
+ mark_object (ptr->current_tool_bar_string);
#endif /* HAVE_WINDOW_SYSTEM */
}
else if (GC_BOOL_VECTOR_P (obj))
for (i = 0;
(char *) &ptr->contents[i] < (char *) &w->current_matrix;
i++)
- mark_object (&ptr->contents[i]);
+ mark_object (ptr->contents[i]);
/* Mark glyphs for leaf windows. Marking window matrices is
sufficient because frame matrices use the same glyph
Being in the next_weak chain
should not keep the hash table alive.
No need to mark `count' since it is an integer. */
- mark_object (&h->test);
- mark_object (&h->weak);
- mark_object (&h->rehash_size);
- mark_object (&h->rehash_threshold);
- mark_object (&h->hash);
- mark_object (&h->next);
- mark_object (&h->index);
- mark_object (&h->user_hash_function);
- mark_object (&h->user_cmp_function);
+ mark_object (h->test);
+ mark_object (h->weak);
+ mark_object (h->rehash_size);
+ mark_object (h->rehash_threshold);
+ mark_object (h->hash);
+ mark_object (h->next);
+ mark_object (h->index);
+ mark_object (h->user_hash_function);
+ mark_object (h->user_cmp_function);
/* If hash table is not weak, mark all keys and values.
For weak tables, mark only the vector. */
if (GC_NILP (h->weak))
- mark_object (&h->key_and_value);
+ mark_object (h->key_and_value);
else
VECTOR_MARK (XVECTOR (h->key_and_value));
}
size &= PSEUDOVECTOR_SIZE_MASK;
for (i = 0; i < size; i++) /* and then mark its elements */
- mark_object (&ptr->contents[i]);
+ mark_object (ptr->contents[i]);
}
break;
if (ptr->gcmarkbit) break;
CHECK_ALLOCATED_AND_LIVE (live_symbol_p);
ptr->gcmarkbit = 1;
- mark_object ((Lisp_Object *) &ptr->value);
- mark_object (&ptr->function);
- mark_object (&ptr->plist);
+ mark_object (ptr->value);
+ mark_object (ptr->function);
+ mark_object (ptr->plist);
if (!PURE_POINTER_P (XSTRING (ptr->xname)))
MARK_STRING (XSTRING (ptr->xname));
ptr = ptr->next;
if (ptr)
{
- /* For the benefit of the last_marked log. */
- objptr = (Lisp_Object *)&XSYMBOL (obj)->next;
ptrx = ptr; /* Use of ptrx avoids compiler bug on Sun */
XSETSYMBOL (obj, ptrx);
- /* We can't goto loop here because *objptr doesn't contain an
- actual Lisp_Object with valid datatype field. */
- goto loop2;
+ goto loop;
}
}
break;
/* If the cdr is nil, avoid recursion for the car. */
if (EQ (ptr->cdr, Qnil))
{
- objptr = &ptr->realvalue;
+ obj = ptr->realvalue;
goto loop;
}
- mark_object (&ptr->realvalue);
- mark_object (&ptr->buffer);
- mark_object (&ptr->frame);
- objptr = &ptr->cdr;
+ mark_object (ptr->realvalue);
+ mark_object (ptr->buffer);
+ mark_object (ptr->frame);
+ obj = ptr->cdr;
goto loop;
}
since all markable slots in current buffer marked anyway. */
/* Don't need to do Lisp_Objfwd, since the places they point
are protected with staticpro. */
+ case Lisp_Misc_Save_Value:
break;
case Lisp_Misc_Overlay:
{
struct Lisp_Overlay *ptr = XOVERLAY (obj);
- mark_object (&ptr->start);
- mark_object (&ptr->end);
- objptr = &ptr->plist;
- goto loop;
+ mark_object (ptr->start);
+ mark_object (ptr->end);
+ mark_object (ptr->plist);
+ if (ptr->next)
+ {
+ XSETMISC (obj, ptr->next);
+ goto loop;
+ }
}
break;
case Lisp_Cons:
{
register struct Lisp_Cons *ptr = XCONS (obj);
- if (XMARKBIT (ptr->car)) break;
+ if (CONS_MARKED_P (ptr)) break;
CHECK_ALLOCATED_AND_LIVE (live_cons_p);
- XMARK (ptr->car);
+ CONS_MARK (ptr);
/* If the cdr is nil, avoid recursion for the car. */
if (EQ (ptr->cdr, Qnil))
{
- objptr = &ptr->car;
+ obj = ptr->car;
cdr_count = 0;
goto loop;
}
- mark_object (&ptr->car);
- objptr = &ptr->cdr;
+ mark_object (ptr->car);
+ obj = ptr->cdr;
cdr_count++;
if (cdr_count == mark_object_loop_halt)
abort ();
case Lisp_Float:
CHECK_ALLOCATED_AND_LIVE (live_float_p);
- XMARK (XFLOAT (obj)->type);
+ FLOAT_MARK (XFLOAT (obj));
break;
case Lisp_Int:
Lisp_Object buf;
{
register struct buffer *buffer = XBUFFER (buf);
- register Lisp_Object *ptr;
+ register Lisp_Object *ptr, tmp;
Lisp_Object base_buffer;
VECTOR_MARK (buffer);
{
register struct Lisp_Cons *ptr = XCONS (tail);
- if (XMARKBIT (ptr->car))
+ if (CONS_MARKED_P (ptr))
break;
- XMARK (ptr->car);
+ CONS_MARK (ptr);
if (GC_CONSP (ptr->car)
- && ! XMARKBIT (XCAR (ptr->car))
+ && !CONS_MARKED_P (XCONS (ptr->car))
&& GC_MARKERP (XCAR (ptr->car)))
{
- XMARK (XCAR_AS_LVALUE (ptr->car));
- mark_object (&XCDR_AS_LVALUE (ptr->car));
+ CONS_MARK (XCONS (ptr->car));
+ mark_object (XCDR (ptr->car));
}
else
- mark_object (&ptr->car);
+ mark_object (ptr->car);
if (CONSP (ptr->cdr))
tail = ptr->cdr;
break;
}
- mark_object (&XCDR_AS_LVALUE (tail));
+ mark_object (XCDR (tail));
}
else
- mark_object (&buffer->undo_list);
+ mark_object (buffer->undo_list);
+
+ if (buffer->overlays_before)
+ {
+ XSETMISC (tmp, buffer->overlays_before);
+ mark_object (tmp);
+ }
+ if (buffer->overlays_after)
+ {
+ XSETMISC (tmp, buffer->overlays_after);
+ mark_object (tmp);
+ }
for (ptr = &buffer->name;
(char *)ptr < (char *)buffer + sizeof (struct buffer);
ptr++)
- mark_object (ptr);
+ mark_object (*ptr);
/* If this is an indirect buffer, mark its base buffer. */
- if (buffer->base_buffer && !XMARKBIT (buffer->base_buffer->name))
+ if (buffer->base_buffer && !VECTOR_MARKED_P (buffer->base_buffer))
{
XSETBUFFER (base_buffer, buffer->base_buffer);
mark_buffer (base_buffer);
break;
case Lisp_String:
- {
- struct Lisp_String *s = XSTRING (obj);
- survives_p = STRING_MARKED_P (s);
- }
+ survives_p = STRING_MARKED_P (XSTRING (obj));
break;
case Lisp_Vectorlike:
- if (GC_BUFFERP (obj))
- survives_p = VECTOR_MARKED_P (XBUFFER (obj));
- else if (GC_SUBRP (obj))
- survives_p = 1;
- else
- survives_p = VECTOR_MARKED_P (XVECTOR (obj));
+ survives_p = GC_SUBRP (obj) || VECTOR_MARKED_P (XVECTOR (obj));
break;
case Lisp_Cons:
- survives_p = XMARKBIT (XCAR (obj));
+ survives_p = CONS_MARKED_P (XCONS (obj));
break;
case Lisp_Float:
- survives_p = XMARKBIT (XFLOAT (obj)->type);
+ survives_p = FLOAT_MARKED_P (XFLOAT (obj));
break;
default:
register int i;
int this_free = 0;
for (i = 0; i < lim; i++)
- if (!XMARKBIT (cblk->conses[i].car))
+ if (!CONS_MARKED_P (&cblk->conses[i]))
{
this_free++;
*(struct Lisp_Cons **)&cblk->conses[i].cdr = cons_free_list;
else
{
num_used++;
- XUNMARK (cblk->conses[i].car);
+ CONS_UNMARK (&cblk->conses[i]);
}
lim = CONS_BLOCK_SIZE;
/* If this block contains only free conses and we have already
*cprev = cblk->next;
/* Unhook from the free list. */
cons_free_list = *(struct Lisp_Cons **) &cblk->conses[0].cdr;
- lisp_free (cblk);
+ lisp_align_free (cblk);
n_cons_blocks--;
}
else
register int i;
int this_free = 0;
for (i = 0; i < lim; i++)
- if (!XMARKBIT (fblk->floats[i].type))
+ if (!FLOAT_MARKED_P (&fblk->floats[i]))
{
this_free++;
*(struct Lisp_Float **)&fblk->floats[i].data = float_free_list;
float_free_list = &fblk->floats[i];
-#if GC_MARK_STACK
- float_free_list->type = Vdead;
-#endif
}
else
{
num_used++;
- XUNMARK (fblk->floats[i].type);
+ FLOAT_UNMARK (&fblk->floats[i]);
}
lim = FLOAT_BLOCK_SIZE;
/* If this block contains only free floats and we have already
*fprev = fblk->next;
/* Unhook from the free list. */
float_free_list = *(struct Lisp_Float **) &fblk->floats[0].data;
- lisp_free (fblk);
+ lisp_align_free (fblk);
n_float_blocks--;
}
else
pure_bytes_used = 0;
pure_bytes_used_before_overflow = 0;
+ /* Initialize the list of free aligned blocks. */
+ free_ablock = NULL;
+
#if GC_MARK_STACK || defined GC_MALLOC_CHECK
mem_init ();
Vdead = make_pure_string ("DEAD", 4, 4, 0);
DEFVAR_LISP ("gc-elapsed", &Vgc_elapsed,
doc: /* Accumulated time elapsed in garbage collections.
-The time is in seconds as a floating point value.
-Programs may reset this to get statistics in a specific period. */);
+The time is in seconds as a floating point value. */);
DEFVAR_INT ("gcs-done", &gcs_done,
- doc: /* Accumulated number of garbage collections done.
-Programs may reset this to get statistics in a specific period. */);
+ doc: /* Accumulated number of garbage collections done. */);
defsubr (&Scons);
defsubr (&Slist);
defsubr (&Sgc_status);
#endif
}
+
+/* arch-tag: 6695ca10-e3c5-4c2c-8bc3-ed26a7dda857
+ (do not change this comment) */