+
+/* Alignment (and total size) of each ablock.
+ BLOCK_ALIGN has to be a power of 2. */
+#define BLOCK_ALIGN (1 << 10)
+
+/* Padding to leave at the end of a malloc'd block. This is to give
+ malloc a chance to minimize the amount of memory wasted to alignment.
+ It should be tuned to the particular malloc library used.
+ On glibc-2.3.2, malloc never tries to align, so a padding of 0 is best.
+ posix_memalign on the other hand would ideally prefer a value of 4
+ because otherwise, there's 1020 bytes wasted between each ablocks.
+ But testing shows that those 1020 will most of the time be efficiently
+ used by malloc to place other objects, so a value of 0 is still preferable
+ unless you have a lot of cons&floats and virtually nothing else. */
+#define BLOCK_PADDING 0
+/* Usable payload bytes per ablock: the aligned block size minus the
+ trailing `abase'/`busy' word (declared `struct ablocks *' in struct
+ ablock below) and any malloc padding. The original expression sized
+ a pointer to the nonexistent tag `struct aligned_block'; use the
+ actual field type for consistency (pointer sizes are identical). */
+#define BLOCK_BYTES \
+ (BLOCK_ALIGN - sizeof (struct ablocks *) - BLOCK_PADDING)
+
+/* Internal data structures and constants. */
+
+/* Number of consecutive ablocks carved out of one `struct ablocks'. */
+#define ABLOCKS_SIZE 16
+
+/* An aligned block of memory, BLOCK_ALIGN bytes in size. */
+struct ablock
+{
+ union
+ {
+ /* User data while the block is allocated. */
+ char payload[BLOCK_BYTES];
+ /* Link to the next free block while on the free_ablock list. */
+ struct ablock *next_free;
+ } x;
+ /* `abase' is the aligned base of the ablocks. */
+ /* It is overloaded to hold the virtual `busy' field that counts
+ the number of used ablock in the parent ablocks.
+ The first ablock has the `busy' field, the others have the `abase'
+ field. To tell the difference, we assume that pointers will have
+ integer values larger than 2 * ABLOCKS_SIZE. The lowest bit of `busy'
+ is used to tell whether the real base of the parent ablocks is `abase'
+ (if not, the word before the first ablock holds a pointer to the
+ real base). */
+ struct ablocks *abase;
+ /* The padding of all but the last ablock is unused. The padding of
+ the last ablock in an ablocks is not allocated. */
+#if BLOCK_PADDING
+ char padding[BLOCK_PADDING];
+#endif
+};
+
+/* A bunch of consecutive aligned blocks, carved out of one
+ malloc'd (or posix_memalign'd) region. */
+struct ablocks
+{
+ struct ablock blocks[ABLOCKS_SIZE];
+};
+
+/* Size of the block requested from malloc or memalign. */
+#define ABLOCKS_BYTES (sizeof (struct ablocks) - BLOCK_PADDING)
+
+/* Aligned base of the ablocks containing BLOCK. A small integer value
+ in `abase' is really the overloaded `busy' counter of the first
+ ablock, in which case BLOCK itself is the base. */
+#define ABLOCK_ABASE(block) \
+ (((unsigned long) (block)->abase) <= (1 + 2 * ABLOCKS_SIZE) \
+ ? (struct ablocks *)(block) \
+ : (block)->abase)
+
+/* Virtual `busy' field. */
+#define ABLOCKS_BUSY(abase) ((abase)->blocks[0].abase)
+
+/* Pointer to the (not necessarily aligned) malloc block. */
+#ifdef HAVE_POSIX_MEMALIGN
+#define ABLOCKS_BASE(abase) (abase)
+#else
+/* Low bit of `busy' set means the chunk was already aligned; otherwise
+ the word just before the first ablock holds the real malloc base. */
+#define ABLOCKS_BASE(abase) \
+ (1 & (long) ABLOCKS_BUSY (abase) ? abase : ((void**)abase)[-1])
+#endif
+
+/* The list of free ablocks, threaded through x.next_free. */
+static struct ablock *free_ablock;
+
+/* Allocate an aligned block of nbytes.
+ Alignment is on a multiple of BLOCK_ALIGN and `nbytes' has to be
+ smaller or equal to BLOCK_BYTES. Refills the free list from a fresh
+ ablocks chunk when empty; calls memory_full on allocation failure. */
+static POINTER_TYPE *
+lisp_align_malloc (nbytes, type)
+ size_t nbytes;
+ enum mem_type type;
+{
+ void *base, *val;
+ struct ablocks *abase;
+
+ eassert (nbytes <= BLOCK_BYTES);
+
+ BLOCK_INPUT;
+
+#ifdef GC_MALLOC_CHECK
+ allocated_mem_type = type;
+#endif
+
+ if (!free_ablock)
+ {
+ int i;
+ EMACS_INT aligned; /* int gets warning casting to 64-bit pointer. */
+
+#ifdef DOUG_LEA_MALLOC
+ /* Prevent mmap'ing the chunk. Lisp data may not be mmap'ed
+ because mapped region contents are not preserved in
+ a dumped Emacs. */
+ mallopt (M_MMAP_MAX, 0);
+#endif
+
+#ifdef HAVE_POSIX_MEMALIGN
+ {
+ int err = posix_memalign (&base, BLOCK_ALIGN, ABLOCKS_BYTES);
+ abase = err ? (base = NULL) : base;
+ }
+#else
+ base = malloc (ABLOCKS_BYTES);
+ abase = ALIGN (base, BLOCK_ALIGN);
+#endif
+
+ /* Check for failure on both allocation paths. This check used to
+ live in the #else branch only, so a failed posix_memalign left
+ base/abase NULL and we dereferenced abase below. */
+ if (base == 0)
+ {
+ UNBLOCK_INPUT;
+ memory_full ();
+ }
+
+ aligned = (base == abase);
+ if (!aligned)
+ ((void**)abase)[-1] = base;
+
+#ifdef DOUG_LEA_MALLOC
+ /* Back to a reasonable maximum of mmap'ed areas. */
+ mallopt (M_MMAP_MAX, MMAP_MAX_AREAS);
+#endif
+
+#ifndef USE_LSB_TAG
+ /* If the memory just allocated cannot be addressed thru a Lisp
+ object's pointer, and it needs to be, that's equivalent to
+ running out of memory. */
+ if (type != MEM_TYPE_NON_LISP)
+ {
+ Lisp_Object tem;
+ char *end = (char *) base + ABLOCKS_BYTES - 1;
+ XSETCONS (tem, end);
+ if ((char *) XCONS (tem) != end)
+ {
+ lisp_malloc_loser = base;
+ free (base);
+ UNBLOCK_INPUT;
+ memory_full ();
+ }
+ }
+#endif
+
+ /* Initialize the blocks and put them on the free list.
+ If `base' was not properly aligned, we can't use the last block. */
+ for (i = 0; i < (aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1); i++)
+ {
+ abase->blocks[i].abase = abase;
+ abase->blocks[i].x.next_free = free_ablock;
+ free_ablock = &abase->blocks[i];
+ }
+ /* busy = 0 so far; low bit records whether the chunk was aligned. */
+ ABLOCKS_BUSY (abase) = (struct ablocks *) (long) aligned;
+
+ eassert (0 == ((EMACS_UINT)abase) % BLOCK_ALIGN);
+ eassert (ABLOCK_ABASE (&abase->blocks[3]) == abase); /* 3 is arbitrary */
+ eassert (ABLOCK_ABASE (&abase->blocks[0]) == abase);
+ eassert (ABLOCKS_BASE (abase) == base);
+ eassert (aligned == (long) ABLOCKS_BUSY (abase));
+ }
+
+ /* Pop the head of the free list and bump the parent's busy count
+ (held in the upper bits of the overloaded `busy' field). */
+ abase = ABLOCK_ABASE (free_ablock);
+ ABLOCKS_BUSY (abase) = (struct ablocks *) (2 + (long) ABLOCKS_BUSY (abase));
+ val = free_ablock;
+ free_ablock = free_ablock->x.next_free;
+
+#if GC_MARK_STACK && !defined GC_MALLOC_CHECK
+ if (val && type != MEM_TYPE_NON_LISP)
+ mem_insert (val, (char *) val + nbytes, type);
+#endif
+
+ UNBLOCK_INPUT;
+ if (!val && nbytes)
+ memory_full ();
+
+ eassert (0 == ((EMACS_UINT)val) % BLOCK_ALIGN);
+ return val;
+}