/* Profiler implementation.

Copyright (C) 2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "lisp.h"
#include "syssignal.h"
#include "systime.h"

/* Return A + B, but return the maximum fixnum if the result would overflow.
   Assume A and B are nonnegative and in fixnum range.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  return min (a + b, MOST_POSITIVE_FIXNUM);
}
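
/* For example, once a counter has reached MOST_POSITIVE_FIXNUM,
   saturated_add keeps returning MOST_POSITIVE_FIXNUM instead of
   wrapping around; the counters accumulated in the logs below rely on
   this to stay meaningful over very long profiling runs.  */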

/* Logs.  */

typedef struct Lisp_Hash_Table log_t;

static Lisp_Object
make_log (int heap_size, int max_stack_depth)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  Lisp_Object log = make_hash_table (Qequal, make_number (heap_size),
				     make_float (DEFAULT_REHASH_SIZE),
				     make_float (DEFAULT_REHASH_THRESHOLD),
				     Qnil, Qnil, Qnil);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  */
  int i = ASIZE (h->key_and_value) / 2;
  while (0 < i)
    set_hash_key_slot (h, --i,
		       Fmake_vector (make_number (max_stack_depth), Qnil));
  return log;
}
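
/* With the default `profiler-log-size' of 10000 and
   `profiler-max-stack-depth' of 16 (see syms_of_profiler at the end
   of this file), make_log pre-fills every key slot -- about 10000 of
   them -- with a 16-slot vector, so that record_backtrace never has
   to allocate memory from within a signal handler.  */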

/* Evict the least used half of the hash_table.

   When the table is full, we have to evict someone.
   The easiest and most efficient option is to evict the value we're
   about to add (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */

static EMACS_INT approximate_median (log_t *log,
				     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
	     + XINT (HASH_VALUE (log, start + 1)))
	    / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
					 size - 2 * newsize);
      return (i1 < i2
	      ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
	      : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}
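
/* A concrete sketch of the recursion above: on the nine counts
   {4, 1, 7, 3, 9, 5, 8, 2, 6}, each third of size 3 yields its true
   median (4, 5 and 6 respectively), and the median of those medians,
   5, is the returned estimate, which here happens to coincide with
   the true median.  In general a median-of-medians scheme like this
   is only guaranteed to land somewhere in the middle portion of the
   data, which is all that evict_lower_half needs.  */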

static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
	Lisp_Object key = HASH_KEY (log, i);
	{ /* FIXME: we could make this more efficient.  */
	  Lisp_Object tmp;
	  XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
	  Fremhash (key, tmp);
	}
	eassert (EQ (log->next_free, make_number (i)));
	{
	  int j;
	  eassert (VECTORP (key));
	  for (j = 0; j < ASIZE (key); j++)
	    ASET (key, j, Qnil);
	}
	set_hash_key_slot (log, i, key);
      }
}
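
/* Note the trick above: after Fremhash, slot I heads the table's
   free list (as the eassert checks), so set_hash_key_slot can
   reinstall the same, now nil-filled, vector as the pre-allocated key
   for that slot.  One call to evict_lower_half frees at least half
   the slots, so thousands of new samples can then be recorded before
   the next O(N) eviction round -- the amortized O(1) behavior
   described before approximate_median.  */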

/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: milliseconds for CPU counts, and the allocation
   size for memory logs.  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  struct backtrace *backlist = backtrace_list;
  Lisp_Object backtrace;
  ptrdiff_t index, i = 0;
  ptrdiff_t asize;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  asize = ASIZE (backtrace);

  /* Copy the backtrace contents into working memory.  */
  for (; i < asize && backlist; i++, backlist = backlist->next)
    /* FIXME: For closures we should ignore the environment.  */
    ASET (backtrace, i, backlist->function);

  /* Make sure that unused space of working memory is filled with nil.  */
  for (; i < asize; i++)
    ASET (backtrace, i, Qnil);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
	EMACS_INT old_val = XINT (HASH_VALUE (log, j));
	EMACS_INT new_val = saturated_add (old_val, count);
	set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
	   But currently it only does that if log->next_free is nil.  */
	ptrdiff_t j;
	eassert (!NILP (log->next_free));
	j = hash_put (log, backtrace, make_number (count), hash);
	/* Let's make sure we've put `backtrace' right where it
	   already was to start with.  */
	eassert (index == j);

	/* FIXME: If the hash-table is almost full, we should set
	   some global flag so that some Elisp code can offload its
	   data elsewhere, so as to avoid the eviction code.
	   There are 2 ways to do that, AFAICT:
	   - Set a flag checked in QUIT, such that QUIT can then call
	     Fprofiler_cpu_log and stash the full log for later use.
	   - Set a flag checked in post-gc-hook, so that Elisp code can call
	     profiler-cpu-log.  That gives us more flexibility since that
	     Elisp code can then do all kinds of fun stuff like write
	     the log to disk.  Or turn it right away into a call tree.
	     Of course, using Elisp is generally preferable, but it may
	     take longer until we get a chance to run the Elisp code, so
	     there's more risk that the table will get full before we
	     get there.  */
      }
  }
}
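
/* The resulting log maps keys of the form [f1 f2 ... fN nil ... nil]
   -- the backtrace with the innermost frame first, padded with nil --
   to saturating counters; this is the shape documented in
   profiler-cpu-log and profiler-memory-log below.  */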
\f
/* Sample profiler.  */

/* FIXME: Add support for the CPU profiler in W32.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_TIMER_SETTIME
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  */
static enum profiler_cpu_running
  { NOT_RUNNING, TIMER_SETTIME_RUNNING, SETITIMER_RUNNING }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sample interval in milliseconds.  */
static EMACS_INT current_sample_interval;

/* Signal handler for sample profiler.  */

static void
handle_profiler_signal (int signal)
{
  if (backtrace_list && EQ (backtrace_list->function, Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, current_sample_interval);
  else
    {
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), current_sample_interval);
    }
}

static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

static enum profiler_cpu_running
setup_cpu_timer (Lisp_Object sample_interval)
{
  struct sigaction action;
  struct itimerval timer;
  struct timespec interval;

  if (! RANGED_INTEGERP (1, sample_interval,
			 (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / 1000
			  ? (EMACS_INT) TYPE_MAXIMUM (time_t) * 1000 + 999
			  : EMACS_INT_MAX)))
    return NOT_RUNNING;

  current_sample_interval = XINT (sample_interval);
  interval = make_emacs_time (current_sample_interval / 1000,
			      current_sample_interval % 1000 * 1000000);
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_TIMER_SETTIME
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
	CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
	CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
	CLOCK_MONOTONIC,
#endif
	CLOCK_REALTIME
      };
      int i;
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (i = 0; i < sizeof system_clock / sizeof *system_clock; i++)
	if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
	  {
	    profiler_timer_ok = 1;
	    break;
	  }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      timer_settime (profiler_timer, 0, &ispec, 0);
      return TIMER_SETTIME_RUNNING;
    }
#endif

  timer.it_value = timer.it_interval = make_timeval (interval);
  setitimer (ITIMER_PROF, &timer, 0);
  return SETITIMER_RUNNING;
}
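
/* If no POSIX timer could be created above (or HAVE_TIMER_SETTIME is
   not defined at all), setup_cpu_timer falls back to the traditional
   setitimer/ITIMER_PROF interface, which counts CPU time consumed by
   the whole process.  */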

DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples every SAMPLE-INTERVAL milliseconds.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sample_interval)
{
  if (profiler_cpu_running)
    error ("Sample profiler is already running");

  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log (profiler_log_size,
			  profiler_max_stack_depth);
    }

  profiler_cpu_running = setup_cpu_timer (sample_interval);
  if (! profiler_cpu_running)
    error ("Invalid sample interval");

  return Qt;
}
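
/* A typical profiling session from Elisp, using just the primitives
   defined in this file, might look like:

     (profiler-cpu-start 1)     ; sample once per millisecond
     ;; ... run the code to be profiled ...
     (profiler-cpu-stop)
     (profiler-cpu-log)         ; => hash-table: backtrace -> time

   The higher-level commands in profiler.el build on these
   primitives.  */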

DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_TIMER_SETTIME
    case TIMER_SETTIME_RUNNING:
      {
	struct itimerspec disable;
	memset (&disable, 0, sizeof disable);
	timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

    case SETITIMER_RUNNING:
      {
	struct itimerval disable;
	memset (&disable, 0, sizeof disable);
	setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
    }

  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
       0, 0, 0,
       doc: /* Return non-nil if cpu profiler is running.  */)
  (void)
{
  return profiler_cpu_running ? Qt : Qnil;
}

DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
       0, 0, 0,
       doc: /* Return the current cpu profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = cpu_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  cpu_log = (profiler_cpu_running
	     ? make_log (profiler_log_size, profiler_max_stack_depth)
	     : Qnil);
  Fputhash (Fmake_vector (make_number (1), Qautomatic_gc),
	    make_number (cpu_gc_count),
	    result);
  cpu_gc_count = 0;
  return result;
}
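
/* Note that the time spent in GC, which handle_profiler_signal
   accumulates separately in cpu_gc_count, is reported in the returned
   log under the one-element backtrace [automatic-gc], keeping it
   distinct from the regular samples.  */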
#endif /* PROFILER_CPU_SUPPORT */
\f
/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

static Lisp_Object memory_log;

DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  if (NILP (memory_log))
    memory_log = make_log (profiler_log_size,
			   profiler_max_stack_depth);

  profiler_memory_running = true;

  return Qt;
}

DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
       0, 0, 0,
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  if (!profiler_memory_running)
    return Qnil;
  profiler_memory_running = false;
  return Qt;
}

DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
       0, 0, 0,
       doc: /* Return non-nil if memory profiler is running.  */)
  (void)
{
  return profiler_memory_running ? Qt : Qnil;
}

DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
       0, 0, 0,
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  memory_log = (profiler_memory_running
		? make_log (profiler_log_size, profiler_max_stack_depth)
		: Qnil);
  return result;
}

\f
/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.  */
void
malloc_probe (size_t size)
{
  eassert (HASH_TABLE_P (memory_log));
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
}
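
/* A single allocation could in principle exceed fixnum range, so the
   size is clamped to MOST_POSITIVE_FIXNUM above; saturated_add then
   keeps the accumulated per-backtrace totals from wrapping.  */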

void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
	      doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
	      doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);
}