/* Profiler implementation.

Copyright (C) 2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "lisp.h"
#include "syssignal.h"
#include "systime.h"

/* Return A + B, but return the maximum fixnum if the result would overflow.
   Assume A and B are nonnegative and in fixnum range.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  return min (a + b, MOST_POSITIVE_FIXNUM);
}

/* Logs.  */

typedef struct Lisp_Hash_Table log_t;

static Lisp_Object
make_log (int heap_size, int max_stack_depth)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  Lisp_Object log = make_hash_table (Qequal, make_number (heap_size),
                                     make_float (DEFAULT_REHASH_SIZE),
                                     make_float (DEFAULT_REHASH_THRESHOLD),
                                     Qnil, Qnil, Qnil);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  */
  int i = ASIZE (h->key_and_value) / 2;
  while (0 < i)
    set_hash_key_slot (h, --i,
                       Fmake_vector (make_number (max_stack_depth), Qnil));
  return log;
}
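
/* With the default `profiler-log-size' of 10000 and
   `profiler-max-stack-depth' of 16 (see syms_of_profiler below), the
   log above pre-allocates 10000 key vectors of 16 slots each, so that
   recording a sample never needs to allocate memory from within a
   signal handler.  */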

/* Evict the least used half of the hash table.

   When the table is full, we have to evict someone.
   The easiest and most efficient approach would be to evict the value
   we're about to add (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  This gives an amortized
   cost of O(1) per insertion, and a new entry then has O(N) samples'
   worth of time to grow larger than the other low counts before the
   next round of eviction.  */

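/* Approximate the median of the counts in entries START .. START+SIZE-1
   of LOG by recursively taking the median of the medians of three
   thirds, in the spirit of the median-of-medians technique.  This runs
   in O(SIZE) time and need not return the true median; it merely has
   to be a reasonable eviction threshold.  */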
static EMACS_INT
approximate_median (log_t *log, ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
             + XINT (HASH_VALUE (log, start + 1)))
            / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
                                         size - 2 * newsize);
      return (i1 < i2
              ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
              : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}

static void
evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        eassert (EQ (log->next_free, make_number (i)));
        {
          int j;
          eassert (VECTORP (key));
          for (j = 0; j < ASIZE (key); j++)
            ASET (key, j, Qnil);
        }
        set_hash_key_slot (log, i, key);
      }
}
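
/* For illustration: if the table holds counts 1, 5, 2, 9 and 3, and
   approximate_median returns 3, the entries with counts 1, 2 and 3 are
   removed, their key vectors are wiped with nil and re-installed as
   pre-filled keys in the freed slots, and the entries with counts 5
   and 9 survive.  */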

/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: milliseconds for CPU counts, and the allocation
   size for memory logs.  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  struct backtrace *backlist = backtrace_list;
  Lisp_Object backtrace;
  ptrdiff_t index, i = 0;
  ptrdiff_t asize;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  asize = ASIZE (backtrace);

  /* Copy the backtrace contents into working memory.  */
  for (; i < asize && backlist; i++, backlist = backlist->next)
    /* FIXME: For closures we should ignore the environment.  */
    ASET (backtrace, i, backlist->function);

  /* Make sure that unused space of working memory is filled with nil.  */
  for (; i < asize; i++)
    ASET (backtrace, i, Qnil);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        eassert (!NILP (log->next_free));
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag checked in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
             Of course, using Elisp is generally preferable, but it may
             take longer until we get a chance to run the Elisp code, so
             there's more risk that the table will get full before we
             get there.  */
      }
  }
}
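
/* For illustration (function names hypothetical): a CPU sample taken
   while `my-handler' is called from `my-command' yields a key vector
   like [my-handler my-command nil ... nil], innermost frame first and
   padded with nil up to `profiler-max-stack-depth', whose associated
   value accumulates the sampling intervals in milliseconds.  */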
\f
/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_TIMER_SETTIME
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  */
static enum profiler_cpu_running
  { NOT_RUNNING, TIMER_SETTIME_RUNNING, SETITIMER_RUNNING }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sampling interval in milliseconds.  */
static EMACS_INT current_sampling_interval;

/* Signal handler for sampling profiler.  */

static void
handle_profiler_signal (int signal)
{
  if (backtrace_list && EQ (backtrace_list->function, Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, current_sampling_interval);
  else
    {
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), current_sampling_interval);
    }
}

static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

static enum profiler_cpu_running
setup_cpu_timer (Lisp_Object sampling_interval)
{
  struct sigaction action;
  struct itimerval timer;
  struct timespec interval;

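  /* Reject sampling intervals that are not positive, or whose
     second count, after conversion from milliseconds, would not fit
     in time_t.  */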
  if (! RANGED_INTEGERP (1, sampling_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / 1000
                          ? (EMACS_INT) TYPE_MAXIMUM (time_t) * 1000 + 999
                          : EMACS_INT_MAX)))
    return NOT_RUNNING;

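  /* Convert the interval from milliseconds to seconds and nanoseconds;
     e.g. an interval of 1500 ms becomes 1 s + 500000000 ns.  */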
  current_sampling_interval = XINT (sampling_interval);
  interval = make_emacs_time (current_sampling_interval / 1000,
                              current_sampling_interval % 1000 * 1000000);
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_TIMER_SETTIME
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      int i;
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (i = 0; i < sizeof system_clock / sizeof *system_clock; i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = true;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      timer_settime (profiler_timer, 0, &ispec, 0);
      return TIMER_SETTIME_RUNNING;
    }
#endif

  timer.it_value = timer.it_interval = make_timeval (interval);
  setitimer (ITIMER_PROF, &timer, 0);
  return SETITIMER_RUNNING;
}

DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples every SAMPLING-INTERVAL milliseconds.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log (profiler_log_size,
                          profiler_max_stack_depth);
    }

  profiler_cpu_running = setup_cpu_timer (sampling_interval);
  if (! profiler_cpu_running)
    error ("Invalid sampling interval");

  return Qt;
}
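
/* Example usage from Elisp (illustrative):
     (profiler-cpu-start 1)   ; sample the call stack every millisecond
   The higher-level commands in profiler.el call this function on the
   user's behalf.  */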

DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_TIMER_SETTIME
    case TIMER_SETTIME_RUNNING:
      {
        struct itimerspec disable;
        memset (&disable, 0, sizeof disable);
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

    case SETITIMER_RUNNING:
      {
        struct itimerval disable;
        memset (&disable, 0, sizeof disable);
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
    }

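  /* Discard any SIGPROF that may still be pending now that the timer
     has been disarmed.  */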
  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
       0, 0, 0,
       doc: /* Return non-nil if the cpu profiler is running.  */)
  (void)
{
  return profiler_cpu_running ? Qt : Qnil;
}

DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
       0, 0, 0,
       doc: /* Return the current cpu profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = cpu_log;
  /* Here we're making the log visible to Elisp, so it's no longer safe
     for our own use afterwards, since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  cpu_log = (profiler_cpu_running
             ? make_log (profiler_log_size, profiler_max_stack_depth)
             : Qnil);
  Fputhash (Fmake_vector (make_number (1), Qautomatic_gc),
            make_number (cpu_gc_count),
            result);
  cpu_gc_count = 0;
  return result;
}
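
/* The returned log might look like this (function names hypothetical):
     #s(hash-table test equal ... data ([my-handler my-command nil ...] 120
                                        [automatic_gc] 30 ...))
   i.e. 120 ms were spent in `my-handler' called from `my-command', and
   30 ms in the garbage collector.  */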
#endif /* PROFILER_CPU_SUPPORT */
\f
/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

static Lisp_Object memory_log;

DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  if (NILP (memory_log))
    memory_log = make_log (profiler_log_size,
                           profiler_max_stack_depth);

  profiler_memory_running = true;

  return Qt;
}

DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
       0, 0, 0,
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  if (!profiler_memory_running)
    return Qnil;
  profiler_memory_running = false;
  return Qt;
}

DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
       0, 0, 0,
       doc: /* Return non-nil if memory profiler is running.  */)
  (void)
{
  return profiler_memory_running ? Qt : Qnil;
}

DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
       0, 0, 0,
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp, so it's no longer safe
     for our own use afterwards, since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  memory_log = (profiler_memory_running
                ? make_log (profiler_log_size, profiler_max_stack_depth)
                : Qnil);
  return result;
}

\f
/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.  */
void
malloc_probe (size_t size)
{
  eassert (HASH_TABLE_P (memory_log));
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
}
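
/* For example, an allocation of 4096 bytes adds 4096 to the counter of
   the current backtrace in the memory log; SIZE is clamped to
   MOST_POSITIVE_FIXNUM so that the recorded weight always fits in a
   fixnum.  */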

void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
	      doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
	      doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);
}