/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/as.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "kmem.h"
#include "leaky_impl.h"

/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */

#define	TYPE_VMEM	0		/* lkb_data is the vmem_seg's size */
#define	TYPE_CACHE	1		/* lkb_cid is the buffer's cache */
#define	TYPE_KMEM	2		/* lkb_cid is the bufctl's cache */

#define	LKM_CTL_BUFCTL	0	/* normal allocation, PTR is bufctl */
#define	LKM_CTL_VMSEG	1	/* oversize allocation, PTR is vmem_seg_t */
#define	LKM_CTL_CACHE	2	/* normal alloc, non-debug, PTR is cache */
#define	LKM_CTL_MASK	3L

#define	LKM_CTL(ptr, type)	(LKM_CTLPTR(ptr) | (type))
#define	LKM_CTLPTR(ctl)		((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define	LKM_CTLTYPE(ctl)	((uintptr_t)(ctl) &  (LKM_CTL_MASK))
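
/*
 * The type tag is stashed in the low two bits of the pointer itself:
 * bufctls, vmem_segs, and kmem_cache_ts are all word-aligned, so those
 * bits of their addresses are always zero.  LKM_CTL() packs a pointer
 * and a type together; LKM_CTLPTR() and LKM_CTLTYPE() recover the halves.
 */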

static int kmem_lite_count = 0;	/* cache of the kernel's version */

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = (uintptr_t)bcp->bc_addr;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

	return (WALK_NEXT);
}

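/*
 * For caches without audit data we only see the buffer's address here;
 * its limit is filled in later by leaky_cache(), which knows the cache's
 * bufsize.
 */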
/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = addr;

	return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = (*lmp)++;

	lm->lkm_base = seg->vs_start;
	lm->lkm_limit = seg->vs_end;
	lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

	return (WALK_NEXT);
}

static int
leaky_vmem_interested(const vmem_t *vmem)
{
	if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
	    strcmp(vmem->vm_name, "static_alloc") != 0)
		return (0);
	return (1);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
		mdb_warn("can't walk vmem_alloc for %s (%p)",
		    vmem->vm_name, addr);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
	if (!leaky_vmem_interested(vmem))
		return (WALK_NEXT);

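	/*
	 * The number of outstanding segments is the arena's cumulative
	 * allocation count minus its cumulative free count.
	 */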
	*est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
	    vmem->vm_kstat.vk_free.value.ui64);

	return (WALK_NEXT);
}

static int
leaky_interested(const kmem_cache_t *c)
{
	vmem_t vmem;

	/*
	 * ignore HAT-related caches that happen to derive from kmem_default
	 */
	if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
	    strcmp(c->cache_name, "sf_hment_cache") == 0 ||
	    strcmp(c->cache_name, "pa_hment_cache") == 0)
		return (0);

	if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
		mdb_warn("cannot read arena %p for cache '%s'",
		    (uintptr_t)c->cache_arena, c->cache_name);
		return (0);
	}

	/*
	 * If this cache isn't allocating from the kmem_default,
	 * kmem_firewall, or static vmem arenas, we're not interested.
	 */
	if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
	    strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
	    strcmp(vmem.vm_name, "static") != 0)
		return (0);

	return (1);
}

static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	*est += kmem_estimate_allocated(addr, c);

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
	leak_mtab_t *lm = *lmp;
	mdb_walk_cb_t cb;
	const char *walk;
	int audit = (c->cache_flags & KMF_AUDIT);

	if (!leaky_interested(c))
		return (WALK_NEXT);

	if (audit) {
		walk = "bufctl";
		cb = (mdb_walk_cb_t)leaky_mtab;
	} else {
		walk = "kmem";
		cb = (mdb_walk_cb_t)leaky_mtab_addr;
	}
	if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
		mdb_warn("can't walk kmem for cache %p (%s)", addr,
		    c->cache_name);
		return (WALK_DONE);
	}

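	/*
	 * The walk only recorded base addresses; derive each buffer's limit
	 * from the cache's bufsize, and tag non-audit entries with the
	 * cache's address so the leak can be attributed later.
	 */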
	for (; lm < *lmp; lm++) {
		lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
		if (!audit)
			lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
	leaky_grep(addr, c->cache_bufsize);

	/*
	 * free, constructed KMF_LITE buffers keep their first uint64_t in
	 * their buftag's redzone.
	 */
	if (c->cache_flags & KMF_LITE) {
		/* LINTED alignment */
		kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
		leaky_grep((uintptr_t)&btp->bt_redzone,
		    sizeof (btp->bt_redzone));
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
	if (!leaky_interested(c))
		return (WALK_NEXT);

	/*
	 * Scan all of the free, constructed buffers, since they may have
	 * pointers to allocated objects.
	 */
	if (mdb_pwalk("freemem_constructed",
	    (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
		mdb_warn("can't walk freemem_constructed for cache %p (%s)",
		    addr, c->cache_name);
		return (WALK_DONE);
	}

	return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
	struct module mod;
	char name[MODMAXNAMELEN];

	if (m->mod_mp == NULL)
		return (WALK_NEXT);

	if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
		mdb_warn("couldn't read modctl %p's module", addr);
		return (WALK_NEXT);
	}

	if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
		(void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

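	/*
	 * Scan the module structure itself, along with the module's data
	 * and bss segments, any of which may hold pointers to kmem buffers.
	 */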
	leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
	leaky_grep((uintptr_t)mod.data, mod.data_size);
	leaky_grep((uintptr_t)mod.bss, mod.bss_size);

	return (WALK_NEXT);
}

static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
	uintptr_t size, base = (uintptr_t)t->t_stkbase;
	uintptr_t stk = (uintptr_t)t->t_stk;

	/*
	 * If this thread isn't in memory, we can't look at its stack.  This
	 * may result in false positives, so we print a warning.
	 */
	if (!(t->t_schedflag & TS_LOAD)) {
		mdb_printf("findleaks: thread %p's stack swapped out; "
		    "false positives possible\n", addr);
		return (WALK_NEXT);
	}

	if (t->t_state != TS_FREE)
		leaky_grep(base, stk - base);

	/*
	 * There is always gunk hanging out between t_stk and the page
	 * boundary.  If this thread structure wasn't kmem allocated,
	 * this will include the thread structure itself.  If the thread
	 * _is_ kmem allocated, we'll be able to get to it via allthreads.
	 */
	size = *pagesize - (stk & (*pagesize - 1));

	leaky_grep(stk, size);

	return (WALK_NEXT);
}

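/*
 * kstat data lives in segments allocated from the kstat arena and may
 * itself point at kmem buffers, so every allocated segment is scanned.
 */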
/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
	leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

	return (WALK_NEXT);
}

static void
leaky_kludge(void)
{
	GElf_Sym sym;
	mdb_ctf_id_t id, rid;

	int max_mem_nodes;
	uintptr_t *counters;
	size_t ncounters;
	ssize_t hwpm_size;
	int idx;

	/*
	 * Because of DR, the page counters (which live in the kmem64 segment)
	 * can point into kmem_alloc()ed memory.  The "page_counters" array
	 * is multi-dimensional, and each entry points to an array of
	 * "hw_page_map_t"s which is "max_mem_nodes" in length.
	 *
	 * To keep this from having too much grotty knowledge of internals,
	 * we use CTF data to get the size of the structure.  For simplicity,
	 * we treat the page_counters array as a flat array of pointers, and
	 * use its size to determine how much to scan.  Unused entries will
	 * be NULL.
	 */
	if (mdb_lookup_by_name("page_counters", &sym) == -1) {
		mdb_warn("unable to lookup page_counters");
		return;
	}

	if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
		mdb_warn("unable to read max_mem_nodes");
		return;
	}

	if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
	    mdb_ctf_type_resolve(id, &rid) == -1 ||
	    (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
		mdb_warn("unable to lookup unix`hw_page_map_t");
		return;
	}

	counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

	if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
		mdb_warn("unable to read page_counters");
		return;
	}

	ncounters = sym.st_size / sizeof (counters[0]);

	for (idx = 0; idx < ncounters; idx++) {
		uintptr_t addr = counters[idx];
		if (addr != 0)
			leaky_grep(addr, hwpm_size * max_mem_nodes);
	}
}

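/*
 * The estimate phase: count the outstanding kmem and oversize allocations
 * so leaky.c can size its mtab before the fill phase runs.
 */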
int
leaky_subr_estimate(size_t *estp)
{
	uintptr_t panicstr;
	int state;

	if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
		mdb_warn("findleaks: can only be run on a system "
		    "dump or under kmdb; see dumpadm(1M)\n");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&panicstr, "panicstr") == -1) {
		mdb_warn("can't read variable 'panicstr'");
		return (DCMD_ERR);
	}

	if (state != MDB_STATE_STOPPED && panicstr == NULL) {
		mdb_warn("findleaks: cannot be run on a live dump.\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (*estp == 0) {
		mdb_warn("findleaks: no buffers found\n");
		return (DCMD_ERR);
	}

	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

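/*
 * The fill phase: record one leak_mtab_t entry for every outstanding
 * allocation found by the estimate phase.
 */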
int
leaky_subr_fill(leak_mtab_t **lmpp)
{
	if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
		mdb_warn("couldn't walk 'vmem'");
		return (DCMD_ERR);
	}

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

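	/*
	 * kmem_lite_count is cached so leaky_subr_add_leak() knows whether
	 * KMF_LITE buftags record an allocation history; if the value can't
	 * be read or is absurd, bt_history is simply ignored.
	 */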
	if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
		mdb_warn("couldn't read 'kmem_lite_count'");
		kmem_lite_count = 0;
	} else if (kmem_lite_count > 16) {
		mdb_warn("kmem_lite_count nonsensical, ignored\n");
		kmem_lite_count = 0;
	}

	return (DCMD_OK);
}

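/*
 * The scan phase: grep every region that may legitimately hold pointers
 * to kmem buffers: page counters, free constructed buffers, module data
 * and bss, thread stacks, and kstats.  Any buffer left unmarked at the
 * end of this pass is a leak candidate.
 */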
int
leaky_subr_run(void)
{
	unsigned long ps = PAGESIZE;
	uintptr_t kstat_arena;
	uintptr_t dmods;

	leaky_kludge();

	if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
	    NULL) == -1) {
		mdb_warn("couldn't walk 'kmem_cache'");
		return (DCMD_ERR);
	}

	if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
		mdb_warn("couldn't walk 'modctl'");
		return (DCMD_ERR);
	}

	/*
	 * If kmdb is loaded, we also need to walk its module list, since
	 * kmdb modctl structures can reference kmem allocations.
	 */
	if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != NULL))
		(void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
		    NULL, dmods);

	if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'thread'");
		return (DCMD_ERR);
	}

	if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
		mdb_warn("couldn't walk 'deathrow'");
		return (DCMD_ERR);
	}

	if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
		mdb_warn("couldn't read 'kstat_arena'");
		return (DCMD_ERR);
	}

	if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
	    NULL, kstat_arena) == -1) {
		mdb_warn("couldn't walk kstat vmem arena");
		return (DCMD_ERR);
	}

	return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
	uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
	size_t depth;

	switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
	case LKM_CTL_VMSEG: {
		vmem_seg_t vs;

		if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
			mdb_warn("couldn't read leaked vmem_seg at addr %p",
			    addr);
			return;
		}
		depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

		leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
		    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
		break;
	}
	case LKM_CTL_BUFCTL: {
		kmem_bufctl_audit_t bc;

		if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
			mdb_warn("couldn't read leaked bufctl at addr %p",
			    addr);
			return;
		}

		depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

		/*
		 * The top of the stack will be kmem_cache_alloc+offset.
		 * Since the offset in kmem_cache_alloc() isn't interesting,
		 * we skip that frame for the purposes of uniquifying stacks.
		 *
		 * We also use the cache pointer as the leak's cid, to
		 * prevent the coalescing of leaks from different caches.
		 */
		if (depth > 0)
			depth--;
		leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
		    bc.bc_timestamp, bc.bc_stack + 1, depth,
		    (uintptr_t)bc.bc_cache, 0);
		break;
	}
	case LKM_CTL_CACHE: {
		kmem_cache_t cache;
		kmem_buftag_lite_t bt;
		pc_t caller = 0;
		int depth = 0;

		/*
		 * For KMF_LITE caches, we can get the allocation PC
		 * out of the buftag structure.
		 */
		if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
		    (cache.cache_flags & KMF_LITE) &&
		    kmem_lite_count > 0 &&
		    mdb_vread(&bt, sizeof (bt),
		    /* LINTED alignment */
		    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
			caller = bt.bt_history[0];
			depth = 1;
		}
		leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
		    &caller, depth, addr, addr);
		break;
	}
	default:
		mdb_warn("internal error: invalid leak_bufctl_t\n");
		break;
	}
}

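/*
 * Find the first caller in the stack that isn't part of the allocator
 * itself (i.e., doesn't begin with "kmem_" or "vmem_"); it is used both
 * to sort leaks and to attribute each leak to its caller.
 */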
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
	int i;
	GElf_Sym sym;
	uintptr_t pc = 0;

	buf[0] = 0;

	for (i = 0; i < depth; i++) {
		pc = stack[i];

		if (mdb_lookup_by_addr(pc,
		    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
			continue;
		if (strncmp(buf, "kmem_", 5) == 0)
			continue;
		if (strncmp(buf, "vmem_", 5) == 0)
			continue;
		*pcp = pc;

		return;
	}

	/*
	 * We're only here if the entire call chain begins with "kmem_" or
	 * "vmem_"; this shouldn't happen, but we'll just use the last caller.
	 */
	*pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
	char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
	uintptr_t lcaller, rcaller;
	int rval;

	leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
	leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

	if ((rval = strcmp(lbuf, rbuf)) != 0)
		return (rval);

	if (lcaller < rcaller)
		return (-1);

	if (lcaller > rcaller)
		return (1);

	if (lhs->lkb_data < rhs->lkb_data)
		return (-1);

	if (lhs->lkb_data > rhs->lkb_data)
		return (1);

	return (0);
}

/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
	switch (type) {
	case TYPE_VMEM:
		lk_vmem_seen = 0;
		break;
	case TYPE_CACHE:
		lk_cache_seen = 0;
		break;
	case TYPE_KMEM:
		lk_kmem_seen = 0;
		break;
	default:
		break;
	}

	lk_ttl = 0;
	lk_bytes = 0;
}

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
	const leak_bufctl_t *cur;
	kmem_cache_t cache;
	size_t min, max, size;
	char sz[30];
	char c[MDB_SYM_NAMLEN];
	uintptr_t caller;

	if (verbose) {
		lk_ttl = 0;
		lk_bytes = 0;
	}

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (!verbose && !lk_vmem_seen) {
			lk_vmem_seen = 1;
			mdb_printf("%-16s %7s %?s %s\n",
			    "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
		}

		min = max = lkb->lkb_data;

		for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
			size = cur->lkb_data;

			if (size < min)
				min = size;
			if (size > max)
				max = size;

			lk_ttl++;
			lk_bytes += size;
		}

		if (min == max)
			(void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
		else
			(void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
			    min, max);

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
			    lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("kmem_oversize leak: 1 vmem_seg, "
				    "%ld bytes\n", lk_bytes);
			else
				mdb_printf("kmem_oversize leak: %ld "
				    "vmem_segs, %s bytes each, "
				    "%ld bytes total\n", lk_ttl, sz, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::vmem_seg -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	case TYPE_CACHE:
		if (!verbose && !lk_cache_seen) {
			lk_cache_seen = 1;
			if (lk_vmem_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFFER", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "buffer %p", lkb->lkb_data, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
		if (caller != 0) {
			(void) mdb_snprintf(c, sizeof (c), "%a", caller);
		} else {
			(void) mdb_snprintf(c, sizeof (c),
			    "%s", (verbose) ? "" : "?");
		}

		if (!verbose) {
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %ld buffers, "
				    "%ld bytes each, %ld bytes total,\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			mdb_printf("    sample addr %p%s%s\n",
			    lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
		}
		return;

	case TYPE_KMEM:
		if (!verbose && !lk_kmem_seen) {
			lk_kmem_seen = 1;
			if (lk_vmem_seen || lk_cache_seen)
				mdb_printf("\n");
			mdb_printf("%-?s %7s %?s %s\n",
			    "CACHE", "LEAKED", "BUFCTL", "CALLER");
		}

		if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
			/*
			 * This _really_ shouldn't happen; we shouldn't
			 * have been able to get this far if this
			 * cache wasn't readable.
			 */
			mdb_warn("can't read cache %p for leaked "
			    "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
			return;
		}

		lk_ttl += lkb->lkb_dups + 1;
		lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

		if (!verbose) {
			leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
			    c, &caller);

			if (caller != 0) {
				(void) mdb_snprintf(c, sizeof (c),
				    "%a", caller);
			} else {
				(void) mdb_snprintf(c, sizeof (c),
				    "%s", "?");
			}
			mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
			    lkb->lkb_dups + 1, lkb->lkb_addr, c);
		} else {
			mdb_arg_t v;

			if (lk_ttl == 1)
				mdb_printf("%s leak: 1 buffer, %ld bytes\n",
				    cache.cache_name, lk_bytes);
			else
				mdb_printf("%s leak: %ld buffers, "
				    "%ld bytes each, %ld bytes total\n",
				    cache.cache_name, lk_ttl,
				    cache.cache_bufsize, lk_bytes);

			v.a_type = MDB_TYPE_STRING;
			v.a_un.a_str = "-v";

			if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
			    DCMD_ADDRSPEC, 1, &v) == -1) {
				mdb_warn("'%p::bufctl -v' failed",
				    lkb->lkb_addr);
			}
		}
		return;

	default:
		return;
	}
}

void
leaky_subr_dump_end(int type)
{
	int i;
	int width;
	const char *leaks;

	switch (type) {
	case TYPE_VMEM:
		if (!lk_vmem_seen)
			return;

		width = 16;
		leaks = "kmem_oversize leak";
		break;

	case TYPE_CACHE:
		if (!lk_cache_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	case TYPE_KMEM:
		if (!lk_kmem_seen)
			return;

		width = sizeof (uintptr_t) * 2;
		leaks = "buffer";
		break;

	default:
		return;
	}

	for (i = 0; i < 72; i++)
		mdb_printf("-");
	mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
	    width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
	    lk_bytes, (lk_bytes == 1) ? "" : "s");
}

int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
	kmem_bufctl_audit_t bc;
	vmem_seg_t vs;

	switch (lkb->lkb_type) {
	case TYPE_VMEM:
		if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read vmem_seg at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &vs, cbdata));

	case TYPE_CACHE:
		return (cb(lkb->lkb_addr, NULL, cbdata));

	case TYPE_KMEM:
		if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
			mdb_warn("unable to read bufctl at %p",
			    lkb->lkb_addr);
			return (WALK_NEXT);
		}
		return (cb(lkb->lkb_addr, &bc, cbdata));
	default:
		return (WALK_NEXT);
	}
}