/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <mdb/mdb_param.h>
#include <mdb/mdb_modapi.h>

#include <sys/fs/ufs_inode.h>
#include <sys/kmem_impl.h>
#include <sys/vmem_impl.h>
#include <sys/modctl.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <vm/seg_vn.h>
#include <vm/as.h>
#include <vm/seg_map.h>
#include <mdb/mdb_ctf.h>

#include "kmem.h"
#include "leaky_impl.h"

/*
 * This file defines the genunix target for leaky.c.  There are three types
 * of buffers in the kernel's heap:  TYPE_VMEM, for kmem_oversize allocations,
 * TYPE_KMEM, for kmem_cache_alloc() allocations with bufctl_audit_ts, and
 * TYPE_CACHE, for kmem_cache_alloc() allocations without bufctl_audit_ts.
 *
 * See "leaky_impl.h" for the target interface definition.
 */

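/*
 * In outline, leaky.c drives this target through the leaky_subr_*
 * entry points defined below (see leaky_impl.h for the authoritative
 * interface definition):
 *
 *   leaky_subr_estimate()    sizes the leak_mtab_t array from cache and
 *                            vmem kstats
 *   leaky_subr_fill()        records every allocated buffer and oversize
 *                            vmem_seg in the mtab
 *   leaky_subr_run()         greps the root set (modules, thread stacks,
 *                            kstats, page counters) for references
 *   leaky_subr_add_leak()    converts each unreferenced mtab entry into
 *                            a leak_bufctl_t
 *   leaky_subr_dump*()       report the coalesced leaks
 */
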
#define TYPE_VMEM       0               /* lkb_data is the vmem_seg's size */
#define TYPE_CACHE      1               /* lkb_cid is the bufctl's cache */
#define TYPE_KMEM       2               /* lkb_cid is the bufctl's cache */

#define LKM_CTL_BUFCTL  0       /* normal allocation, PTR is bufctl */
#define LKM_CTL_VMSEG   1       /* oversize allocation, PTR is vmem_seg_t */
#define LKM_CTL_CACHE   2       /* normal alloc, non-debug, PTR is cache */
#define LKM_CTL_MASK    3L

#define LKM_CTL(ptr, type)      (LKM_CTLPTR(ptr) | (type))
#define LKM_CTLPTR(ctl)         ((uintptr_t)(ctl) & ~(LKM_CTL_MASK))
#define LKM_CTLTYPE(ctl)        ((uintptr_t)(ctl) &  (LKM_CTL_MASK))
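
/*
 * A worked example of the tag encoding:  bufctls, vmem_segs, and caches
 * are all word-aligned, so the low two bits of their addresses are zero
 * and are free to carry the LKM_CTL_* type.  For a (hypothetical)
 * vmem_seg_t at 0x30001b2e540:
 *
 *   LKM_CTL(0x30001b2e540, LKM_CTL_VMSEG)  == 0x30001b2e541
 *   LKM_CTLPTR(0x30001b2e541)              == 0x30001b2e540
 *   LKM_CTLTYPE(0x30001b2e541)             == LKM_CTL_VMSEG
 */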

static int kmem_lite_count = 0; /* cache of the kernel's version */

/*ARGSUSED*/
static int
leaky_mtab(uintptr_t addr, const kmem_bufctl_audit_t *bcp, leak_mtab_t **lmp)
{
        leak_mtab_t *lm = (*lmp)++;

        lm->lkm_base = (uintptr_t)bcp->bc_addr;
        lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_BUFCTL);

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_mtab_addr(uintptr_t addr, void *ignored, leak_mtab_t **lmp)
{
        leak_mtab_t *lm = (*lmp)++;

        lm->lkm_base = addr;

        return (WALK_NEXT);
}

static int
leaky_seg(uintptr_t addr, const vmem_seg_t *seg, leak_mtab_t **lmp)
{
        leak_mtab_t *lm = (*lmp)++;

        lm->lkm_base = seg->vs_start;
        lm->lkm_limit = seg->vs_end;
        lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_VMSEG);

        return (WALK_NEXT);
}

static int
leaky_vmem_interested(const vmem_t *vmem)
{
        if (strcmp(vmem->vm_name, "kmem_oversize") != 0 &&
            strcmp(vmem->vm_name, "static_alloc") != 0)
                return (0);
        return (1);
}

static int
leaky_vmem(uintptr_t addr, const vmem_t *vmem, leak_mtab_t **lmp)
{
        if (!leaky_vmem_interested(vmem))
                return (WALK_NEXT);

        if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_seg, lmp, addr) == -1)
                mdb_warn("can't walk vmem_alloc for %s (%p)",
                    vmem->vm_name, addr);

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_estimate_vmem(uintptr_t addr, const vmem_t *vmem, size_t *est)
{
        if (!leaky_vmem_interested(vmem))
                return (WALK_NEXT);

        *est += (int)(vmem->vm_kstat.vk_alloc.value.ui64 -
            vmem->vm_kstat.vk_free.value.ui64);

        return (WALK_NEXT);
}
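
/*
 * A worked example of the estimate above:  every vmem_alloc() increments
 * vk_alloc, and every vmem_free() increments vk_free, so an arena with
 * (hypothetically) vk_alloc == 10500 and vk_free == 10342 has 158
 * segments outstanding, and contributes 158 entries to the estimate.
 */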

static int
leaky_interested(const kmem_cache_t *c)
{
        vmem_t vmem;

        /*
         * ignore HAT-related caches that happen to derive from kmem_default
         */
        if (strcmp(c->cache_name, "sfmmu1_cache") == 0 ||
            strcmp(c->cache_name, "sf_hment_cache") == 0 ||
            strcmp(c->cache_name, "pa_hment_cache") == 0)
                return (0);

        if (mdb_vread(&vmem, sizeof (vmem), (uintptr_t)c->cache_arena) == -1) {
                mdb_warn("cannot read arena %p for cache '%s'",
                    (uintptr_t)c->cache_arena, c->cache_name);
                return (0);
        }

        /*
         * If this cache isn't allocating from the kmem_default,
         * kmem_firewall, or static vmem arenas, we're not interested.
         */
        if (strcmp(vmem.vm_name, "kmem_default") != 0 &&
            strcmp(vmem.vm_name, "kmem_firewall") != 0 &&
            strcmp(vmem.vm_name, "static") != 0)
                return (0);

        return (1);
}

static int
leaky_estimate(uintptr_t addr, const kmem_cache_t *c, size_t *est)
{
        if (!leaky_interested(c))
                return (WALK_NEXT);

        *est += kmem_estimate_allocated(addr, c);

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_cache(uintptr_t addr, const kmem_cache_t *c, leak_mtab_t **lmp)
{
        leak_mtab_t *lm = *lmp;
        mdb_walk_cb_t cb;
        const char *walk;
        int audit = (c->cache_flags & KMF_AUDIT);

        if (!leaky_interested(c))
                return (WALK_NEXT);

        if (audit) {
                walk = "bufctl";
                cb = (mdb_walk_cb_t)leaky_mtab;
        } else {
                walk = "kmem";
                cb = (mdb_walk_cb_t)leaky_mtab_addr;
        }
        if (mdb_pwalk(walk, cb, lmp, addr) == -1) {
                mdb_warn("can't walk %s for cache %p (%s)", walk, addr,
                    c->cache_name);
                return (WALK_DONE);
        }

        for (; lm < *lmp; lm++) {
                lm->lkm_limit = lm->lkm_base + c->cache_bufsize;
                if (!audit)
                        lm->lkm_bufctl = LKM_CTL(addr, LKM_CTL_CACHE);
        }

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_buffer(uintptr_t addr, const void *ignored, const kmem_cache_t *c)
{
        leaky_grep(addr, c->cache_bufsize);

        /*
         * free, constructed KMF_LITE buffers keep their first uint64_t in
         * their buftag's redzone.
         */
        if (c->cache_flags & KMF_LITE) {
                /* LINTED alignment */
                kmem_buftag_t *btp = KMEM_BUFTAG(c, addr);
                leaky_grep((uintptr_t)&btp->bt_redzone,
                    sizeof (btp->bt_redzone));
        }

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_scan_cache(uintptr_t addr, const kmem_cache_t *c, void *ignored)
{
        if (!leaky_interested(c))
                return (WALK_NEXT);

        /*
         * Scan all of the free, constructed buffers, since they may have
         * pointers to allocated objects.
         */
        if (mdb_pwalk("freemem_constructed",
            (mdb_walk_cb_t)leaky_scan_buffer, (void *)c, addr) == -1) {
                mdb_warn("can't walk freemem_constructed for cache %p (%s)",
                    addr, c->cache_name);
                return (WALK_DONE);
        }

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_modctl(uintptr_t addr, const struct modctl *m, int *ignored)
{
        struct module mod;
        char name[MODMAXNAMELEN];

        if (m->mod_mp == NULL)
                return (WALK_NEXT);

        if (mdb_vread(&mod, sizeof (mod), (uintptr_t)m->mod_mp) == -1) {
                mdb_warn("couldn't read modctl %p's module", addr);
                return (WALK_NEXT);
        }

        if (mdb_readstr(name, sizeof (name), (uintptr_t)m->mod_modname) == -1)
                (void) mdb_snprintf(name, sizeof (name), "0x%p", addr);

        leaky_grep((uintptr_t)m->mod_mp, sizeof (struct module));
        leaky_grep((uintptr_t)mod.data, mod.data_size);
        leaky_grep((uintptr_t)mod.bss, mod.bss_size);

        return (WALK_NEXT);
}

/*ARGSUSED*/
static int
leaky_thread(uintptr_t addr, const kthread_t *t, unsigned long *pagesize)
{
        uintptr_t size, base = (uintptr_t)t->t_stkbase;
        uintptr_t stk = (uintptr_t)t->t_stk;

        if (t->t_state != TS_FREE)
                leaky_grep(base, stk - base);

        /*
         * There is always gunk hanging out between t_stk and the page
         * boundary.  If this thread structure wasn't kmem allocated,
         * this will include the thread structure itself.  If the thread
         * _is_ kmem allocated, we'll be able to get to it via allthreads.
         */
        size = *pagesize - (stk & (*pagesize - 1));

        leaky_grep(stk, size);

        return (WALK_NEXT);
}
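
/*
 * To make the arithmetic above concrete:  with a hypothetical 8K page
 * (*pagesize == 0x2000) and t_stk == 0x2a10007f860,
 *
 *   size = 0x2000 - (0x2a10007f860 & 0x1fff) = 0x2000 - 0x1860 = 0x7a0
 *
 * i.e., we grep from t_stk up to the next page boundary, which is where
 * the gunk (and possibly the thread structure itself) lives.
 */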

/*ARGSUSED*/
static int
leaky_kstat(uintptr_t addr, vmem_seg_t *seg, void *ignored)
{
        leaky_grep(seg->vs_start, seg->vs_end - seg->vs_start);

        return (WALK_NEXT);
}

static void
leaky_kludge(void)
{
        GElf_Sym sym;
        mdb_ctf_id_t id, rid;

        int max_mem_nodes;
        uintptr_t *counters;
        size_t ncounters;
        ssize_t hwpm_size;
        int idx;

        /*
         * Because of DR, the page counters (which live in the kmem64 segment)
         * can point into kmem_alloc()ed memory.  The "page_counters" array
         * is multi-dimensional, and each entry points to an array of
         * "hw_page_map_t"s which is "max_mem_nodes" in length.
         *
         * To keep this from having too much grotty knowledge of internals,
         * we use CTF data to get the size of the structure.  For simplicity,
         * we treat the page_counters array as a flat array of pointers, and
         * use its size to determine how much to scan.  Unused entries will
         * be NULL.
         */
        if (mdb_lookup_by_name("page_counters", &sym) == -1) {
                mdb_warn("unable to lookup page_counters");
                return;
        }

        if (mdb_readvar(&max_mem_nodes, "max_mem_nodes") == -1) {
                mdb_warn("unable to read max_mem_nodes");
                return;
        }

        if (mdb_ctf_lookup_by_name("unix`hw_page_map_t", &id) == -1 ||
            mdb_ctf_type_resolve(id, &rid) == -1 ||
            (hwpm_size = mdb_ctf_type_size(rid)) < 0) {
                mdb_warn("unable to lookup unix`hw_page_map_t");
                return;
        }

        counters = mdb_alloc(sym.st_size, UM_SLEEP | UM_GC);

        if (mdb_vread(counters, sym.st_size, (uintptr_t)sym.st_value) == -1) {
                mdb_warn("unable to read page_counters");
                return;
        }

        ncounters = sym.st_size / sizeof (counters[0]);

        for (idx = 0; idx < ncounters; idx++) {
                uintptr_t addr = counters[idx];
                if (addr != 0)
                        leaky_grep(addr, hwpm_size * max_mem_nodes);
        }
}
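
/*
 * For instance (all sizes hypothetical):  if page_counters occupies 4K
 * (512 64-bit pointers), max_mem_nodes is 4, and hw_page_map_t is 48
 * bytes, then each non-NULL entry causes a 4 * 48 = 192 byte grep, and
 * NULL (unused) entries are simply skipped.
 */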

int
leaky_subr_estimate(size_t *estp)
{
        uintptr_t panicstr;
        int state;

        if ((state = mdb_get_state()) == MDB_STATE_RUNNING) {
                mdb_warn("findleaks: can only be run on a system "
                    "dump or under kmdb; see dumpadm(1M)\n");
                return (DCMD_ERR);
        }

        if (mdb_readvar(&panicstr, "panicstr") == -1) {
                mdb_warn("can't read variable 'panicstr'");
                return (DCMD_ERR);
        }

        if (state != MDB_STATE_STOPPED && panicstr == NULL) {
                mdb_warn("findleaks: cannot be run on a live dump.\n");
                return (DCMD_ERR);
        }

        if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_estimate, estp) == -1) {
                mdb_warn("couldn't walk 'kmem_cache'");
                return (DCMD_ERR);
        }

        if (*estp == 0) {
                mdb_warn("findleaks: no buffers found\n");
                return (DCMD_ERR);
        }

        if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_estimate_vmem, estp) == -1) {
                mdb_warn("couldn't walk 'vmem'");
                return (DCMD_ERR);
        }

        return (DCMD_OK);
}

int
leaky_subr_fill(leak_mtab_t **lmpp)
{
        if (mdb_walk("vmem", (mdb_walk_cb_t)leaky_vmem, lmpp) == -1) {
                mdb_warn("couldn't walk 'vmem'");
                return (DCMD_ERR);
        }

        if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_cache, lmpp) == -1) {
                mdb_warn("couldn't walk 'kmem_cache'");
                return (DCMD_ERR);
        }

        if (mdb_readvar(&kmem_lite_count, "kmem_lite_count") == -1) {
                mdb_warn("couldn't read 'kmem_lite_count'");
                kmem_lite_count = 0;
        } else if (kmem_lite_count > 16) {
                mdb_warn("kmem_lite_count nonsensical, ignored\n");
                kmem_lite_count = 0;
        }

        return (DCMD_OK);
}

int
leaky_subr_run(void)
{
        unsigned long ps = PAGESIZE;
        uintptr_t kstat_arena;
        uintptr_t dmods;

        leaky_kludge();

        if (mdb_walk("kmem_cache", (mdb_walk_cb_t)leaky_scan_cache,
            NULL) == -1) {
                mdb_warn("couldn't walk 'kmem_cache'");
                return (DCMD_ERR);
        }

        if (mdb_walk("modctl", (mdb_walk_cb_t)leaky_modctl, NULL) == -1) {
                mdb_warn("couldn't walk 'modctl'");
                return (DCMD_ERR);
        }

        /*
         * If kmdb is loaded, we need to walk its module list, since kmdb
         * modctl structures can reference kmem allocations.
         */
        if ((mdb_readvar(&dmods, "kdi_dmods") != -1) && (dmods != NULL))
                (void) mdb_pwalk("modctl", (mdb_walk_cb_t)leaky_modctl,
                    NULL, dmods);

        if (mdb_walk("thread", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
                mdb_warn("couldn't walk 'thread'");
                return (DCMD_ERR);
        }

        if (mdb_walk("deathrow", (mdb_walk_cb_t)leaky_thread, &ps) == -1) {
                mdb_warn("couldn't walk 'deathrow'");
                return (DCMD_ERR);
        }

        if (mdb_readvar(&kstat_arena, "kstat_arena") == -1) {
                mdb_warn("couldn't read 'kstat_arena'");
                return (DCMD_ERR);
        }

        if (mdb_pwalk("vmem_alloc", (mdb_walk_cb_t)leaky_kstat,
            NULL, kstat_arena) == -1) {
                mdb_warn("couldn't walk kstat vmem arena");
                return (DCMD_ERR);
        }

        return (DCMD_OK);
}

void
leaky_subr_add_leak(leak_mtab_t *lmp)
{
        uintptr_t addr = LKM_CTLPTR(lmp->lkm_bufctl);
        size_t depth;

        switch (LKM_CTLTYPE(lmp->lkm_bufctl)) {
        case LKM_CTL_VMSEG: {
                vmem_seg_t vs;

                if (mdb_vread(&vs, sizeof (vs), addr) == -1) {
                        mdb_warn("couldn't read leaked vmem_seg at addr %p",
                            addr);
                        return;
                }
                depth = MIN(vs.vs_depth, VMEM_STACK_DEPTH);

                leaky_add_leak(TYPE_VMEM, addr, vs.vs_start, vs.vs_timestamp,
                    vs.vs_stack, depth, 0, (vs.vs_end - vs.vs_start));
                break;
        }
        case LKM_CTL_BUFCTL: {
                kmem_bufctl_audit_t bc;

                if (mdb_vread(&bc, sizeof (bc), addr) == -1) {
                        mdb_warn("couldn't read leaked bufctl at addr %p",
                            addr);
                        return;
                }

                depth = MIN(bc.bc_depth, KMEM_STACK_DEPTH);

                /*
                 * The top of the stack will be kmem_cache_alloc+offset.
                 * Since the offset in kmem_cache_alloc() isn't interesting
                 * we skip that frame for the purposes of uniquifying stacks.
                 *
                 * We also use the cache pointer as the leak's cid, to
                 * prevent the coalescing of leaks from different caches.
                 */
                if (depth > 0)
                        depth--;
                leaky_add_leak(TYPE_KMEM, addr, (uintptr_t)bc.bc_addr,
                    bc.bc_timestamp, bc.bc_stack + 1, depth,
                    (uintptr_t)bc.bc_cache, 0);
                break;
        }
        case LKM_CTL_CACHE: {
                kmem_cache_t cache;
                kmem_buftag_lite_t bt;
                pc_t caller;
                int depth = 0;

                /*
                 * For KMF_LITE caches, we can get the allocation PC
                 * out of the buftag structure.
                 */
                if (mdb_vread(&cache, sizeof (cache), addr) != -1 &&
                    (cache.cache_flags & KMF_LITE) &&
                    kmem_lite_count > 0 &&
                    mdb_vread(&bt, sizeof (bt),
                    /* LINTED alignment */
                    (uintptr_t)KMEM_BUFTAG(&cache, lmp->lkm_base)) != -1) {
                        caller = bt.bt_history[0];
                        depth = 1;
                }
                leaky_add_leak(TYPE_CACHE, lmp->lkm_base, lmp->lkm_base, 0,
                    &caller, depth, addr, addr);
                break;
        }
        default:
                mdb_warn("internal error: invalid leak_bufctl_t\n");
                break;
        }
}

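/*
 * Pick the "interesting" caller out of a leak's stack trace:  the first
 * frame that isn't in kmem or vmem itself.  For a (hypothetical) stack of
 *
 *   kmem_cache_alloc+0x88
 *   kmem_zalloc+0x28
 *   allocb+0x54
 *
 * we report allocb+0x54, since the allocator frames say nothing about
 * who leaked the buffer.  The result is used to label the leak and to
 * sort the output (see leaky_subr_bufctl_cmp() below).
 */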
static void
leaky_subr_caller(const pc_t *stack, uint_t depth, char *buf, uintptr_t *pcp)
{
        int i;
        GElf_Sym sym;
        uintptr_t pc = 0;

        buf[0] = 0;

        for (i = 0; i < depth; i++) {
                pc = stack[i];

                if (mdb_lookup_by_addr(pc,
                    MDB_SYM_FUZZY, buf, MDB_SYM_NAMLEN, &sym) == -1)
                        continue;
                if (strncmp(buf, "kmem_", 5) == 0)
                        continue;
                if (strncmp(buf, "vmem_", 5) == 0)
                        continue;
                *pcp = pc;

                return;
        }

        /*
         * We're only here if every frame in the call chain is a "kmem_"
         * or "vmem_" function (or has no resolvable symbol); this
         * shouldn't happen, but we'll just use the last caller.
         */
        *pcp = pc;
}

int
leaky_subr_bufctl_cmp(const leak_bufctl_t *lhs, const leak_bufctl_t *rhs)
{
        char lbuf[MDB_SYM_NAMLEN], rbuf[MDB_SYM_NAMLEN];
        uintptr_t lcaller, rcaller;
        int rval;

        leaky_subr_caller(lhs->lkb_stack, lhs->lkb_depth, lbuf, &lcaller);
        leaky_subr_caller(rhs->lkb_stack, rhs->lkb_depth, rbuf, &rcaller);

        if ((rval = strcmp(lbuf, rbuf)) != 0)
                return (rval);

        if (lcaller < rcaller)
                return (-1);

        if (lcaller > rcaller)
                return (1);

        if (lhs->lkb_data < rhs->lkb_data)
                return (-1);

        if (lhs->lkb_data > rhs->lkb_data)
                return (1);

        return (0);
}

/*
 * Global state variables used by the leaky_subr_dump_* routines.  Note that
 * they are carefully cleared before use.
 */
static int lk_vmem_seen;
static int lk_cache_seen;
static int lk_kmem_seen;
static size_t lk_ttl;
static size_t lk_bytes;

void
leaky_subr_dump_start(int type)
{
        switch (type) {
        case TYPE_VMEM:
                lk_vmem_seen = 0;
                break;
        case TYPE_CACHE:
                lk_cache_seen = 0;
                break;
        case TYPE_KMEM:
                lk_kmem_seen = 0;
                break;
        default:
                break;
        }

        lk_ttl = 0;
        lk_bytes = 0;
}
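
/*
 * leaky_subr_dump() emits one line per coalesced leak.  Under the headers
 * printed below, non-verbose output looks roughly like this (addresses,
 * counts, and callers are, of course, hypothetical):
 *
 *   BYTES             LEAKED         VMEM_SEG CALLER
 *   4096                   1      30001b2e540 segkmem_alloc+0x14
 *
 *   CACHE             LEAKED           BUFFER CALLER
 *   0000030000434008       3 000003000075f098 allocb+0x54
 */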

void
leaky_subr_dump(const leak_bufctl_t *lkb, int verbose)
{
        const leak_bufctl_t *cur;
        kmem_cache_t cache;
        size_t min, max, size;
        char sz[30];
        char c[MDB_SYM_NAMLEN];
        uintptr_t caller;

        if (verbose) {
                lk_ttl = 0;
                lk_bytes = 0;
        }

        switch (lkb->lkb_type) {
        case TYPE_VMEM:
                if (!verbose && !lk_vmem_seen) {
                        lk_vmem_seen = 1;
                        mdb_printf("%-16s %7s %?s %s\n",
                            "BYTES", "LEAKED", "VMEM_SEG", "CALLER");
                }

                min = max = lkb->lkb_data;

                for (cur = lkb; cur != NULL; cur = cur->lkb_next) {
                        size = cur->lkb_data;

                        if (size < min)
                                min = size;
                        if (size > max)
                                max = size;

                        lk_ttl++;
                        lk_bytes += size;
                }

                if (min == max)
                        (void) mdb_snprintf(sz, sizeof (sz), "%ld", min);
                else
                        (void) mdb_snprintf(sz, sizeof (sz), "%ld-%ld",
                            min, max);

                if (!verbose) {
                        leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
                            c, &caller);

                        if (caller != 0) {
                                (void) mdb_snprintf(c, sizeof (c),
                                    "%a", caller);
                        } else {
                                (void) mdb_snprintf(c, sizeof (c),
                                    "%s", "?");
                        }
                        mdb_printf("%-16s %7d %?p %s\n", sz, lkb->lkb_dups + 1,
                            lkb->lkb_addr, c);
                } else {
                        mdb_arg_t v;

                        if (lk_ttl == 1)
                                mdb_printf("kmem_oversize leak: 1 vmem_seg, "
                                    "%ld bytes\n", lk_bytes);
                        else
                                mdb_printf("kmem_oversize leak: %d vmem_segs, "
                                    "%s bytes each, %ld bytes total\n",
                                    lk_ttl, sz, lk_bytes);

                        v.a_type = MDB_TYPE_STRING;
                        v.a_un.a_str = "-v";

                        if (mdb_call_dcmd("vmem_seg", lkb->lkb_addr,
                            DCMD_ADDRSPEC, 1, &v) == -1) {
                                mdb_warn("'%p::vmem_seg -v' failed",
                                    lkb->lkb_addr);
                        }
                }
                return;

        case TYPE_CACHE:
                if (!verbose && !lk_cache_seen) {
                        lk_cache_seen = 1;
                        if (lk_vmem_seen)
                                mdb_printf("\n");
                        mdb_printf("%-?s %7s %?s %s\n",
                            "CACHE", "LEAKED", "BUFFER", "CALLER");
                }

                if (mdb_vread(&cache, sizeof (cache), lkb->lkb_data) == -1) {
                        /*
                         * This _really_ shouldn't happen; we shouldn't
                         * have been able to get this far if this
                         * cache wasn't readable.
                         */
                        mdb_warn("can't read cache %p for leaked "
                            "buffer %p", lkb->lkb_data, lkb->lkb_addr);
                        return;
                }

                lk_ttl += lkb->lkb_dups + 1;
                lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

                caller = (lkb->lkb_depth == 0) ? 0 : lkb->lkb_stack[0];
                if (caller != 0) {
                        (void) mdb_snprintf(c, sizeof (c), "%a", caller);
                } else {
                        (void) mdb_snprintf(c, sizeof (c),
                            "%s", (verbose) ? "" : "?");
                }

                if (!verbose) {
                        mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
                            lkb->lkb_dups + 1, lkb->lkb_addr, c);
                } else {
                        if (lk_ttl == 1)
                                mdb_printf("%s leak: 1 buffer, %ld bytes,\n",
                                    cache.cache_name, lk_bytes);
                        else
                                mdb_printf("%s leak: %d buffers, "
                                    "%ld bytes each, %ld bytes total,\n",
                                    cache.cache_name, lk_ttl,
                                    cache.cache_bufsize, lk_bytes);

                        mdb_printf("    sample addr %p%s%s\n",
                            lkb->lkb_addr, (caller == 0) ? "" : ", caller ", c);
                }
                return;

        case TYPE_KMEM:
                if (!verbose && !lk_kmem_seen) {
                        lk_kmem_seen = 1;
                        if (lk_vmem_seen || lk_cache_seen)
                                mdb_printf("\n");
                        mdb_printf("%-?s %7s %?s %s\n",
                            "CACHE", "LEAKED", "BUFCTL", "CALLER");
                }

                if (mdb_vread(&cache, sizeof (cache), lkb->lkb_cid) == -1) {
                        /*
                         * This _really_ shouldn't happen; we shouldn't
                         * have been able to get this far if this
                         * cache wasn't readable.
                         */
                        mdb_warn("can't read cache %p for leaked "
                            "bufctl %p", lkb->lkb_cid, lkb->lkb_addr);
                        return;
                }

                lk_ttl += lkb->lkb_dups + 1;
                lk_bytes += (lkb->lkb_dups + 1) * cache.cache_bufsize;

                if (!verbose) {
                        leaky_subr_caller(lkb->lkb_stack, lkb->lkb_depth,
                            c, &caller);

                        if (caller != 0) {
                                (void) mdb_snprintf(c, sizeof (c),
                                    "%a", caller);
                        } else {
                                (void) mdb_snprintf(c, sizeof (c),
                                    "%s", "?");
                        }
                        mdb_printf("%0?p %7d %0?p %s\n", lkb->lkb_cid,
                            lkb->lkb_dups + 1, lkb->lkb_addr, c);
                } else {
                        mdb_arg_t v;

                        if (lk_ttl == 1)
                                mdb_printf("%s leak: 1 buffer, %ld bytes\n",
                                    cache.cache_name, lk_bytes);
                        else
                                mdb_printf("%s leak: %d buffers, "
                                    "%ld bytes each, %ld bytes total\n",
                                    cache.cache_name, lk_ttl,
                                    cache.cache_bufsize, lk_bytes);

                        v.a_type = MDB_TYPE_STRING;
                        v.a_un.a_str = "-v";

                        if (mdb_call_dcmd("bufctl", lkb->lkb_addr,
                            DCMD_ADDRSPEC, 1, &v) == -1) {
                                mdb_warn("'%p::bufctl -v' failed",
                                    lkb->lkb_addr);
                        }
                }
                return;

        default:
                return;
        }
}

void
leaky_subr_dump_end(int type)
{
        int i;
        int width;
        const char *leaks;

        switch (type) {
        case TYPE_VMEM:
                if (!lk_vmem_seen)
                        return;

                width = 16;
                leaks = "kmem_oversize leak";
                break;

        case TYPE_CACHE:
                if (!lk_cache_seen)
                        return;

                width = sizeof (uintptr_t) * 2;
                leaks = "buffer";
                break;

        case TYPE_KMEM:
                if (!lk_kmem_seen)
                        return;

                width = sizeof (uintptr_t) * 2;
                leaks = "buffer";
                break;

        default:
                return;
        }

        for (i = 0; i < 72; i++)
                mdb_printf("-");
        mdb_printf("\n%*s %7ld %s%s, %ld byte%s\n",
            width, "Total", lk_ttl, leaks, (lk_ttl == 1) ? "" : "s",
            lk_bytes, (lk_bytes == 1) ? "" : "s");
}

int
leaky_subr_invoke_callback(const leak_bufctl_t *lkb, mdb_walk_cb_t cb,
    void *cbdata)
{
        kmem_bufctl_audit_t bc;
        vmem_seg_t vs;

        switch (lkb->lkb_type) {
        case TYPE_VMEM:
                if (mdb_vread(&vs, sizeof (vs), lkb->lkb_addr) == -1) {
                        mdb_warn("unable to read vmem_seg at %p",
                            lkb->lkb_addr);
                        return (WALK_NEXT);
                }
                return (cb(lkb->lkb_addr, &vs, cbdata));

        case TYPE_CACHE:
                return (cb(lkb->lkb_addr, NULL, cbdata));

        case TYPE_KMEM:
                if (mdb_vread(&bc, sizeof (bc), lkb->lkb_addr) == -1) {
                        mdb_warn("unable to read bufctl at %p",
                            lkb->lkb_addr);
                        return (WALK_NEXT);
                }
                return (cb(lkb->lkb_addr, &bc, cbdata));
        default:
                return (WALK_NEXT);
        }
}