1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
  23  * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
  24  */
  25 
  26 /*
  27  * Kernel memory allocator, as described in the following two papers and the
  28  * kmem slab consolidator Big Theory Statement below:
  29  *
  30  * Jeff Bonwick,
  31  * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
  32  * Proceedings of the Summer 1994 Usenix Conference.
  33  * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
  34  *
  35  * Jeff Bonwick and Jonathan Adams,
  36  * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
  37  * Arbitrary Resources.
  38  * Proceedings of the 2001 Usenix Conference.
  39  * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
  40  *
  41  * kmem Slab Consolidator Big Theory Statement:
  42  *
  43  * 1. Motivation
  44  *
  45  * As stated in Bonwick94, slabs provide the following advantages over other
  46  * allocation structures in terms of memory fragmentation:
  47  *
  48  *  - Internal fragmentation (per-buffer wasted space) is minimal.
  49  *  - Severe external fragmentation (unused buffers on the free list) is
  50  *    unlikely.
  51  *
  52  * Segregating objects by size eliminates one source of external fragmentation,
  53  * and according to Bonwick:
  54  *
  55  *   The other reason that slabs reduce external fragmentation is that all
  56  *   objects in a slab are of the same type, so they have the same lifetime
  57  *   distribution. The resulting segregation of short-lived and long-lived
  58  *   objects at slab granularity reduces the likelihood of an entire page being
  59  *   held hostage due to a single long-lived allocation [Barrett93, Hanson90].
  60  *
  61  * While unlikely, severe external fragmentation remains possible. Clients that
  62  * allocate both short- and long-lived objects from the same cache cannot
  63  * anticipate the distribution of long-lived objects within the allocator's slab
  64  * implementation. Even a small percentage of long-lived objects distributed
  65  * randomly across many slabs can lead to a worst case scenario where the client
  66  * frees the majority of its objects and the system gets back almost none of the
  67  * slabs. Despite the client doing what it reasonably can to help the system
  68  * reclaim memory, the allocator cannot shake free enough slabs because of
  69  * lonely allocations stubbornly hanging on. Although the allocator is in a
  70  * position to diagnose the fragmentation, there is nothing that the allocator
  71  * by itself can do about it. It only takes a single allocated object to prevent
  72  * an entire slab from being reclaimed, and any object handed out by
  73  * kmem_cache_alloc() is by definition in the client's control. Conversely,
  74  * although the client is in a position to move a long-lived object, it has no
  75  * way of knowing if the object is causing fragmentation, and if so, where to
  76  * move it. A solution necessarily requires further cooperation between the
  77  * allocator and the client.
  78  *
  79  * 2. Move Callback
  80  *
  81  * The kmem slab consolidator therefore adds a move callback to the
  82  * allocator/client interface, improving worst-case external fragmentation in
  83  * kmem caches that supply a function to move objects from one memory location
  84  * to another. In a situation of low memory, kmem attempts to consolidate all of
  85  * a cache's slabs at once; otherwise it works slowly to bring external
  86  * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
  87  * thereby helping to avoid a low memory situation in the future.
  88  *
  89  * The callback has the following signature:
  90  *
  91  *   kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
  92  *
  93  * It supplies the kmem client with two addresses: the allocated object that
  94  * kmem wants to move and a buffer selected by kmem for the client to use as the
  95  * copy destination. The callback is kmem's way of saying "Please get off of
  96  * this buffer and use this one instead." kmem knows where it wants to move the
  97  * object in order to best reduce fragmentation. All the client needs to know
  98  * about the second argument (void *new) is that it is an allocated, constructed
  99  * object ready to take the contents of the old object. When the move function
 100  * is called, the system is likely to be low on memory, and the new object
 101  * spares the client from having to worry about allocating memory for the
 102  * requested move. The third argument supplies the size of the object, in case a
 103  * single move function handles multiple caches whose objects differ only in
 104  * size (such as zio_buf_512, zio_buf_1024, etc.). Finally, the same optional
 105  * user argument passed to the constructor, destructor, and reclaim functions is
 106  * also passed to the move callback.
 107  *
 108  * 2.1 Setting the Move Callback
 109  *
 110  * The client sets the move callback after creating the cache and before
 111  * allocating from it:
 112  *
 113  *      object_cache = kmem_cache_create(...);
 114  *      kmem_cache_set_move(object_cache, object_move);
 115  *
 116  * 2.2 Move Callback Return Values
 117  *
 118  * Only the client knows its own data and when it is a good time to move it.
 119  * The client is cooperating with kmem to return unused memory to the system,
 120  * and kmem respectfully accepts this help at the client's convenience. When
 121  * asked to move an object, the client can respond with any of the following:
 122  *
 123  *   typedef enum kmem_cbrc {
 124  *           KMEM_CBRC_YES,
 125  *           KMEM_CBRC_NO,
 126  *           KMEM_CBRC_LATER,
 127  *           KMEM_CBRC_DONT_NEED,
 128  *           KMEM_CBRC_DONT_KNOW
 129  *   } kmem_cbrc_t;
 130  *
 131  * The client must not explicitly kmem_cache_free() either of the objects passed
 132  * to the callback, since kmem wants to free them directly to the slab layer
 133  * (bypassing the per-CPU magazine layer). The response tells kmem which of the
 134  * objects to free:
 135  *
 136  *       YES: (Did it) The client moved the object, so kmem frees the old one.
 137  *        NO: (Never) The client refused, so kmem frees the new object (the
 138  *            unused copy destination). kmem also marks the slab of the old
 139  *            object so as not to bother the client with further callbacks for
 140  *            that object as long as the slab remains on the partial slab list.
 141  *            (The system won't be getting the slab back as long as the
 142  *            immovable object holds it hostage, so there's no point in moving
 143  *            any of its objects.)
 144  *     LATER: The client is using the object and cannot move it now, so kmem
 145  *            frees the new object (the unused copy destination). kmem still
 146  *            attempts to move other objects off the slab, since it expects to
 147  *            succeed in clearing the slab in a later callback. The client
 148  *            should use LATER instead of NO if the object is likely to become
 149  *            movable very soon.
 150  * DONT_NEED: The client no longer needs the object, so kmem frees the old along
 151  *            with the new object (the unused copy destination). This response
 152  *            is the client's opportunity to be a model citizen and give back as
 153  *            much as it can.
 154  * DONT_KNOW: The client does not know about the object because
 155  *            a) the client has just allocated the object and not yet put it
 156  *               wherever it expects to find known objects,
 157  *            b) the client has removed the object from wherever it expects to
 158  *               find known objects and is about to free it, or
 159  *            c) the client has freed the object.
 160  *            In all these cases (a, b, and c) kmem frees the new object (the
 161  *            unused copy destination) and searches for the old object in the
 162  *            magazine layer. If found, the object is removed from the magazine
 163  *            layer and freed to the slab layer so it will no longer hold the
 164  *            slab hostage.
 165  *
 166  * 2.3 Object States
 167  *
 168  * Neither kmem nor the client can be assumed to know the object's whereabouts
 169  * at the time of the callback. An object belonging to a kmem cache may be in
 170  * any of the following states:
 171  *
 172  * 1. Uninitialized on the slab
 173  * 2. Allocated from the slab but not constructed (still uninitialized)
 174  * 3. Allocated from the slab, constructed, but not yet ready for business
 175  *    (not in a valid state for the move callback)
 176  * 4. In use (valid and known to the client)
 177  * 5. About to be freed (no longer in a valid state for the move callback)
 178  * 6. Freed to a magazine (still constructed)
 179  * 7. Allocated from a magazine, not yet ready for business (not in a valid
 180  *    state for the move callback), and about to return to state #4
 181  * 8. Deconstructed on a magazine that is about to be freed
 182  * 9. Freed to the slab
 183  *
 184  * Since the move callback may be called at any time while the object is in any
 185  * of the above states (except state #1), the client needs a safe way to
 186  * determine whether or not it knows about the object. Specifically, the client
 187  * needs to know whether or not the object is in state #4, the only state in
 188  * which a move is valid. If the object is in any other state, the client should
 189  * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
 190  * the object's fields.
 191  *
 192  * Note that although an object may be in state #4 when kmem initiates the move
 193  * request, the object may no longer be in that state by the time kmem actually
 194  * calls the move function. Not only does the client free objects
 195  * asynchronously, kmem itself puts move requests on a queue where they are
 196  * pending until kmem processes them from another context. Also, objects freed
 197  * to a magazine appear allocated from the point of view of the slab layer, so
 198  * kmem may even initiate requests for objects in a state other than state #4.
 199  *
 200  * 2.3.1 Magazine Layer
 201  *
 202  * An important insight revealed by the states listed above is that the magazine
 203  * layer is populated only by kmem_cache_free(). Magazines of constructed
 204  * objects are never populated directly from the slab layer (which contains raw,
 205  * unconstructed objects). Whenever an allocation request cannot be satisfied
 206  * from the magazine layer, the magazines are bypassed and the request is
 207  * satisfied from the slab layer (creating a new slab if necessary). kmem calls
 208  * the object constructor only when allocating from the slab layer, and only in
 209  * response to kmem_cache_alloc() or to prepare the destination buffer passed in
 210  * the move callback. kmem does not preconstruct objects in anticipation of
 211  * kmem_cache_alloc().
 212  *
 213  * 2.3.2 Object Constructor and Destructor
 214  *
 215  * If the client supplies a destructor, it must be valid to call the destructor
 216  * on a newly created object (immediately after the constructor).
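 *
 * For example, a minimal constructor/destructor pair satisfying this
 * requirement might look like the following sketch (object_t and its
 * hypothetical o_lock member are illustrative, not part of the kmem
 * interface):
 *
 *      static int
 *      object_constructor(void *buf, void *arg, int kmflags)
 *      {
 *              object_t *object = buf;
 *
 *              mutex_init(&object->o_lock, NULL, MUTEX_DEFAULT, NULL);
 *              return (0);
 *      }
 *
 *      static void
 *      object_destructor(void *buf, void *arg)
 *      {
 *              object_t *object = buf;
 *
 *              // Tears down only what the constructor set up, so it is
 *              // valid immediately after the constructor.
 *              mutex_destroy(&object->o_lock);
 *      }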
 217  *
 218  * 2.4 Recognizing Known Objects
 219  *
 220  * There is a simple test to determine safely whether or not the client knows
 221  * about a given object in the move callback. It relies on the fact that kmem
 222  * guarantees that the object of the move callback has only been touched by the
 223  * client itself or else by kmem. kmem does this by ensuring that none of the
 224  * cache's slabs are freed to the virtual memory (VM) subsystem while a move
 225  * callback is pending. When the last object on a slab is freed, if there is a
 226  * pending move, kmem puts the slab on a per-cache dead list and defers freeing
 227  * slabs on that list until all pending callbacks are completed. That way,
 228  * clients can be certain that the object of a move callback is in one of the
 229  * states listed above, making it possible to distinguish known objects (in
 230  * state #4) using the two low order bits of any pointer member (with the
 231  * exception of 'char *' or 'short *', which may not be 4-byte aligned on some
 232  * platforms).
 233  *
 234  * The test works as long as the client always transitions objects from state #4
 235  * (known, in use) to state #5 (about to be freed, invalid) by setting the low
 236  * order bit of the client-designated pointer member. Since kmem only writes
 237  * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
 238  * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
 239  * guaranteed to set at least one of the two low order bits. Therefore, given an
 240  * object with a back pointer to a 'container_t *o_container', the client can
 241  * test
 242  *
 243  *      container_t *container = object->o_container;
 244  *      if ((uintptr_t)container & 0x3) {
 245  *              return (KMEM_CBRC_DONT_KNOW);
 246  *      }
 247  *
 248  * Typically, an object will have a pointer to some structure with a list or
 249  * hash where objects from the cache are kept while in use. Assuming that the
 250  * client has some way of knowing that the container structure is valid and will
 251  * not go away during the move, and assuming that the structure includes a lock
 252  * to protect whatever collection is used, then the client would continue as
 253  * follows:
 254  *
 255  *      // Ensure that the container structure does not go away.
 256  *      if (container_hold(container) == 0) {
 257  *              return (KMEM_CBRC_DONT_KNOW);
 258  *      }
 259  *      mutex_enter(&container->c_objects_lock);
 260  *      if (container != object->o_container) {
 261  *              mutex_exit(&container->c_objects_lock);
 262  *              container_rele(container);
 263  *              return (KMEM_CBRC_DONT_KNOW);
 264  *      }
 265  *
 266  * At this point the client knows that the object cannot be freed as long as
 267  * c_objects_lock is held. Note that after acquiring the lock, the client must
 268  * recheck the o_container pointer in case the object was removed just before
 269  * acquiring the lock.
 270  *
 271  * When the client is about to free an object, it must first remove that object
 272  * from the list, hash, or other structure where it is kept. At that time, to
 273  * mark the object so it can be distinguished from the remaining, known objects,
 274  * the client sets the designated low order bit:
 275  *
 276  *      mutex_enter(&container->c_objects_lock);
 277  *      object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
 278  *      list_remove(&container->c_objects, object);
 279  *      mutex_exit(&container->c_objects_lock);
 280  *
 281  * In the common case, the object is freed to the magazine layer, where it may
 282  * be reused on a subsequent allocation without the overhead of calling the
 283  * constructor. While in the magazine it appears allocated from the point of
 284  * view of the slab layer, making it a candidate for the move callback. Most
 285  * objects unrecognized by the client in the move callback fall into this
 286  * category and are cheaply distinguished from known objects by the test
 287  * described earlier. Since recognition is cheap for the client, and searching
 288  * magazines is expensive for kmem, kmem defers searching until the client first
 289  * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem
 290  * elsewhere does what it can to avoid bothering the client unnecessarily.
 291  *
 292  * Invalidating the designated pointer member before freeing the object marks
 293  * the object to be avoided in the callback, and conversely, assigning a valid
 294  * value to the designated pointer member after allocating the object makes the
 295  * object fair game for the callback:
 296  *
 297  *      ... allocate object ...
 298  *      ... set any initial state not set by the constructor ...
 299  *
 300  *      mutex_enter(&container->c_objects_lock);
 301  *      list_insert_tail(&container->c_objects, object);
 302  *      membar_producer();
 303  *      object->o_container = container;
 304  *      mutex_exit(&container->c_objects_lock);
 305  *
 306  * Note that everything else must be valid before setting o_container makes the
 307  * object fair game for the move callback. The membar_producer() call ensures
 308  * that all the object's state is written to memory before setting the pointer
 309  * that transitions the object from state #3 or #7 (allocated, constructed, not
 310  * yet in use) to state #4 (in use, valid). That's important because the move
 311  * function has to check the validity of the pointer before it can safely
 312  * acquire the lock protecting the collection where it expects to find known
 313  * objects.
 314  *
 315  * This method of distinguishing known objects observes the usual symmetry:
 316  * invalidating the designated pointer is the first thing the client does before
 317  * freeing the object, and setting the designated pointer is the last thing the
 318  * client does after allocating the object. Of course, the client is not
 319  * required to use this method. Fundamentally, how the client recognizes known
 320  * objects is completely up to the client, but this method is recommended as an
 321  * efficient and safe way to take advantage of the guarantees made by kmem. If
 322  * the entire object is arbitrary data without any markable bits from a suitable
 323  * pointer member, then the client must find some other method, such as
 324  * searching a hash table of known objects.
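 *
 * A sketch of that alternative follows (known_objects and its lock are
 * hypothetical client state, and mod_hash_find() is merely one way the
 * lookup might be implemented):
 *
 *      mutex_enter(&known_objects_lock);
 *      if (mod_hash_find(known_objects, (mod_hash_key_t)old,
 *          (mod_hash_val_t *)&val) != 0) {
 *              mutex_exit(&known_objects_lock);
 *              return (KMEM_CBRC_DONT_KNOW);
 *      }
 *      ... the object is known; decide whether it is safe to move ...
 *      mutex_exit(&known_objects_lock);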
 325  *
 326  * 2.5 Preventing Objects From Moving
 327  *
 328  * Besides a way to distinguish known objects, the other thing that the client
 329  * needs is a strategy to ensure that an object will not move while the client
 330  * is actively using it. The details of satisfying this requirement tend to be
 331  * highly cache-specific. It might seem that the same rules that let a client
 332  * remove an object safely should also decide when an object can be moved
 333  * safely. However, any object state that makes a removal attempt invalid is
 334  * likely to be long-lasting for objects that the client does not expect to
 335  * remove. kmem knows nothing about the object state and is equally likely (from
 336  * the client's point of view) to request a move for any object in the cache,
 337  * whether prepared for removal or not. Even a low percentage of objects stuck
 338  * in place by unremovability will defeat the consolidator if the stuck objects
 339  * are the same long-lived allocations likely to hold slabs hostage.
 340  * Fundamentally, the consolidator is not aimed at common cases. Severe external
 341  * fragmentation is a worst case scenario manifested as sparsely allocated
 342  * slabs, by definition a low percentage of the cache's objects. When deciding
 343  * what makes an object movable, keep in mind the goal of the consolidator: to
 344  * bring worst-case external fragmentation within the limits guaranteed for
 345  * internal fragmentation. Removability is a poor criterion if it is likely to
 346  * exclude more than an insignificant percentage of objects for long periods of
 347  * time.
 348  *
 349  * A tricky general solution exists, and it has the advantage of letting you
 350  * move any object at almost any moment, practically eliminating the likelihood
 351  * that an object can hold a slab hostage. However, if there is a cache-specific
 352  * way to ensure that an object is not actively in use in the vast majority of
 353  * cases, a simpler solution that leverages this cache-specific knowledge is
 354  * preferred.
 355  *
 356  * 2.5.1 Cache-Specific Solution
 357  *
 358  * As an example of a cache-specific solution, the ZFS znode cache takes
 359  * advantage of the fact that the vast majority of znodes are only being
 360  * referenced from the DNLC. (A typical case might be a few hundred in active
 361  * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
 362  * client has established that it recognizes the znode and can access its fields
 363  * safely (using the method described earlier), it then tests whether the znode
 364  * is referenced by anything other than the DNLC. If so, it assumes that the
 365  * znode may be in active use and is unsafe to move, so it drops its locks and
 366  * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
 367  * else znodes are used, no change is needed to protect against the possibility
 368  * of the znode moving. The disadvantage is that it remains possible for an
 369  * application to hold a znode slab hostage with an open file descriptor.
 370  * However, this case ought to be rare and the consolidator has a way to deal
 371  * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
 372  * object, kmem eventually stops believing it and treats the slab as if the
 373  * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
 374  * then focus on getting it off of the partial slab list by allocating rather
 375  * than freeing all of its objects. (Either way of getting a slab off the
 376  * free list reduces fragmentation.)
 377  *
 378  * 2.5.2 General Solution
 379  *
 380  * The general solution, on the other hand, requires an explicit hold everywhere
 381  * the object is used to prevent it from moving. To keep the client locking
 382  * strategy as uncomplicated as possible, kmem guarantees the simplifying
 383  * assumption that move callbacks are sequential, even across multiple caches.
 384  * Internally, a global queue processed by a single thread supports all caches
 385  * implementing the callback function. No matter how many caches supply a move
 386  * function, the consolidator never moves more than one object at a time, so the
 387  * client does not have to worry about tricky lock ordering involving several
 388  * related objects from different kmem caches.
 389  *
 390  * The general solution implements the explicit hold as a read-write lock, which
 391  * allows multiple readers to access an object from the cache simultaneously
 392  * while a single writer is excluded from moving it. A single rwlock for the
 393  * entire cache would lock out all threads from using any of the cache's objects
 394  * even though only a single object is being moved, so to reduce contention,
 395  * the client can fan out the single rwlock into an array of rwlocks hashed by
 396  * the object address, making it probable that moving one object will not
 397  * prevent other threads from using a different object. The rwlock cannot be a
 398  * member of the object itself, because the possibility of the object moving
 399  * makes it unsafe to access any of the object's fields until the lock is
 400  * acquired.
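 *
 * One possible definition of the OBJECT_RWLOCK() macro used in the examples
 * below (the array size and the shift are arbitrary choices; the locks would
 * be rw_init()'d once, for example at module load time, and the shift simply
 * discards low-order address bits before masking):
 *
 *      #define OBJECT_RWLOCK_COUNT     64      // any power of two
 *      static krwlock_t object_rwlock[OBJECT_RWLOCK_COUNT];
 *
 *      #define OBJECT_RWLOCK(op)                                       \
 *              (&object_rwlock[((uintptr_t)(op) >> 6) &                \
 *              (OBJECT_RWLOCK_COUNT - 1)])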
 401  *
 402  * Assuming a small, fixed number of locks, it's possible that multiple objects
 403  * will hash to the same lock. A thread that needs to use multiple objects in
 404  * the same function may acquire the same lock multiple times. Since rwlocks are
 405  * reentrant for readers, and since there is never more than a single writer at
 406  * a time (assuming that the client acquires the lock as a writer only when
 407  * moving an object inside the callback), there would seem to be no problem.
 408  * However, a client locking multiple objects in the same function must handle
 409  * one case of potential deadlock: Assume that thread A needs to prevent both
 410  * object 1 and object 2 from moving, and thread B, the callback, meanwhile
 411  * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
 412  * same lock, that thread A will acquire the lock for object 1 as a reader
 413  * before thread B sets the lock's write-wanted bit, preventing thread A from
 414  * reacquiring the lock for object 2 as a reader. Unable to make forward
 415  * progress, thread A will never release the lock for object 1, resulting in
 416  * deadlock.
 417  *
 418  * There are two ways of avoiding the deadlock just described. The first is to
 419  * use rw_tryenter() rather than rw_enter() in the callback function when
 420  * attempting to acquire the lock as a writer. If tryenter discovers that the
 421  * same object (or another object hashed to the same lock) is already in use, it
 422  * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
 423  * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
 424  * since it allows a thread to acquire the lock as a reader in spite of a
 425  * waiting writer. This second approach insists on moving the object now, no
 426  * matter how many readers the move function must wait for in order to do so,
 427  * and could delay the completion of the callback indefinitely (blocking
 428  * callbacks to other clients). In practice, a less insistent callback using
 429  * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
 430  * little reason to use anything else.
 431  *
 432  * Avoiding deadlock is not the only problem that an implementation using an
 433  * explicit hold needs to solve. Locking the object in the first place (to
 434  * prevent it from moving) remains a problem, since the object could move
 435  * between the time you obtain a pointer to the object and the time you acquire
 436  * the rwlock hashed to that pointer value. Therefore the client needs to
 437  * recheck the value of the pointer after acquiring the lock, drop the lock if
 438  * the value has changed, and try again. This requires a level of indirection:
 439  * something that points to the object rather than the object itself, that the
 440  * client can access safely while attempting to acquire the lock. (The object
 441  * itself cannot be referenced safely because it can move at any time.)
 442  * The following lock-acquisition function takes whatever is safe to reference
 443  * (arg), follows its pointer to the object (using function f), and tries as
 444  * often as necessary to acquire the hashed lock and verify that the object
 445  * still has not moved:
 446  *
 447  *      object_t *
 448  *      object_hold(object_f f, void *arg)
 449  *      {
 450  *              object_t *op;
 451  *
 452  *              op = f(arg);
 453  *              if (op == NULL) {
 454  *                      return (NULL);
 455  *              }
 456  *
 457  *              rw_enter(OBJECT_RWLOCK(op), RW_READER);
 458  *              while (op != f(arg)) {
 459  *                      rw_exit(OBJECT_RWLOCK(op));
 460  *                      op = f(arg);
 461  *                      if (op == NULL) {
 462  *                              break;
 463  *                      }
 464  *                      rw_enter(OBJECT_RWLOCK(op), RW_READER);
 465  *              }
 466  *
 467  *              return (op);
 468  *      }
 469  *
 470  * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
 471  * lock reacquisition loop, while necessary, almost never executes. The function
 472  * pointer f (used to obtain the object pointer from arg) has the following type
 473  * definition:
 474  *
 475  *      typedef object_t *(*object_f)(void *arg);
 476  *
 477  * An object_f implementation is likely to be as simple as accessing a structure
 478  * member:
 479  *
 480  *      object_t *
 481  *      s_object(void *arg)
 482  *      {
 483  *              something_t *sp = arg;
 484  *              return (sp->s_object);
 485  *      }
 486  *
 487  * The flexibility of a function pointer allows the path to the object to be
 488  * arbitrarily complex and also supports the notion that depending on where you
 489  * are using the object, you may need to get it from someplace different.
 490  *
 491  * The function that releases the explicit hold is simpler because it does not
 492  * have to worry about the object moving:
 493  *
 494  *      void
 495  *      object_rele(object_t *op)
 496  *      {
 497  *              rw_exit(OBJECT_RWLOCK(op));
 498  *      }
 499  *
 500  * The caller is spared these details so that obtaining and releasing an
 501  * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
 502  * of object_hold() only needs to know that the returned object pointer is valid
 503  * if not NULL and that the object will not move until released.
 504  *
 505  * Although object_hold() prevents an object from moving, it does not prevent it
 506  * from being freed. The caller must take measures before calling object_hold()
 507  * (afterwards is too late) to ensure that the held object cannot be freed. The
 508  * caller must do so without accessing the unsafe object reference, so any lock
 509  * or reference count used to ensure the continued existence of the object must
 510  * live outside the object itself.
 511  *
 512  * Obtaining a new object is a special case where an explicit hold is impossible
 513  * for the caller. Any function that returns a newly allocated object (either as
 514  * a return value, or as an in-out parameter) must return it already held; after
 515  * the caller gets it is too late, since the object cannot be safely accessed
 516  * without the level of indirection described earlier. The following
 517  * object_alloc() example uses the same code shown earlier to transition a new
 518  * object into the state of being recognized (by the client) as a known object.
 519  * The function must acquire the hold (rw_enter) before that state transition
 520  * makes the object movable:
 521  *
 522  *      static object_t *
 523  *      object_alloc(container_t *container)
 524  *      {
 525  *              object_t *object = kmem_cache_alloc(object_cache, 0);
 526  *              ... set any initial state not set by the constructor ...
 527  *              rw_enter(OBJECT_RWLOCK(object), RW_READER);
 528  *              mutex_enter(&container->c_objects_lock);
 529  *              list_insert_tail(&container->c_objects, object);
 530  *              membar_producer();
 531  *              object->o_container = container;
 532  *              mutex_exit(&container->c_objects_lock);
 533  *              return (object);
 534  *      }
 535  *
 536  * Functions that implicitly acquire an object hold (any function that calls
 537  * object_alloc() to supply an object for the caller) need to be carefully noted
 538  * so that the matching object_rele() is not neglected. Otherwise, leaked holds
 539  * prevent all objects hashed to the affected rwlocks from ever being moved.
 540  *
 541  * The pointer to a held object can be hashed to the holding rwlock even after
 542  * the object has been freed. Although it is possible to release the hold
 543  * after freeing the object, you may decide to release the hold implicitly in
 544  * whatever function frees the object, so as to release the hold as soon as
 545  * possible, and for the sake of symmetry with the function that implicitly
 546  * acquires the hold when it allocates the object. Here, object_free() releases
 547  * the hold acquired by object_alloc(). Its implicit object_rele() forms a
 548  * matching pair with object_hold():
 549  *
 550  *      void
 551  *      object_free(object_t *object)
 552  *      {
 553  *              container_t *container;
 554  *
 555  *              ASSERT(object_held(object));
 556  *              container = object->o_container;
 557  *              mutex_enter(&container->c_objects_lock);
 558  *              object->o_container =
 559  *                  (void *)((uintptr_t)object->o_container | 0x1);
 560  *              list_remove(&container->c_objects, object);
 561  *              mutex_exit(&container->c_objects_lock);
 562  *              object_rele(object);
 563  *              kmem_cache_free(object_cache, object);
 564  *      }
 565  *
 566  * Note that object_free() cannot safely accept an object pointer as an argument
 567  * unless the object is already held. Any function that calls object_free()
 568  * needs to be carefully noted since it similarly forms a matching pair with
 569  * object_hold().
 570  *
 571  * To complete the picture, the following callback function implements the
 572  * general solution by moving objects only if they are currently unheld:
 573  *
 574  *      static kmem_cbrc_t
 575  *      object_move(void *buf, void *newbuf, size_t size, void *arg)
 576  *      {
 577  *              object_t *op = buf, *np = newbuf;
 578  *              container_t *container;
 579  *
 580  *              container = op->o_container;
 581  *              if ((uintptr_t)container & 0x3) {
 582  *                      return (KMEM_CBRC_DONT_KNOW);
 583  *              }
 584  *
 585  *              // Ensure that the container structure does not go away.
 586  *              if (container_hold(container) == 0) {
 587  *                      return (KMEM_CBRC_DONT_KNOW);
 588  *              }
 589  *
 590  *              mutex_enter(&container->c_objects_lock);
 591  *              if (container != op->o_container) {
 592  *                      mutex_exit(&container->c_objects_lock);
 593  *                      container_rele(container);
 594  *                      return (KMEM_CBRC_DONT_KNOW);
 595  *              }
 596  *
 597  *              if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
 598  *                      mutex_exit(&container->c_objects_lock);
 599  *                      container_rele(container);
 600  *                      return (KMEM_CBRC_LATER);
 601  *              }
 602  *
 603  *              object_move_impl(op, np); // critical section
 604  *              rw_exit(OBJECT_RWLOCK(op));
 605  *
 606  *              op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
 607  *              list_link_replace(&op->o_link_node, &np->o_link_node);
 608  *              mutex_exit(&container->c_objects_lock);
 609  *              container_rele(container);
 610  *              return (KMEM_CBRC_YES);
 611  *      }
 612  *
 613  * Note that object_move() must invalidate the designated o_container pointer of
 614  * the old object in the same way that object_free() does, since kmem will free
 615  * the object in response to the KMEM_CBRC_YES return value.
 616  *
 617  * The lock order in object_move() differs from object_alloc(), which locks
 618  * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
 619  * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
 620  * not a problem. Holding the lock on the object list in the example above
 621  * through the entire callback not only prevents the object from going away, it
 622  * also allows you to lock the list elsewhere and know that none of its elements
 623  * will move during iteration.
 624  *
 625  * Adding an explicit hold everywhere an object from the cache is used is tricky
 626  * and involves much more change to client code than a cache-specific solution
 627  * that leverages existing state to decide whether or not an object is
 628  * movable. However, this approach has the advantage that no object remains
 629  * immovable for any significant length of time, making it extremely unlikely
 630  * that long-lived allocations can continue holding slabs hostage; and it works
 631  * for any cache.
 632  *
 633  * 3. Consolidator Implementation
 634  *
 635  * Once the client supplies a move function that a) recognizes known objects and
 636  * b) avoids moving objects that are actively in use, the remaining work is up
 637  * to the consolidator to decide which objects to move and when to issue
 638  * callbacks.
 639  *
 640  * The consolidator relies on the fact that a cache's slabs are ordered by
 641  * usage. Each slab has a fixed number of objects. Depending on the slab's
 642  * "color" (the offset of the first object from the beginning of the slab;
 643  * offsets are staggered to mitigate false sharing of cache lines) it is either
 644  * the maximum number of objects per slab determined at cache creation time or
 645  * else the number closest to the maximum that fits within the space remaining
 646  * after the initial offset. A completely allocated slab may contribute some
 647  * internal fragmentation (per-slab overhead) but no external fragmentation, so
 648  * it is of no interest to the consolidator. At the other extreme, slabs whose
 649  * objects have all been freed to the slab are released to the virtual memory
 650  * (VM) subsystem (objects freed to magazines are still allocated as far as the
 651  * slab is concerned). External fragmentation exists when there are slabs
 652  * somewhere between these extremes. A partial slab has at least one but not all
 653  * of its objects allocated. The more partial slabs, and the fewer allocated
 654  * objects on each of them, the higher the fragmentation. Hence the
 655  * consolidator's overall strategy is to reduce the number of partial slabs by
 656  * moving allocated objects from the least allocated slabs to the most allocated
 657  * slabs.
 658  *
 659  * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
 660  * slabs are kept separately in an unordered list. Since the majority of slabs
 661  * tend to be completely allocated (a typical unfragmented cache may have
 662  * thousands of complete slabs and only a single partial slab), separating
 663  * complete slabs improves the efficiency of partial slab ordering, since the
 664  * complete slabs do not affect the depth or balance of the AVL tree. This
 665  * ordered sequence of partial slabs acts as a "free list" supplying objects for
 666  * allocation requests.
 667  *
 668  * Objects are always allocated from the first partial slab in the free list,
 669  * where the allocation is most likely to eliminate a partial slab (by
 670  * completely allocating it). Conversely, when a single object from a completely
 671  * allocated slab is freed to the slab, that slab is added to the front of the
 672  * free list. Since most free list activity involves highly allocated slabs
 673  * coming and going at the front of the list, slabs tend naturally toward the
 674  * ideal order: highly allocated at the front, sparsely allocated at the back.
 675  * Slabs with few allocated objects are likely to become completely free if they
 676  * keep a safe distance away from the front of the free list. Slab misorders
 677  * interfere with the natural tendency of slabs to become completely free or
 678  * completely allocated. For example, a slab with a single allocated object
 679  * needs only a single free to escape the cache; its natural desire is
 680  * frustrated when it finds itself at the front of the list where a second
 681  * allocation happens just before the free could have released it. Another slab
 682  * with all but one object allocated might have supplied the buffer instead, so
 683  * that both (as opposed to neither) of the slabs would have been taken off the
 684  * free list.
 685  *
 686  * Although slabs tend naturally toward the ideal order, misorders allowed by a
 687  * simple list implementation defeat the consolidator's strategy of merging
 688  * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
 689  * needs another way to fix misorders to optimize its callback strategy. One
 690  * approach is to periodically scan a limited number of slabs, advancing a
 691  * marker to hold the current scan position, and to move extreme misorders to
 692  * the front or back of the free list and to the front or back of the current
 693  * scan range. By making consecutive scan ranges overlap by one slab, the least
 694  * allocated slab in the current range can be carried along from the end of one
 695  * scan to the start of the next.
 696  *
 697  * Maintaining partial slabs in an AVL tree relieves kmem of this additional
 698  * task, however. Since most of the cache's activity is in the magazine layer,
 699  * and allocations from the slab layer represent only a startup cost, the
 700  * overhead of maintaining a balanced tree is not a significant concern compared
 701  * to the opportunity of reducing complexity by eliminating the partial slab
 702  * scanner just described. The overhead of an AVL tree is minimized by
 703  * maintaining only partial slabs in the tree and keeping completely allocated
 704  * slabs separately in a list. To avoid increasing the size of the slab
 705  * structure the AVL linkage pointers are reused for the slab's list linkage,
 706  * since the slab will always be either partial or complete, never stored both
 707  * ways at the same time. To further minimize the overhead of the AVL tree the
 708  * compare function that orders partial slabs by usage divides the range of
 709  * allocated object counts into bins such that counts within the same bin are
 710  * considered equal. Binning partial slabs makes it less likely that allocating
 711  * or freeing a single object will change the slab's order, requiring a tree
 712  * reinsertion (an avl_remove() followed by an avl_add(), both potentially
 713  * requiring some rebalancing of the tree). Allocation counts closest to
 714  * completely free and completely allocated are left unbinned (finely sorted) to
 715  * better support the consolidator's strategy of merging slabs at either
 716  * extreme.
 717  *
 718  * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
 719  *
 720  * The consolidator piggybacks on the kmem maintenance thread and is called on
 721  * the same interval as kmem_cache_update(), once per cache every fifteen
 722  * seconds. kmem maintains a running count of unallocated objects in the slab
 723  * layer (cache_bufslab). The consolidator checks whether that number exceeds
 724  * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
 725  * there is a significant number of slabs in the cache (arbitrarily a minimum
 726  * 101 total slabs). Unused objects that have fallen out of the magazine layer's
 727  * working set are included in the assessment, and magazines in the depot are
 728  * reaped if those objects would lift cache_bufslab above the fragmentation
 729  * threshold. Once the consolidator decides that a cache is fragmented, it looks
 730  * for a candidate slab to reclaim, starting at the end of the partial slab free
 731  * list and scanning backwards. At first the consolidator is choosy: only a slab
 732  * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
 733  * single allocated object, regardless of percentage). If there is difficulty
 734  * finding a candidate slab, kmem raises the allocation threshold incrementally,
 735  * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
 736  * external fragmentation (unused objects on the free list) below 12.5% (1/8),
 737  * even in the worst case of every slab in the cache being almost 7/8 allocated.
 738  * The threshold can also be lowered incrementally when candidate slabs are easy
 739  * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
 740  * is no longer fragmented.
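 *
 * In rough terms, the initial fragmentation test described above looks like
 * the following sketch (total_slabs is shorthand for the cache's slab count,
 * not an actual variable; the kmem_frag_* tunables are defined later in this
 * file):
 *
 *      fragmented =
 *          (cache_bufslab * kmem_frag_denom >
 *          cache_buftotal * kmem_frag_numer) &&    // more than 1/8 unused
 *          (total_slabs >= kmem_frag_minslabs);    // enough slabs to matter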
 741  *
 742  * 3.2 Generating Callbacks
 743  *
 744  * Once an eligible slab is chosen, a callback is generated for every allocated
 745  * object on the slab, in the hope that the client will move everything off the
 746  * slab and make it reclaimable. Objects selected as move destinations are
 747  * chosen from slabs at the front of the free list. Assuming slabs in the ideal
 748  * order (most allocated at the front, least allocated at the back) and a
 749  * cooperative client, the consolidator will succeed in removing slabs from both
 750  * ends of the free list, completely allocating on the one hand and completely
 751  * freeing on the other. Objects selected as move destinations are allocated in
 752  * the kmem maintenance thread where move requests are enqueued. A separate
 753  * callback thread removes pending callbacks from the queue and calls the
 754  * client. The separate thread ensures that client code (the move function) does
 755  * not interfere with internal kmem maintenance tasks. A map of pending
 756  * callbacks keyed by object address (the object to be moved) is checked to
 757  * ensure that duplicate callbacks are not generated for the same object.
 758  * Allocating the move destination (the object to move to) prevents subsequent
 759  * callbacks from selecting the same destination as an earlier pending callback.
 760  *
 761  * Move requests can also be generated by kmem_cache_reap() when the system is
 762  * desperate for memory and by kmem_cache_move_notify(), called by the client to
 763  * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
 764  * The map of pending callbacks is protected by the same lock that protects the
 765  * slab layer.
 766  *
 767  * When the system is desperate for memory, kmem does not bother to determine
 768  * whether or not the cache exceeds the fragmentation threshold, but tries to
 769  * consolidate as many slabs as possible. Normally, the consolidator chews
 770  * slowly, one sparsely allocated slab at a time during each maintenance
 771  * interval that the cache is fragmented. When desperate, the consolidator
 772  * starts at the last partial slab and enqueues callbacks for every allocated
 773  * object on every partial slab, working backwards until it reaches the first
 774  * partial slab. The first partial slab, meanwhile, advances in pace with the
 775  * consolidator as allocations to supply move destinations for the enqueued
 776  * callbacks use up the highly allocated slabs at the front of the free list.
 777  * Ideally, the overgrown free list collapses like an accordion, starting at
 778  * both ends and ending at the center with a single partial slab.
 779  *
 780  * 3.3 Client Responses
 781  *
 782  * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
 783  * marks the slab that supplied the stuck object non-reclaimable and moves it to
 784  * the front of the free list. The slab remains marked as long as it stays on the
 785  * free list, and it appears more allocated to the partial slab compare function
 786  * than any unmarked slab, no matter how many of its objects are allocated.
 787  * Since even one immovable object ties up the entire slab, the goal is to
 788  * completely allocate any slab that cannot be completely freed. kmem does not
 789  * bother generating callbacks to move objects from a marked slab unless the
 790  * system is desperate.
 791  *
 792  * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
 793  * slab. If the client responds LATER too many times, kmem disbelieves and
 794  * treats the response as a NO. The count is cleared when the slab is taken off
 795  * the partial slab list or when the client moves one of the slab's objects.
 796  *
 797  * 4. Observability
 798  *
 799  * A kmem cache's external fragmentation is best observed with 'mdb -k' using
 800  * the ::kmem_slabs dcmd. For a complete description of the command, enter
 801  * '::help kmem_slabs' at the mdb prompt.
 802  */
 803 
 804 #include <sys/kmem_impl.h>
 805 #include <sys/vmem_impl.h>
 806 #include <sys/param.h>
 807 #include <sys/sysmacros.h>
 808 #include <sys/vm.h>
 809 #include <sys/proc.h>
 810 #include <sys/tuneable.h>
 811 #include <sys/systm.h>
 812 #include <sys/cmn_err.h>
 813 #include <sys/debug.h>
 814 #include <sys/sdt.h>
 815 #include <sys/mutex.h>
 816 #include <sys/bitmap.h>
 817 #include <sys/atomic.h>
 818 #include <sys/kobj.h>
 819 #include <sys/disp.h>
 820 #include <vm/seg_kmem.h>
 821 #include <sys/log.h>
 822 #include <sys/callb.h>
 823 #include <sys/taskq.h>
 824 #include <sys/modctl.h>
 825 #include <sys/reboot.h>
 826 #include <sys/id32.h>
 827 #include <sys/zone.h>
 828 #include <sys/netstack.h>
 829 #ifdef  DEBUG
 830 #include <sys/random.h>
 831 #endif
 832 
 833 extern void streams_msg_init(void);
 834 extern int segkp_fromheap;
 835 extern void segkp_cache_free(void);
 836 extern int callout_init_done;
 837 
 838 struct kmem_cache_kstat {
 839         kstat_named_t   kmc_buf_size;
 840         kstat_named_t   kmc_align;
 841         kstat_named_t   kmc_chunk_size;
 842         kstat_named_t   kmc_slab_size;
 843         kstat_named_t   kmc_alloc;
 844         kstat_named_t   kmc_alloc_fail;
 845         kstat_named_t   kmc_free;
 846         kstat_named_t   kmc_depot_alloc;
 847         kstat_named_t   kmc_depot_free;
 848         kstat_named_t   kmc_depot_contention;
 849         kstat_named_t   kmc_slab_alloc;
 850         kstat_named_t   kmc_slab_free;
 851         kstat_named_t   kmc_buf_constructed;
 852         kstat_named_t   kmc_buf_avail;
 853         kstat_named_t   kmc_buf_inuse;
 854         kstat_named_t   kmc_buf_total;
 855         kstat_named_t   kmc_buf_max;
 856         kstat_named_t   kmc_slab_create;
 857         kstat_named_t   kmc_slab_destroy;
 858         kstat_named_t   kmc_vmem_source;
 859         kstat_named_t   kmc_hash_size;
 860         kstat_named_t   kmc_hash_lookup_depth;
 861         kstat_named_t   kmc_hash_rescale;
 862         kstat_named_t   kmc_full_magazines;
 863         kstat_named_t   kmc_empty_magazines;
 864         kstat_named_t   kmc_magazine_size;
 865         kstat_named_t   kmc_reap; /* number of kmem_cache_reap() calls */
 866         kstat_named_t   kmc_defrag; /* attempts to defrag all partial slabs */
 867         kstat_named_t   kmc_scan; /* attempts to defrag one partial slab */
 868         kstat_named_t   kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
 869         kstat_named_t   kmc_move_yes;
 870         kstat_named_t   kmc_move_no;
 871         kstat_named_t   kmc_move_later;
 872         kstat_named_t   kmc_move_dont_need;
 873         kstat_named_t   kmc_move_dont_know; /* obj unrecognized by client ... */
 874         kstat_named_t   kmc_move_hunt_found; /* ... but found in mag layer */
 875         kstat_named_t   kmc_move_slabs_freed; /* slabs freed by consolidator */
 876         kstat_named_t   kmc_move_reclaimable; /* buffers, if consolidator ran */
 877 } kmem_cache_kstat = {
 878         { "buf_size",           KSTAT_DATA_UINT64 },
 879         { "align",              KSTAT_DATA_UINT64 },
 880         { "chunk_size",         KSTAT_DATA_UINT64 },
 881         { "slab_size",          KSTAT_DATA_UINT64 },
 882         { "alloc",              KSTAT_DATA_UINT64 },
 883         { "alloc_fail",         KSTAT_DATA_UINT64 },
 884         { "free",               KSTAT_DATA_UINT64 },
 885         { "depot_alloc",        KSTAT_DATA_UINT64 },
 886         { "depot_free",         KSTAT_DATA_UINT64 },
 887         { "depot_contention",   KSTAT_DATA_UINT64 },
 888         { "slab_alloc",         KSTAT_DATA_UINT64 },
 889         { "slab_free",          KSTAT_DATA_UINT64 },
 890         { "buf_constructed",    KSTAT_DATA_UINT64 },
 891         { "buf_avail",          KSTAT_DATA_UINT64 },
 892         { "buf_inuse",          KSTAT_DATA_UINT64 },
 893         { "buf_total",          KSTAT_DATA_UINT64 },
 894         { "buf_max",            KSTAT_DATA_UINT64 },
 895         { "slab_create",        KSTAT_DATA_UINT64 },
 896         { "slab_destroy",       KSTAT_DATA_UINT64 },
 897         { "vmem_source",        KSTAT_DATA_UINT64 },
 898         { "hash_size",          KSTAT_DATA_UINT64 },
 899         { "hash_lookup_depth",  KSTAT_DATA_UINT64 },
 900         { "hash_rescale",       KSTAT_DATA_UINT64 },
 901         { "full_magazines",     KSTAT_DATA_UINT64 },
 902         { "empty_magazines",    KSTAT_DATA_UINT64 },
 903         { "magazine_size",      KSTAT_DATA_UINT64 },
 904         { "reap",               KSTAT_DATA_UINT64 },
 905         { "defrag",             KSTAT_DATA_UINT64 },
 906         { "scan",               KSTAT_DATA_UINT64 },
 907         { "move_callbacks",     KSTAT_DATA_UINT64 },
 908         { "move_yes",           KSTAT_DATA_UINT64 },
 909         { "move_no",            KSTAT_DATA_UINT64 },
 910         { "move_later",         KSTAT_DATA_UINT64 },
 911         { "move_dont_need",     KSTAT_DATA_UINT64 },
 912         { "move_dont_know",     KSTAT_DATA_UINT64 },
 913         { "move_hunt_found",    KSTAT_DATA_UINT64 },
 914         { "move_slabs_freed",   KSTAT_DATA_UINT64 },
 915         { "move_reclaimable",   KSTAT_DATA_UINT64 },
 916 };
 917 
 918 static kmutex_t kmem_cache_kstat_lock;
 919 
 920 /*
 921  * The default set of caches to back kmem_alloc().
 922  * These sizes should be reevaluated periodically.
 923  *
 924  * We want allocations that are multiples of the coherency granularity
 925  * (64 bytes) to be satisfied from a cache which is a multiple of 64
 926  * bytes, so that it will be 64-byte aligned.  For all multiples of 64,
 927  * the next kmem_cache_size greater than or equal to it must be a
 928  * multiple of 64.
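 * For example, a 960-byte (15 * 64) request is satisfied from the
 * P2ALIGN(8192 / 7, 64) = 1152-byte cache below, also a multiple of 64.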
 929  *
 930  * We split the table into two sections:  size <= 4k and size > 4k.  This
 931  * saves a lot of space and cache footprint in our cache tables.
 932  */
 933 static const int kmem_alloc_sizes[] = {
 934         1 * 8,
 935         2 * 8,
 936         3 * 8,
 937         4 * 8,          5 * 8,          6 * 8,          7 * 8,
 938         4 * 16,         5 * 16,         6 * 16,         7 * 16,
 939         4 * 32,         5 * 32,         6 * 32,         7 * 32,
 940         4 * 64,         5 * 64,         6 * 64,         7 * 64,
 941         4 * 128,        5 * 128,        6 * 128,        7 * 128,
 942         P2ALIGN(8192 / 7, 64),
 943         P2ALIGN(8192 / 6, 64),
 944         P2ALIGN(8192 / 5, 64),
 945         P2ALIGN(8192 / 4, 64),
 946         P2ALIGN(8192 / 3, 64),
 947         P2ALIGN(8192 / 2, 64),
 948 };
 949 
 950 static const int kmem_big_alloc_sizes[] = {
 951         2 * 4096,       3 * 4096,
 952         2 * 8192,       3 * 8192,
 953         4 * 8192,       5 * 8192,       6 * 8192,       7 * 8192,
 954         8 * 8192,       9 * 8192,       10 * 8192,      11 * 8192,
 955         12 * 8192,      13 * 8192,      14 * 8192,      15 * 8192,
 956         16 * 8192
 957 };
 958 
 959 #define KMEM_MAXBUF             4096
 960 #define KMEM_BIG_MAXBUF_32BIT   32768
 961 #define KMEM_BIG_MAXBUF         131072
 962 
 963 #define KMEM_BIG_MULTIPLE       4096    /* big_alloc_sizes must be a multiple */
 964 #define KMEM_BIG_SHIFT          12      /* lg(KMEM_BIG_MULTIPLE) */
 965 
 966 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
 967 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
 968 
 969 #define KMEM_ALLOC_TABLE_MAX    (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
 970 static size_t kmem_big_alloc_table_max = 0;     /* # of filled elements */
 971 
 972 static kmem_magtype_t kmem_magtype[] = {
 973         { 1,    8,      3200,   65536   },
 974         { 3,    16,     256,    32768   },
 975         { 7,    32,     64,     16384   },
 976         { 15,   64,     0,      8192    },
 977         { 31,   64,     0,      4096    },
 978         { 47,   64,     0,      2048    },
 979         { 63,   64,     0,      1024    },
 980         { 95,   64,     0,      512     },
 981         { 143,  64,     0,      0       },
 982 };
 983 
 984 static uint32_t kmem_reaping;
 985 static uint32_t kmem_reaping_idspace;
 986 
 987 /*
 988  * kmem tunables
 989  */
 990 clock_t kmem_reap_interval;     /* cache reaping rate [15 * HZ ticks] */
 991 int kmem_depot_contention = 3;  /* max failed tryenters per real interval */
 992 pgcnt_t kmem_reapahead = 0;     /* start reaping N pages before pageout */
 993 int kmem_panic = 1;             /* whether to panic on error */
 994 int kmem_logging = 1;           /* kmem_log_enter() override */
 995 uint32_t kmem_mtbf = 0;         /* mean time between failures [default: off] */
 996 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
 997 size_t kmem_content_log_size;   /* content log size [2% of memory] */
 998 size_t kmem_failure_log_size;   /* failure log [4 pages per CPU] */
 999 size_t kmem_slab_log_size;      /* slab create log [4 pages per CPU] */
1000 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1001 size_t kmem_lite_minsize = 0;   /* minimum buffer size for KMF_LITE */
1002 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1003 int kmem_lite_pcs = 4;          /* number of PCs to store in KMF_LITE mode */
1004 size_t kmem_maxverify;          /* maximum bytes to inspect in debug routines */
1005 size_t kmem_minfirewall;        /* hardware-enforced redzone threshold */
1006 
1007 #ifdef _LP64
1008 size_t  kmem_max_cached = KMEM_BIG_MAXBUF;      /* maximum kmem_alloc cache */
1009 #else
1010 size_t  kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1011 #endif
1012 
1013 #ifdef DEBUG
1014 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1015 #else
1016 int kmem_flags = 0;
1017 #endif
1018 int kmem_ready;
1019 
1020 static kmem_cache_t     *kmem_slab_cache;
1021 static kmem_cache_t     *kmem_bufctl_cache;
1022 static kmem_cache_t     *kmem_bufctl_audit_cache;
1023 
1024 static kmutex_t         kmem_cache_lock;        /* inter-cache linkage only */
1025 static list_t           kmem_caches;
1026 
1027 static taskq_t          *kmem_taskq;
1028 static kmutex_t         kmem_flags_lock;
1029 static vmem_t           *kmem_metadata_arena;
1030 static vmem_t           *kmem_msb_arena;        /* arena for metadata caches */
1031 static vmem_t           *kmem_cache_arena;
1032 static vmem_t           *kmem_hash_arena;
1033 static vmem_t           *kmem_log_arena;
1034 static vmem_t           *kmem_oversize_arena;
1035 static vmem_t           *kmem_va_arena;
1036 static vmem_t           *kmem_default_arena;
1037 static vmem_t           *kmem_firewall_va_arena;
1038 static vmem_t           *kmem_firewall_arena;
1039 
1040 /*
1041  * Define KMEM_STATS to turn on statistic gathering. By default, it is only
1042  * turned on when DEBUG is also defined.
1043  */
1044 #ifdef  DEBUG
1045 #define KMEM_STATS
1046 #endif  /* DEBUG */
1047 
1048 #ifdef  KMEM_STATS
1049 #define KMEM_STAT_ADD(stat)                     ((stat)++)
1050 #define KMEM_STAT_COND_ADD(cond, stat)          ((void) (!(cond) || (stat)++))
1051 #else
1052 #define KMEM_STAT_ADD(stat)                     /* nothing */
1053 #define KMEM_STAT_COND_ADD(cond, stat)          /* nothing */
1054 #endif  /* KMEM_STATS */
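/*
 * Typical usage of the stat macros (the first line mirrors the avl_update
 * accounting in kmem_slab_free() below; the second is only an illustrative
 * condition):
 *
 *	KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
 *	KMEM_STAT_COND_ADD(refcnt == 0, kmem_move_stats.kms_dead_slabs_freed);
 *
 * When KMEM_STATS is not defined both macros expand to nothing, so the
 * counters add no cost to non-DEBUG kernels.
 */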
1055 
1056 /*
1057  * kmem slab consolidator thresholds (tunables)
1058  */
1059 size_t kmem_frag_minslabs = 101;        /* minimum total slabs */
1060 size_t kmem_frag_numer = 1;             /* free buffers (numerator) */
1061 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1062 /*
1063  * Maximum number of slabs from which to move buffers during a single
1064  * maintenance interval while the system is not low on memory.
1065  */
1066 size_t kmem_reclaim_max_slabs = 1;
1067 /*
1068  * Number of slabs to scan backwards from the end of the partial slab list
1069  * when searching for buffers to relocate.
1070  */
1071 size_t kmem_reclaim_scan_range = 12;
1072 
1073 #ifdef  KMEM_STATS
1074 static struct {
1075         uint64_t kms_callbacks;
1076         uint64_t kms_yes;
1077         uint64_t kms_no;
1078         uint64_t kms_later;
1079         uint64_t kms_dont_need;
1080         uint64_t kms_dont_know;
1081         uint64_t kms_hunt_found_mag;
1082         uint64_t kms_hunt_found_slab;
1083         uint64_t kms_hunt_alloc_fail;
1084         uint64_t kms_hunt_lucky;
1085         uint64_t kms_notify;
1086         uint64_t kms_notify_callbacks;
1087         uint64_t kms_disbelief;
1088         uint64_t kms_already_pending;
1089         uint64_t kms_callback_alloc_fail;
1090         uint64_t kms_callback_taskq_fail;
1091         uint64_t kms_endscan_slab_dead;
1092         uint64_t kms_endscan_slab_destroyed;
1093         uint64_t kms_endscan_nomem;
1094         uint64_t kms_endscan_refcnt_changed;
1095         uint64_t kms_endscan_nomove_changed;
1096         uint64_t kms_endscan_freelist;
1097         uint64_t kms_avl_update;
1098         uint64_t kms_avl_noupdate;
1099         uint64_t kms_no_longer_reclaimable;
1100         uint64_t kms_notify_no_longer_reclaimable;
1101         uint64_t kms_notify_slab_dead;
1102         uint64_t kms_notify_slab_destroyed;
1103         uint64_t kms_alloc_fail;
1104         uint64_t kms_constructor_fail;
1105         uint64_t kms_dead_slabs_freed;
1106         uint64_t kms_defrags;
1107         uint64_t kms_scans;
1108         uint64_t kms_scan_depot_ws_reaps;
1109         uint64_t kms_debug_reaps;
1110         uint64_t kms_debug_scans;
1111 } kmem_move_stats;
1112 #endif  /* KMEM_STATS */
1113 
1114 /* consolidator knobs */
1115 static boolean_t kmem_move_noreap;
1116 static boolean_t kmem_move_blocked;
1117 static boolean_t kmem_move_fulltilt;
1118 static boolean_t kmem_move_any_partial;
1119 
1120 #ifdef  DEBUG
1121 /*
1122  * kmem consolidator debug tunables:
1123  * Ensure code coverage by occasionally running the consolidator even when the
1124  * caches are not fragmented (they may never be). These intervals are mean
1125  * times, expressed in cache maintenance intervals (kmem_cache_update).
1126  */
1127 uint32_t kmem_mtb_move = 60;    /* defrag 1 slab (~15min) */
1128 uint32_t kmem_mtb_reap = 1800;  /* defrag all slabs (~7.5hrs) */
1129 #endif  /* DEBUG */
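/*
 * Worked example of the intervals above, assuming the default 15-second
 * maintenance interval noted for kmem_reap_interval: kmem_mtb_move = 60
 * forces a single-slab defrag on average every 60 * 15s = 15 minutes per
 * cache, and kmem_mtb_reap = 1800 forces a full defrag reap on average
 * every 1800 * 15s = 7.5 hours, matching the annotations above.
 */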
1130 
1131 static kmem_cache_t     *kmem_defrag_cache;
1132 static kmem_cache_t     *kmem_move_cache;
1133 static taskq_t          *kmem_move_taskq;
1134 
1135 static void kmem_cache_scan(kmem_cache_t *);
1136 static void kmem_cache_defrag(kmem_cache_t *);
1137 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1138 
1139 
1140 kmem_log_header_t       *kmem_transaction_log;
1141 kmem_log_header_t       *kmem_content_log;
1142 kmem_log_header_t       *kmem_failure_log;
1143 kmem_log_header_t       *kmem_slab_log;
1144 
1145 static int              kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1146 
1147 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller)                       \
1148         if ((count) > 0) {                                           \
1149                 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1150                 pc_t *_e;                                               \
1151                 /* memmove() the old entries down one notch */          \
1152                 for (_e = &_s[(count) - 1]; _e > _s; _e--)               \
1153                         *_e = *(_e - 1);                                \
1154                 *_s = (uintptr_t)(caller);                              \
1155         }
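/*
 * Illustration of KMEM_BUFTAG_LITE_ENTER() above: with a 4-entry history
 * of { pc1, pc2, pc3, pc4 }, recording a new caller shifts the old
 * entries toward the end and drops the oldest:
 *
 *	before:	bt_history[] = { pc1, pc2, pc3, pc4 }
 *	after:	bt_history[] = { caller, pc1, pc2, pc3 }
 *
 * so bt_history[0] always holds the most recent program counter.
 */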
1156 
1157 #define KMERR_MODIFIED  0       /* buffer modified while on freelist */
1158 #define KMERR_REDZONE   1       /* redzone violation (write past end of buf) */
1159 #define KMERR_DUPFREE   2       /* freed a buffer twice */
1160 #define KMERR_BADADDR   3       /* freed a bad (unallocated) address */
1161 #define KMERR_BADBUFTAG 4       /* buftag corrupted */
1162 #define KMERR_BADBUFCTL 5       /* bufctl corrupted */
1163 #define KMERR_BADCACHE  6       /* freed a buffer to the wrong cache */
1164 #define KMERR_BADSIZE   7       /* alloc size != free size */
1165 #define KMERR_BADBASE   8       /* buffer base address wrong */
1166 
1167 struct {
1168         hrtime_t        kmp_timestamp;  /* timestamp of panic */
1169         int             kmp_error;      /* type of kmem error */
1170         void            *kmp_buffer;    /* buffer that induced panic */
1171         void            *kmp_realbuf;   /* real start address for buffer */
1172         kmem_cache_t    *kmp_cache;     /* buffer's cache according to client */
1173         kmem_cache_t    *kmp_realcache; /* actual cache containing buffer */
1174         kmem_slab_t     *kmp_slab;      /* slab according to kmem_findslab() */
1175         kmem_bufctl_t   *kmp_bufctl;    /* bufctl */
1176 } kmem_panic_info;
1177 
1178 
1179 static void
1180 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1181 {
1182         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1183         uint64_t *buf = buf_arg;
1184 
1185         while (buf < bufend)
1186                 *buf++ = pattern;
1187 }
1188 
1189 static void *
1190 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1191 {
1192         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1193         uint64_t *buf;
1194 
1195         for (buf = buf_arg; buf < bufend; buf++)
1196                 if (*buf != pattern)
1197                         return (buf);
1198         return (NULL);
1199 }
1200 
1201 static void *
1202 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1203 {
1204         uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1205         uint64_t *buf;
1206 
1207         for (buf = buf_arg; buf < bufend; buf++) {
1208                 if (*buf != old) {
1209                         copy_pattern(old, buf_arg,
1210                             (char *)buf - (char *)buf_arg);
1211                         return (buf);
1212                 }
1213                 *buf = new;
1214         }
1215 
1216         return (NULL);
1217 }
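/*
 * The three pattern helpers above back most of the debug support:
 * copy_pattern() fills a buffer with a 64-bit pattern, verify_pattern()
 * returns the address of the first mismatching word (or NULL if the buffer
 * is clean), and verify_and_copy_pattern() checks the old pattern while
 * writing the new one, restoring the old pattern before returning the
 * mismatch address on failure.  For example, a freed buffer filled with
 * KMEM_FREE_PATTERN is revalidated at allocation time with
 *
 *	verify_and_copy_pattern(KMEM_FREE_PATTERN,
 *	    KMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify);
 *
 * as in kmem_cache_alloc_debug() below.
 */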
1218 
1219 static void
1220 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1221 {
1222         kmem_cache_t *cp;
1223 
1224         mutex_enter(&kmem_cache_lock);
1225         for (cp = list_head(&kmem_caches); cp != NULL;
1226             cp = list_next(&kmem_caches, cp))
1227                 if (tq != NULL)
1228                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1229                             tqflag);
1230                 else
1231                         func(cp);
1232         mutex_exit(&kmem_cache_lock);
1233 }
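/*
 * kmem_cache_applyall() either dispatches func for every cache onto the
 * given taskq or, when tq is NULL, calls it synchronously while holding
 * kmem_cache_lock.  A hypothetical invocation (arguments chosen only for
 * illustration) might look like:
 *
 *	kmem_cache_applyall(kmem_depot_ws_reap, kmem_taskq, TQ_NOSLEEP);
 */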
1234 
1235 static void
1236 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1237 {
1238         kmem_cache_t *cp;
1239 
1240         mutex_enter(&kmem_cache_lock);
1241         for (cp = list_head(&kmem_caches); cp != NULL;
1242             cp = list_next(&kmem_caches, cp)) {
1243                 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1244                         continue;
1245                 if (tq != NULL)
1246                         (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1247                             tqflag);
1248                 else
1249                         func(cp);
1250         }
1251         mutex_exit(&kmem_cache_lock);
1252 }
1253 
1254 /*
1255  * Debugging support.  Given a buffer address, find its slab.
1256  */
1257 static kmem_slab_t *
1258 kmem_findslab(kmem_cache_t *cp, void *buf)
1259 {
1260         kmem_slab_t *sp;
1261 
1262         mutex_enter(&cp->cache_lock);
1263         for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1264             sp = list_next(&cp->cache_complete_slabs, sp)) {
1265                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1266                         mutex_exit(&cp->cache_lock);
1267                         return (sp);
1268                 }
1269         }
1270         for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1271             sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1272                 if (KMEM_SLAB_MEMBER(sp, buf)) {
1273                         mutex_exit(&cp->cache_lock);
1274                         return (sp);
1275                 }
1276         }
1277         mutex_exit(&cp->cache_lock);
1278 
1279         return (NULL);
1280 }
1281 
1282 static void
1283 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1284 {
1285         kmem_buftag_t *btp = NULL;
1286         kmem_bufctl_t *bcp = NULL;
1287         kmem_cache_t *cp = cparg;
1288         kmem_slab_t *sp;
1289         uint64_t *off;
1290         void *buf = bufarg;
1291 
1292         kmem_logging = 0;       /* stop logging when a bad thing happens */
1293 
1294         kmem_panic_info.kmp_timestamp = gethrtime();
1295 
1296         sp = kmem_findslab(cp, buf);
1297         if (sp == NULL) {
1298                 for (cp = list_tail(&kmem_caches); cp != NULL;
1299                     cp = list_prev(&kmem_caches, cp)) {
1300                         if ((sp = kmem_findslab(cp, buf)) != NULL)
1301                                 break;
1302                 }
1303         }
1304 
1305         if (sp == NULL) {
1306                 cp = NULL;
1307                 error = KMERR_BADADDR;
1308         } else {
1309                 if (cp != cparg)
1310                         error = KMERR_BADCACHE;
1311                 else
1312                         buf = (char *)bufarg - ((uintptr_t)bufarg -
1313                             (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1314                 if (buf != bufarg)
1315                         error = KMERR_BADBASE;
1316                 if (cp->cache_flags & KMF_BUFTAG)
1317                         btp = KMEM_BUFTAG(cp, buf);
1318                 if (cp->cache_flags & KMF_HASH) {
1319                         mutex_enter(&cp->cache_lock);
1320                         for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1321                                 if (bcp->bc_addr == buf)
1322                                         break;
1323                         mutex_exit(&cp->cache_lock);
1324                         if (bcp == NULL && btp != NULL)
1325                                 bcp = btp->bt_bufctl;
1326                         if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1327                             NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1328                             bcp->bc_addr != buf) {
1329                                 error = KMERR_BADBUFCTL;
1330                                 bcp = NULL;
1331                         }
1332                 }
1333         }
1334 
1335         kmem_panic_info.kmp_error = error;
1336         kmem_panic_info.kmp_buffer = bufarg;
1337         kmem_panic_info.kmp_realbuf = buf;
1338         kmem_panic_info.kmp_cache = cparg;
1339         kmem_panic_info.kmp_realcache = cp;
1340         kmem_panic_info.kmp_slab = sp;
1341         kmem_panic_info.kmp_bufctl = bcp;
1342 
1343         printf("kernel memory allocator: ");
1344 
1345         switch (error) {
1346 
1347         case KMERR_MODIFIED:
1348                 printf("buffer modified after being freed\n");
1349                 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1350                 if (off == NULL)        /* shouldn't happen */
1351                         off = buf;
1352                 printf("modification occurred at offset 0x%lx "
1353                     "(0x%llx replaced by 0x%llx)\n",
1354                     (uintptr_t)off - (uintptr_t)buf,
1355                     (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1356                 break;
1357 
1358         case KMERR_REDZONE:
1359                 printf("redzone violation: write past end of buffer\n");
1360                 break;
1361 
1362         case KMERR_BADADDR:
1363                 printf("invalid free: buffer not in cache\n");
1364                 break;
1365 
1366         case KMERR_DUPFREE:
1367                 printf("duplicate free: buffer freed twice\n");
1368                 break;
1369 
1370         case KMERR_BADBUFTAG:
1371                 printf("boundary tag corrupted\n");
1372                 printf("bcp ^ bxstat = %lx, should be %lx\n",
1373                     (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1374                     KMEM_BUFTAG_FREE);
1375                 break;
1376 
1377         case KMERR_BADBUFCTL:
1378                 printf("bufctl corrupted\n");
1379                 break;
1380 
1381         case KMERR_BADCACHE:
1382                 printf("buffer freed to wrong cache\n");
1383                 printf("buffer was allocated from %s,\n", cp->cache_name);
1384                 printf("caller attempting free to %s.\n", cparg->cache_name);
1385                 break;
1386 
1387         case KMERR_BADSIZE:
1388                 printf("bad free: free size (%u) != alloc size (%u)\n",
1389                     KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1390                     KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1391                 break;
1392 
1393         case KMERR_BADBASE:
1394                 printf("bad free: free address (%p) != alloc address (%p)\n",
1395                     bufarg, buf);
1396                 break;
1397         }
1398 
1399         printf("buffer=%p  bufctl=%p  cache: %s\n",
1400             bufarg, (void *)bcp, cparg->cache_name);
1401 
1402         if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1403             error != KMERR_BADBUFCTL) {
1404                 int d;
1405                 timestruc_t ts;
1406                 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1407 
1408                 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1409                 printf("previous transaction on buffer %p:\n", buf);
1410                 printf("thread=%p  time=T-%ld.%09ld  slab=%p  cache: %s\n",
1411                     (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1412                     (void *)sp, cp->cache_name);
1413                 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1414                         ulong_t off;
1415                         char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1416                         printf("%s+%lx\n", sym ? sym : "?", off);
1417                 }
1418         }
1419         if (kmem_panic > 0)
1420                 panic("kernel heap corruption detected");
1421         if (kmem_panic == 0)
1422                 debug_enter(NULL);
1423         kmem_logging = 1;       /* resume logging */
1424 }
1425 
1426 static kmem_log_header_t *
1427 kmem_log_init(size_t logsize)
1428 {
1429         kmem_log_header_t *lhp;
1430         int nchunks = 4 * max_ncpus;
1431         size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1432         int i;
1433 
1434         /*
1435          * Make sure that lhp->lh_cpu[] is nicely aligned
1436          * to prevent false sharing of cache lines.
1437          */
1438         lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1439         lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1440             NULL, NULL, VM_SLEEP);
1441         bzero(lhp, lhsize);
1442 
1443         mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1444         lhp->lh_nchunks = nchunks;
1445         lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1446         lhp->lh_base = vmem_alloc(kmem_log_arena,
1447             lhp->lh_chunksize * nchunks, VM_SLEEP);
1448         lhp->lh_free = vmem_alloc(kmem_log_arena,
1449             nchunks * sizeof (int), VM_SLEEP);
1450         bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1451 
1452         for (i = 0; i < max_ncpus; i++) {
1453                 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1454                 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1455                 clhp->clh_chunk = i;
1456         }
1457 
1458         for (i = max_ncpus; i < nchunks; i++)
1459                 lhp->lh_free[i] = i;
1460 
1461         lhp->lh_head = max_ncpus;
1462         lhp->lh_tail = 0;
1463 
1464         return (lhp);
1465 }
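/*
 * Worked example of the chunk bookkeeping in kmem_log_init(): with
 * max_ncpus = 4 the log gets nchunks = 16 chunks.  Chunks 0-3 become the
 * initial per-CPU chunks (clh_chunk = i), chunks 4-15 sit on the lh_free
 * ring, and lh_head starts at 4 with lh_tail at 0.  When a CPU fills its
 * chunk in kmem_log_enter(), the full chunk is pushed at lh_tail and a
 * replacement is taken from lh_head, so the log always recycles its
 * oldest data first.
 */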
1466 
1467 static void *
1468 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1469 {
1470         void *logspace;
1471         kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1472 
1473         if (lhp == NULL || kmem_logging == 0 || panicstr)
1474                 return (NULL);
1475 
1476         mutex_enter(&clhp->clh_lock);
1477         clhp->clh_hits++;
1478         if (size > clhp->clh_avail) {
1479                 mutex_enter(&lhp->lh_lock);
1480                 lhp->lh_hits++;
1481                 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1482                 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1483                 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1484                 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1485                 clhp->clh_current = lhp->lh_base +
1486                     clhp->clh_chunk * lhp->lh_chunksize;
1487                 clhp->clh_avail = lhp->lh_chunksize;
1488                 if (size > lhp->lh_chunksize)
1489                         size = lhp->lh_chunksize;
1490                 mutex_exit(&lhp->lh_lock);
1491         }
1492         logspace = clhp->clh_current;
1493         clhp->clh_current += size;
1494         clhp->clh_avail -= size;
1495         bcopy(data, logspace, size);
1496         mutex_exit(&clhp->clh_lock);
1497         return (logspace);
1498 }
1499 
1500 #define KMEM_AUDIT(lp, cp, bcp)                                         \
1501 {                                                                       \
1502         kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp);       \
1503         _bcp->bc_timestamp = gethrtime();                            \
1504         _bcp->bc_thread = curthread;                                 \
1505         _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH);    \
1506         _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp));       \
1507 }
1508 
1509 static void
1510 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1511         kmem_slab_t *sp, void *addr)
1512 {
1513         kmem_bufctl_audit_t bca;
1514 
1515         bzero(&bca, sizeof (kmem_bufctl_audit_t));
1516         bca.bc_addr = addr;
1517         bca.bc_slab = sp;
1518         bca.bc_cache = cp;
1519         KMEM_AUDIT(lp, cp, &bca);
1520 }
1521 
1522 /*
1523  * Create a new slab for cache cp.
1524  */
1525 static kmem_slab_t *
1526 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1527 {
1528         size_t slabsize = cp->cache_slabsize;
1529         size_t chunksize = cp->cache_chunksize;
1530         int cache_flags = cp->cache_flags;
1531         size_t color, chunks;
1532         char *buf, *slab;
1533         kmem_slab_t *sp;
1534         kmem_bufctl_t *bcp;
1535         vmem_t *vmp = cp->cache_arena;
1536 
1537         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1538 
1539         color = cp->cache_color + cp->cache_align;
1540         if (color > cp->cache_maxcolor)
1541                 color = cp->cache_mincolor;
1542         cp->cache_color = color;
1543 
1544         slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1545 
1546         if (slab == NULL)
1547                 goto vmem_alloc_failure;
1548 
1549         ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1550 
1551         /*
1552          * Reverify what was already checked in kmem_cache_set_move(), since the
1553          * consolidator depends (for correctness) on slabs being initialized
1554          * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1555          * clients to distinguish uninitialized memory from known objects).
1556          */
1557         ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1558         if (!(cp->cache_cflags & KMC_NOTOUCH))
1559                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1560 
1561         if (cache_flags & KMF_HASH) {
1562                 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1563                         goto slab_alloc_failure;
1564                 chunks = (slabsize - color) / chunksize;
1565         } else {
1566                 sp = KMEM_SLAB(cp, slab);
1567                 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1568         }
1569 
1570         sp->slab_cache       = cp;
1571         sp->slab_head        = NULL;
1572         sp->slab_refcnt      = 0;
1573         sp->slab_base        = buf = slab + color;
1574         sp->slab_chunks      = chunks;
1575         sp->slab_stuck_offset = (uint32_t)-1;
1576         sp->slab_later_count = 0;
1577         sp->slab_flags = 0;
1578 
1579         ASSERT(chunks > 0);
1580         while (chunks-- != 0) {
1581                 if (cache_flags & KMF_HASH) {
1582                         bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1583                         if (bcp == NULL)
1584                                 goto bufctl_alloc_failure;
1585                         if (cache_flags & KMF_AUDIT) {
1586                                 kmem_bufctl_audit_t *bcap =
1587                                     (kmem_bufctl_audit_t *)bcp;
1588                                 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1589                                 bcap->bc_cache = cp;
1590                         }
1591                         bcp->bc_addr = buf;
1592                         bcp->bc_slab = sp;
1593                 } else {
1594                         bcp = KMEM_BUFCTL(cp, buf);
1595                 }
1596                 if (cache_flags & KMF_BUFTAG) {
1597                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1598                         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1599                         btp->bt_bufctl = bcp;
1600                         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1601                         if (cache_flags & KMF_DEADBEEF) {
1602                                 copy_pattern(KMEM_FREE_PATTERN, buf,
1603                                     cp->cache_verify);
1604                         }
1605                 }
1606                 bcp->bc_next = sp->slab_head;
1607                 sp->slab_head = bcp;
1608                 buf += chunksize;
1609         }
1610 
1611         kmem_log_event(kmem_slab_log, cp, sp, slab);
1612 
1613         return (sp);
1614 
1615 bufctl_alloc_failure:
1616 
1617         while ((bcp = sp->slab_head) != NULL) {
1618                 sp->slab_head = bcp->bc_next;
1619                 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1620         }
1621         kmem_cache_free(kmem_slab_cache, sp);
1622 
1623 slab_alloc_failure:
1624 
1625         vmem_free(vmp, slab, slabsize);
1626 
1627 vmem_alloc_failure:
1628 
1629         kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1630         atomic_inc_64(&cp->cache_alloc_fail);
1631 
1632         return (NULL);
1633 }
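/*
 * Slab layout sketch for the two cases above (sizes are illustrative
 * only): for a KMF_HASH cache the whole slab is buffer space, so a 4K
 * slab with 512-byte chunks and no color offset holds 8 chunks and the
 * kmem_slab_t comes separately from kmem_slab_cache.  For a non-hashed
 * cache the kmem_slab_t is carved out of the slab itself, so the same 4K
 * slab holds (4096 - sizeof (kmem_slab_t) - color) / chunksize chunks,
 * slightly fewer than the hashed case.
 */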
1634 
1635 /*
1636  * Destroy a slab.
1637  */
1638 static void
1639 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1640 {
1641         vmem_t *vmp = cp->cache_arena;
1642         void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1643 
1644         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1645         ASSERT(sp->slab_refcnt == 0);
1646 
1647         if (cp->cache_flags & KMF_HASH) {
1648                 kmem_bufctl_t *bcp;
1649                 while ((bcp = sp->slab_head) != NULL) {
1650                         sp->slab_head = bcp->bc_next;
1651                         kmem_cache_free(cp->cache_bufctl_cache, bcp);
1652                 }
1653                 kmem_cache_free(kmem_slab_cache, sp);
1654         }
1655         vmem_free(vmp, slab, cp->cache_slabsize);
1656 }
1657 
1658 static void *
1659 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1660 {
1661         kmem_bufctl_t *bcp, **hash_bucket;
1662         void *buf;
1663         boolean_t new_slab = (sp->slab_refcnt == 0);
1664 
1665         ASSERT(MUTEX_HELD(&cp->cache_lock));
1666         /*
1667          * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1668          * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1669          * slab is newly created.
1670          */
1671         ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1672             (sp == avl_first(&cp->cache_partial_slabs))));
1673         ASSERT(sp->slab_cache == cp);
1674 
1675         cp->cache_slab_alloc++;
1676         cp->cache_bufslab--;
1677         sp->slab_refcnt++;
1678 
1679         bcp = sp->slab_head;
1680         sp->slab_head = bcp->bc_next;
1681 
1682         if (cp->cache_flags & KMF_HASH) {
1683                 /*
1684                  * Add buffer to allocated-address hash table.
1685                  */
1686                 buf = bcp->bc_addr;
1687                 hash_bucket = KMEM_HASH(cp, buf);
1688                 bcp->bc_next = *hash_bucket;
1689                 *hash_bucket = bcp;
1690                 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1691                         KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1692                 }
1693         } else {
1694                 buf = KMEM_BUF(cp, bcp);
1695         }
1696 
1697         ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1698 
1699         if (sp->slab_head == NULL) {
1700                 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1701                 if (new_slab) {
1702                         ASSERT(sp->slab_chunks == 1);
1703                 } else {
1704                         ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1705                         avl_remove(&cp->cache_partial_slabs, sp);
1706                         sp->slab_later_count = 0; /* clear history */
1707                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1708                         sp->slab_stuck_offset = (uint32_t)-1;
1709                 }
1710                 list_insert_head(&cp->cache_complete_slabs, sp);
1711                 cp->cache_complete_slab_count++;
1712                 return (buf);
1713         }
1714 
1715         ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1716         /*
1717          * Peek to see if the magazine layer is enabled before
1718          * we prefill.  We're not holding the cpu cache lock,
1719          * so the peek could be wrong, but there's no harm in it.
1720          */
1721         if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1722             (KMEM_CPU_CACHE(cp)->cc_magsize != 0))  {
1723                 kmem_slab_prefill(cp, sp);
1724                 return (buf);
1725         }
1726 
1727         if (new_slab) {
1728                 avl_add(&cp->cache_partial_slabs, sp);
1729                 return (buf);
1730         }
1731 
1732         /*
1733          * The slab is now more allocated than it was, so the
1734          * order remains unchanged.
1735          */
1736         ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1737         return (buf);
1738 }
1739 
1740 /*
1741  * Allocate a raw (unconstructed) buffer from cp's slab layer.
1742  */
1743 static void *
1744 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1745 {
1746         kmem_slab_t *sp;
1747         void *buf;
1748         boolean_t test_destructor;
1749 
1750         mutex_enter(&cp->cache_lock);
1751         test_destructor = (cp->cache_slab_alloc == 0);
1752         sp = avl_first(&cp->cache_partial_slabs);
1753         if (sp == NULL) {
1754                 ASSERT(cp->cache_bufslab == 0);
1755 
1756                 /*
1757                  * The freelist is empty.  Create a new slab.
1758                  */
1759                 mutex_exit(&cp->cache_lock);
1760                 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1761                         return (NULL);
1762                 }
1763                 mutex_enter(&cp->cache_lock);
1764                 cp->cache_slab_create++;
1765                 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1766                         cp->cache_bufmax = cp->cache_buftotal;
1767                 cp->cache_bufslab += sp->slab_chunks;
1768         }
1769 
1770         buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1771         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1772             (cp->cache_complete_slab_count +
1773             avl_numnodes(&cp->cache_partial_slabs) +
1774             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1775         mutex_exit(&cp->cache_lock);
1776 
1777         if (test_destructor && cp->cache_destructor != NULL) {
1778                 /*
1779                  * On the first kmem_slab_alloc(), assert that it is valid to
1780                  * call the destructor on a newly constructed object without any
1781                  * client involvement.
1782                  */
1783                 if ((cp->cache_constructor == NULL) ||
1784                     cp->cache_constructor(buf, cp->cache_private,
1785                     kmflag) == 0) {
1786                         cp->cache_destructor(buf, cp->cache_private);
1787                 }
1788                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1789                     cp->cache_bufsize);
1790                 if (cp->cache_flags & KMF_DEADBEEF) {
1791                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1792                 }
1793         }
1794 
1795         return (buf);
1796 }
1797 
1798 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1799 
1800 /*
1801  * Free a raw (unconstructed) buffer to cp's slab layer.
1802  */
1803 static void
1804 kmem_slab_free(kmem_cache_t *cp, void *buf)
1805 {
1806         kmem_slab_t *sp;
1807         kmem_bufctl_t *bcp, **prev_bcpp;
1808 
1809         ASSERT(buf != NULL);
1810 
1811         mutex_enter(&cp->cache_lock);
1812         cp->cache_slab_free++;
1813 
1814         if (cp->cache_flags & KMF_HASH) {
1815                 /*
1816                  * Look up buffer in allocated-address hash table.
1817                  */
1818                 prev_bcpp = KMEM_HASH(cp, buf);
1819                 while ((bcp = *prev_bcpp) != NULL) {
1820                         if (bcp->bc_addr == buf) {
1821                                 *prev_bcpp = bcp->bc_next;
1822                                 sp = bcp->bc_slab;
1823                                 break;
1824                         }
1825                         cp->cache_lookup_depth++;
1826                         prev_bcpp = &bcp->bc_next;
1827                 }
1828         } else {
1829                 bcp = KMEM_BUFCTL(cp, buf);
1830                 sp = KMEM_SLAB(cp, buf);
1831         }
1832 
1833         if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1834                 mutex_exit(&cp->cache_lock);
1835                 kmem_error(KMERR_BADADDR, cp, buf);
1836                 return;
1837         }
1838 
1839         if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1840                 /*
1841                  * If this is the buffer that prevented the consolidator from
1842                  * clearing the slab, we can reset the slab flags now that the
1843                  * buffer is freed. (It makes sense to do this in
1844                  * kmem_cache_free(), where the client gives up ownership of the
1845                  * buffer, but on the hot path the test is too expensive.)
1846                  */
1847                 kmem_slab_move_yes(cp, sp, buf);
1848         }
1849 
1850         if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1851                 if (cp->cache_flags & KMF_CONTENTS)
1852                         ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1853                             kmem_log_enter(kmem_content_log, buf,
1854                             cp->cache_contents);
1855                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1856         }
1857 
1858         bcp->bc_next = sp->slab_head;
1859         sp->slab_head = bcp;
1860 
1861         cp->cache_bufslab++;
1862         ASSERT(sp->slab_refcnt >= 1);
1863 
1864         if (--sp->slab_refcnt == 0) {
1865                 /*
1866                  * There are no outstanding allocations from this slab,
1867                  * so we can reclaim the memory.
1868                  */
1869                 if (sp->slab_chunks == 1) {
1870                         list_remove(&cp->cache_complete_slabs, sp);
1871                         cp->cache_complete_slab_count--;
1872                 } else {
1873                         avl_remove(&cp->cache_partial_slabs, sp);
1874                 }
1875 
1876                 cp->cache_buftotal -= sp->slab_chunks;
1877                 cp->cache_bufslab -= sp->slab_chunks;
1878                 /*
1879                  * Defer releasing the slab to the virtual memory subsystem
1880                  * while there is a pending move callback, since we guarantee
1881                  * that buffers passed to the move callback have only been
1882                  * touched by kmem or by the client itself. Since the memory
1883                  * patterns baddcafe (uninitialized) and deadbeef (freed) both
1884                  * set at least one of the two lowest order bits, the client can
1885                  * test those bits in the move callback to determine whether or
1886                  * not it knows about the buffer (assuming that the client also
1887                  * sets one of those low order bits whenever it frees a buffer).
1888                  */
1889                 if (cp->cache_defrag == NULL ||
1890                     (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1891                     !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1892                         cp->cache_slab_destroy++;
1893                         mutex_exit(&cp->cache_lock);
1894                         kmem_slab_destroy(cp, sp);
1895                 } else {
1896                         list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1897                         /*
1898                          * Slabs are inserted at both ends of the deadlist to
1899                          * distinguish between slabs freed while move callbacks
1900                          * are pending (list head) and a slab freed while the
1901                          * lock is dropped in kmem_move_buffers() (list tail) so
1902                          * that in both cases slab_destroy() is called from the
1903                          * right context.
1904                          */
1905                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1906                                 list_insert_tail(deadlist, sp);
1907                         } else {
1908                                 list_insert_head(deadlist, sp);
1909                         }
1910                         cp->cache_defrag->kmd_deadcount++;
1911                         mutex_exit(&cp->cache_lock);
1912                 }
1913                 return;
1914         }
1915 
1916         if (bcp->bc_next == NULL) {
1917                 /* Transition the slab from completely allocated to partial. */
1918                 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1919                 ASSERT(sp->slab_chunks > 1);
1920                 list_remove(&cp->cache_complete_slabs, sp);
1921                 cp->cache_complete_slab_count--;
1922                 avl_add(&cp->cache_partial_slabs, sp);
1923         } else {
1924 #ifdef  DEBUG
1925                 if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1926                         KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
1927                 } else {
1928                         KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate);
1929                 }
1930 #else
1931                 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1932 #endif
1933         }
1934 
1935         ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1936             (cp->cache_complete_slab_count +
1937             avl_numnodes(&cp->cache_partial_slabs) +
1938             (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1939         mutex_exit(&cp->cache_lock);
1940 }
1941 
1942 /*
1943  * Return -1 on a kmem error, 1 if the constructor fails, 0 if successful.
1944  */
1945 static int
1946 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1947     caddr_t caller)
1948 {
1949         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1950         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1951         uint32_t mtbf;
1952 
1953         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1954                 kmem_error(KMERR_BADBUFTAG, cp, buf);
1955                 return (-1);
1956         }
1957 
1958         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1959 
1960         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1961                 kmem_error(KMERR_BADBUFCTL, cp, buf);
1962                 return (-1);
1963         }
1964 
1965         if (cp->cache_flags & KMF_DEADBEEF) {
1966                 if (!construct && (cp->cache_flags & KMF_LITE)) {
1967                         if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1968                                 kmem_error(KMERR_MODIFIED, cp, buf);
1969                                 return (-1);
1970                         }
1971                         if (cp->cache_constructor != NULL)
1972                                 *(uint64_t *)buf = btp->bt_redzone;
1973                         else
1974                                 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1975                 } else {
1976                         construct = 1;
1977                         if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1978                             KMEM_UNINITIALIZED_PATTERN, buf,
1979                             cp->cache_verify)) {
1980                                 kmem_error(KMERR_MODIFIED, cp, buf);
1981                                 return (-1);
1982                         }
1983                 }
1984         }
1985         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1986 
1987         if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1988             gethrtime() % mtbf == 0 &&
1989             (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1990                 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1991                 if (!construct && cp->cache_destructor != NULL)
1992                         cp->cache_destructor(buf, cp->cache_private);
1993         } else {
1994                 mtbf = 0;
1995         }
1996 
1997         if (mtbf || (construct && cp->cache_constructor != NULL &&
1998             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1999                 atomic_inc_64(&cp->cache_alloc_fail);
2000                 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2001                 if (cp->cache_flags & KMF_DEADBEEF)
2002                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2003                 kmem_slab_free(cp, buf);
2004                 return (1);
2005         }
2006 
2007         if (cp->cache_flags & KMF_AUDIT) {
2008                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2009         }
2010 
2011         if ((cp->cache_flags & KMF_LITE) &&
2012             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2013                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2014         }
2015 
2016         return (0);
2017 }
2018 
2019 static int
2020 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2021 {
2022         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2023         kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
2024         kmem_slab_t *sp;
2025 
2026         if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
2027                 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
2028                         kmem_error(KMERR_DUPFREE, cp, buf);
2029                         return (-1);
2030                 }
2031                 sp = kmem_findslab(cp, buf);
2032                 if (sp == NULL || sp->slab_cache != cp)
2033                         kmem_error(KMERR_BADADDR, cp, buf);
2034                 else
2035                         kmem_error(KMERR_REDZONE, cp, buf);
2036                 return (-1);
2037         }
2038 
2039         btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2040 
2041         if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2042                 kmem_error(KMERR_BADBUFCTL, cp, buf);
2043                 return (-1);
2044         }
2045 
2046         if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2047                 kmem_error(KMERR_REDZONE, cp, buf);
2048                 return (-1);
2049         }
2050 
2051         if (cp->cache_flags & KMF_AUDIT) {
2052                 if (cp->cache_flags & KMF_CONTENTS)
2053                         bcp->bc_contents = kmem_log_enter(kmem_content_log,
2054                             buf, cp->cache_contents);
2055                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2056         }
2057 
2058         if ((cp->cache_flags & KMF_LITE) &&
2059             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2060                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2061         }
2062 
2063         if (cp->cache_flags & KMF_DEADBEEF) {
2064                 if (cp->cache_flags & KMF_LITE)
2065                         btp->bt_redzone = *(uint64_t *)buf;
2066                 else if (cp->cache_destructor != NULL)
2067                         cp->cache_destructor(buf, cp->cache_private);
2068 
2069                 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2070         }
2071 
2072         return (0);
2073 }
2074 
2075 /*
2076  * Free each object in magazine mp to cp's slab layer, and free mp itself.
2077  */
2078 static void
2079 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2080 {
2081         int round;
2082 
2083         ASSERT(!list_link_active(&cp->cache_link) ||
2084             taskq_member(kmem_taskq, curthread));
2085 
2086         for (round = 0; round < nrounds; round++) {
2087                 void *buf = mp->mag_round[round];
2088 
2089                 if (cp->cache_flags & KMF_DEADBEEF) {
2090                         if (verify_pattern(KMEM_FREE_PATTERN, buf,
2091                             cp->cache_verify) != NULL) {
2092                                 kmem_error(KMERR_MODIFIED, cp, buf);
2093                                 continue;
2094                         }
2095                         if ((cp->cache_flags & KMF_LITE) &&
2096                             cp->cache_destructor != NULL) {
2097                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2098                                 *(uint64_t *)buf = btp->bt_redzone;
2099                                 cp->cache_destructor(buf, cp->cache_private);
2100                                 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2101                         }
2102                 } else if (cp->cache_destructor != NULL) {
2103                         cp->cache_destructor(buf, cp->cache_private);
2104                 }
2105 
2106                 kmem_slab_free(cp, buf);
2107         }
2108         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2109         kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2110 }
2111 
2112 /*
2113  * Allocate a magazine from the depot.
2114  */
2115 static kmem_magazine_t *
2116 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2117 {
2118         kmem_magazine_t *mp;
2119 
2120         /*
2121          * If we can't get the depot lock without contention,
2122          * update our contention count.  We use the depot
2123          * contention rate to determine whether we need to
2124          * increase the magazine size for better scalability.
2125          */
2126         if (!mutex_tryenter(&cp->cache_depot_lock)) {
2127                 mutex_enter(&cp->cache_depot_lock);
2128                 cp->cache_depot_contention++;
2129         }
2130 
2131         if ((mp = mlp->ml_list) != NULL) {
2132                 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2133                 mlp->ml_list = mp->mag_next;
2134                 if (--mlp->ml_total < mlp->ml_min)
2135                         mlp->ml_min = mlp->ml_total;
2136                 mlp->ml_alloc++;
2137         }
2138 
2139         mutex_exit(&cp->cache_depot_lock);
2140 
2141         return (mp);
2142 }
2143 
2144 /*
2145  * Free a magazine to the depot.
2146  */
2147 static void
2148 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2149 {
2150         mutex_enter(&cp->cache_depot_lock);
2151         ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2152         mp->mag_next = mlp->ml_list;
2153         mlp->ml_list = mp;
2154         mlp->ml_total++;
2155         mutex_exit(&cp->cache_depot_lock);
2156 }
2157 
2158 /*
2159  * Update the working set statistics for cp's depot.
2160  */
2161 static void
2162 kmem_depot_ws_update(kmem_cache_t *cp)
2163 {
2164         mutex_enter(&cp->cache_depot_lock);
2165         cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2166         cp->cache_full.ml_min = cp->cache_full.ml_total;
2167         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2168         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2169         mutex_exit(&cp->cache_depot_lock);
2170 }
2171 
2172 /*
2173  * Set the working set statistics for cp's depot to zero.  (Everything is
2174  * eligible for reaping.)
2175  */
2176 static void
2177 kmem_depot_ws_zero(kmem_cache_t *cp)
2178 {
2179         mutex_enter(&cp->cache_depot_lock);
2180         cp->cache_full.ml_reaplimit = cp->cache_full.ml_total;
2181         cp->cache_full.ml_min = cp->cache_full.ml_total;
2182         cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_total;
2183         cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2184         mutex_exit(&cp->cache_depot_lock);
2185 }
2186 
2187 /*
2188  * Reap all magazines that have fallen out of the depot's working set.
2189  */
2190 static void
2191 kmem_depot_ws_reap(kmem_cache_t *cp)
2192 {
2193         long reap;
2194         kmem_magazine_t *mp;
2195 
2196         ASSERT(!list_link_active(&cp->cache_link) ||
2197             taskq_member(kmem_taskq, curthread));
2198 
2199         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2200         while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL)
2201                 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2202 
2203         reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2204         while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL)
2205                 kmem_magazine_destroy(cp, mp, 0);
2206 }
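/*
 * Worked example of the working-set logic above: ml_min tracks the lowest
 * magazine count seen on a depot list since the last kmem_depot_ws_update(),
 * and ws_update saves that low-water mark as ml_reaplimit before resetting
 * ml_min.  Suppose the full-magazine list held 10 magazines at the last
 * update, dipped to 6, and recovered to 12: ml_min ends the interval at 6,
 * so ml_reaplimit becomes 6.  A later kmem_depot_ws_reap() then frees at
 * most MIN(ml_reaplimit, ml_min) magazines, i.e. only those that sat unused
 * across two consecutive intervals, so reaping does not cut into the
 * recently active working set.
 */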
2207 
2208 static void
2209 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2210 {
2211         ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2212             (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2213         ASSERT(ccp->cc_magsize > 0);
2214 
2215         ccp->cc_ploaded = ccp->cc_loaded;
2216         ccp->cc_prounds = ccp->cc_rounds;
2217         ccp->cc_loaded = mp;
2218         ccp->cc_rounds = rounds;
2219 }
2220 
2221 /*
2222  * Intercept kmem alloc/free calls during crash dump in order to avoid
2223  * changing kmem state while memory is being saved to the dump device.
2224  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2225  * there are no locks because only one CPU calls kmem during a crash
2226  * dump. To enable this feature, first create the associated vmem
2227  * arena with VMC_DUMPSAFE.
2228  */
2229 static void *kmem_dump_start;   /* start of pre-reserved heap */
2230 static void *kmem_dump_end;     /* end of heap area */
2231 static void *kmem_dump_curr;    /* current free heap pointer */
2232 static size_t kmem_dump_size;   /* size of heap area */
2233 
2234 /* append to each buf created in the pre-reserved heap */
2235 typedef struct kmem_dumpctl {
2236         void    *kdc_next;      /* cache dump free list linkage */
2237 } kmem_dumpctl_t;
2238 
2239 #define KMEM_DUMPCTL(cp, buf)   \
2240         ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2241             sizeof (void *)))
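/*
 * Example of the KMEM_DUMPCTL() placement (sizes illustrative): for a
 * cache with cache_bufsize = 40 on a 64-bit kernel, the kmem_dumpctl_t
 * lands at P2ROUNDUP(buf + 40, sizeof (void *)) = buf + 40, since 40 is
 * already pointer-aligned.  The dump free list linkage thus sits in the
 * space immediately after the client-visible portion of the buffer.
 */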
2242 
2243 /* Keep some simple stats. */
2244 #define KMEM_DUMP_LOGS  (100)
2245 
2246 typedef struct kmem_dump_log {
2247         kmem_cache_t    *kdl_cache;
2248         uint_t          kdl_allocs;             /* # of dump allocations */
2249         uint_t          kdl_frees;              /* # of dump frees */
2250         uint_t          kdl_alloc_fails;        /* # of allocation failures */
2251         uint_t          kdl_free_nondump;       /* # of non-dump frees */
2252         uint_t          kdl_unsafe;             /* cache was used, but unsafe */
2253 } kmem_dump_log_t;
2254 
2255 static kmem_dump_log_t *kmem_dump_log;
2256 static int kmem_dump_log_idx;
2257 
2258 #define KDI_LOG(cp, stat) {                                             \
2259         kmem_dump_log_t *kdl;                                           \
2260         if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) {      \
2261                 kdl->stat++;                                         \
2262         } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) {             \
2263                 kdl = &kmem_dump_log[kmem_dump_log_idx++];          \
2264                 kdl->stat++;                                         \
2265                 kdl->kdl_cache = (cp);                                       \
2266                 (cp)->cache_dumplog = kdl;                           \
2267         }                                                               \
2268 }
2269 
2270 /* set nonzero for a full report */
2271 uint_t kmem_dump_verbose = 0;
2272 
2273 /* stats for the oversize heap */
2274 uint_t kmem_dump_oversize_allocs = 0;
2275 uint_t kmem_dump_oversize_max = 0;
2276 
2277 static void
2278 kmem_dumppr(char **pp, char *e, const char *format, ...)
2279 {
2280         char *p = *pp;
2281 
2282         if (p < e) {
2283                 int n;
2284                 va_list ap;
2285 
2286                 va_start(ap, format);
2287                 n = vsnprintf(p, e - p, format, ap);
2288                 va_end(ap);
2289                 *pp = p + n;
2290         }
2291 }
2292 
2293 /*
2294  * Called when dumpadm(1M) configures dump parameters.
2295  */
2296 void
2297 kmem_dump_init(size_t size)
2298 {
2299         if (kmem_dump_start != NULL)
2300                 kmem_free(kmem_dump_start, kmem_dump_size);
2301 
2302         if (kmem_dump_log == NULL)
2303                 kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
2304                     sizeof (kmem_dump_log_t), KM_SLEEP);
2305 
2306         kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2307 
2308         if (kmem_dump_start != NULL) {
2309                 kmem_dump_size = size;
2310                 kmem_dump_curr = kmem_dump_start;
2311                 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2312                 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2313         } else {
2314                 kmem_dump_size = 0;
2315                 kmem_dump_curr = NULL;
2316                 kmem_dump_end = NULL;
2317         }
2318 }
2319 
2320 /*
2321  * Set a flag for each kmem_cache_t indicating whether it is safe to use
2322  * alternate dump memory. Called just before the panic crash dump starts.
2323  * Set the flag for the calling CPU.
2324  */
2325 void
2326 kmem_dump_begin(void)
2327 {
2328         ASSERT(panicstr != NULL);
2329         if (kmem_dump_start != NULL) {
2330                 kmem_cache_t *cp;
2331 
2332                 for (cp = list_head(&kmem_caches); cp != NULL;
2333                     cp = list_next(&kmem_caches, cp)) {
2334                         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2335 
2336                         if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2337                                 cp->cache_flags |= KMF_DUMPDIVERT;
2338                                 ccp->cc_flags |= KMF_DUMPDIVERT;
2339                                 ccp->cc_dump_rounds = ccp->cc_rounds;
2340                                 ccp->cc_dump_prounds = ccp->cc_prounds;
2341                                 ccp->cc_rounds = ccp->cc_prounds = -1;
2342                         } else {
2343                                 cp->cache_flags |= KMF_DUMPUNSAFE;
2344                                 ccp->cc_flags |= KMF_DUMPUNSAFE;
2345                         }
2346                 }
2347         }
2348 }
2349 
2350 /*
2351  * Finish the dump intercept:
2352  * print any warnings on the console, and
2353  * return verbose information to dumpsys() in the given buffer.
2354  */
2355 size_t
2356 kmem_dump_finish(char *buf, size_t size)
2357 {
2358         int kdi_idx;
2359         int kdi_end = kmem_dump_log_idx;
2360         int percent = 0;
2361         int header = 0;
2362         int warn = 0;
2363         size_t used;
2364         kmem_cache_t *cp;
2365         kmem_dump_log_t *kdl;
2366         char *e = buf + size;
2367         char *p = buf;
2368 
2369         if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
2370                 return (0);
2371 
2372         used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2373         percent = (used * 100) / kmem_dump_size;
2374 
2375         kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2376         kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2377         kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2378         kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2379             kmem_dump_oversize_allocs);
2380         kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2381             kmem_dump_oversize_max);
2382 
2383         for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
2384                 kdl = &kmem_dump_log[kdi_idx];
2385                 cp = kdl->kdl_cache;
2386                 if (cp == NULL)
2387                         break;
2388                 if (kdl->kdl_alloc_fails)
2389                         ++warn;
2390                 if (header == 0) {
2391                         kmem_dumppr(&p, e,
2392                             "Cache Name,Allocs,Frees,Alloc Fails,"
2393                             "Nondump Frees,Unsafe Allocs/Frees\n");
2394                         header = 1;
2395                 }
2396                 kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
2397                     cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2398                     kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
2399                     kdl->kdl_unsafe);
2400         }
2401 
2402         /* return buffer size used */
2403         if (p < e)
2404                 bzero(p, e - p);
2405         return (p - buf);
2406 }
2407 
2408 /*
2409  * Allocate a constructed object from alternate dump memory.
2410  */
2411 void *
2412 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2413 {
2414         void *buf;
2415         void *curr;
2416         char *bufend;
2417 
2418         /* return a constructed object */
2419         if ((buf = cp->cache_dumpfreelist) != NULL) {
2420                 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2421                 KDI_LOG(cp, kdl_allocs);
2422                 return (buf);
2423         }
2424 
2425         /* create a new constructed object */
2426         curr = kmem_dump_curr;
2427         buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2428         bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2429 
2430         /* hat layer objects cannot cross a page boundary */
2431         if (cp->cache_align < PAGESIZE) {
2432                 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2433                 if (bufend > page) {
2434                         bufend += page - (char *)buf;
2435                         buf = (void *)page;
2436                 }
2437         }
2438 
2439         /* fall back to normal alloc if reserved area is used up */
2440         if (bufend > (char *)kmem_dump_end) {
2441                 kmem_dump_curr = kmem_dump_end;
2442                 KDI_LOG(cp, kdl_alloc_fails);
2443                 return (NULL);
2444         }
2445 
2446         /*
2447          * Must advance curr pointer before calling a constructor that
2448          * may also allocate memory.
2449          */
2450         kmem_dump_curr = bufend;
2451 
2452         /* run constructor */
2453         if (cp->cache_constructor != NULL &&
2454             cp->cache_constructor(buf, cp->cache_private, kmflag)
2455             != 0) {
2456 #ifdef DEBUG
2457                 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2458                     cp->cache_name, (void *)cp);
2459 #endif
2460                 /* reset curr pointer iff no allocs were done */
2461                 if (kmem_dump_curr == bufend)
2462                         kmem_dump_curr = curr;
2463 
2464                 /* fall back to normal alloc if the constructor fails */
2465                 KDI_LOG(cp, kdl_alloc_fails);
2466                 return (NULL);
2467         }
2468 
2469         KDI_LOG(cp, kdl_allocs);
2470         return (buf);
2471 }
2472 
2473 /*
2474  * Free a constructed object in alternate dump memory.
2475  */
2476 int
2477 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2478 {
2479         /* save constructed buffers for next time */
2480         if ((char *)buf >= (char *)kmem_dump_start &&
2481             (char *)buf < (char *)kmem_dump_end) {
2482                 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2483                 cp->cache_dumpfreelist = buf;
2484                 KDI_LOG(cp, kdl_frees);
2485                 return (0);
2486         }
2487 
2488         /* count all non-dump buf frees */
2489         KDI_LOG(cp, kdl_free_nondump);
2490 
2491         /* just drop buffers that were allocated before dump started */
2492         if (kmem_dump_curr < kmem_dump_end)
2493                 return (0);
2494 
2495         /* fall back to normal free if reserved area is used up */
2496         return (1);
2497 }
2498 
2499 /*
2500  * Allocate a constructed object from cache cp.
2501  */
2502 void *
2503 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2504 {
2505         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2506         kmem_magazine_t *fmp;
2507         void *buf;
2508 
2509         mutex_enter(&ccp->cc_lock);
2510         for (;;) {
2511                 /*
2512                  * If there's an object available in the current CPU's
2513                  * loaded magazine, just take it and return.
2514                  */
2515                 if (ccp->cc_rounds > 0) {
2516                         buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2517                         ccp->cc_alloc++;
2518                         mutex_exit(&ccp->cc_lock);
2519                         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2520                                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2521                                         ASSERT(!(ccp->cc_flags &
2522                                             KMF_DUMPDIVERT));
2523                                         KDI_LOG(cp, kdl_unsafe);
2524                                 }
2525                                 if ((ccp->cc_flags & KMF_BUFTAG) &&
2526                                     kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2527                                     caller()) != 0) {
2528                                         if (kmflag & KM_NOSLEEP)
2529                                                 return (NULL);
2530                                         mutex_enter(&ccp->cc_lock);
2531                                         continue;
2532                                 }
2533                         }
2534                         return (buf);
2535                 }
2536 
2537                 /*
2538                  * The loaded magazine is empty.  If the previously loaded
2539                  * magazine was full, exchange them and try again.
2540                  */
2541                 if (ccp->cc_prounds > 0) {
2542                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2543                         continue;
2544                 }
2545 
2546                 /*
2547                  * Return an alternate buffer at dump time to preserve
2548                  * the heap.
2549                  */
2550                 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2551                         if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2552                                 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2553                                 /* log it so that we can warn about it */
2554                                 KDI_LOG(cp, kdl_unsafe);
2555                         } else {
2556                                 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2557                                     NULL) {
2558                                         mutex_exit(&ccp->cc_lock);
2559                                         return (buf);
2560                                 }
2561                                 break;          /* fall back to slab layer */
2562                         }
2563                 }
2564 
2565                 /*
2566                  * If the magazine layer is disabled, break out now.
2567                  */
2568                 if (ccp->cc_magsize == 0)
2569                         break;
2570 
2571                 /*
2572                  * Try to get a full magazine from the depot.
2573                  */
2574                 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2575                 if (fmp != NULL) {
2576                         if (ccp->cc_ploaded != NULL)
2577                                 kmem_depot_free(cp, &cp->cache_empty,
2578                                     ccp->cc_ploaded);
2579                         kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2580                         continue;
2581                 }
2582 
2583                 /*
2584                  * There are no full magazines in the depot,
2585                  * so fall through to the slab layer.
2586                  */
2587                 break;
2588         }
2589         mutex_exit(&ccp->cc_lock);
2590 
2591         /*
2592          * We couldn't allocate a constructed object from the magazine layer,
2593          * so get a raw buffer from the slab layer and apply its constructor.
2594          */
2595         buf = kmem_slab_alloc(cp, kmflag);
2596 
2597         if (buf == NULL)
2598                 return (NULL);
2599 
2600         if (cp->cache_flags & KMF_BUFTAG) {
2601                 /*
2602                  * Make kmem_cache_alloc_debug() apply the constructor for us.
2603                  */
2604                 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2605                 if (rc != 0) {
2606                         if (kmflag & KM_NOSLEEP)
2607                                 return (NULL);
2608                         /*
2609                          * kmem_cache_alloc_debug() detected corruption
2610                          * but didn't panic (kmem_panic <= 0). We should not be
2611                          * here because the constructor failed (indicated by a
2612                          * return code of 1). Try again.
2613                          */
2614                         ASSERT(rc == -1);
2615                         return (kmem_cache_alloc(cp, kmflag));
2616                 }
2617                 return (buf);
2618         }
2619 
2620         if (cp->cache_constructor != NULL &&
2621             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2622                 atomic_inc_64(&cp->cache_alloc_fail);
2623                 kmem_slab_free(cp, buf);
2624                 return (NULL);
2625         }
2626 
2627         return (buf);
2628 }
2629 
2630 /*
2631  * The freed argument tells whether or not kmem_cache_free_debug() has already
2632  * been called so that we can avoid the duplicate free error. For example, a
2633  * buffer on a magazine has already been freed by the client but is still
2634  * constructed.
2635  */
2636 static void
2637 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2638 {
2639         if (!freed && (cp->cache_flags & KMF_BUFTAG))
2640                 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2641                         return;
2642 
2643         /*
2644          * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2645          * kmem_cache_free_debug() will have already applied the destructor.
2646          */
2647         if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2648             cp->cache_destructor != NULL) {
2649                 if (cp->cache_flags & KMF_DEADBEEF) {    /* KMF_LITE implied */
2650                         kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2651                         *(uint64_t *)buf = btp->bt_redzone;
2652                         cp->cache_destructor(buf, cp->cache_private);
2653                         *(uint64_t *)buf = KMEM_FREE_PATTERN;
2654                 } else {
2655                         cp->cache_destructor(buf, cp->cache_private);
2656                 }
2657         }
2658 
2659         kmem_slab_free(cp, buf);
2660 }
2661 
2662 /*
2663  * Used when there's no room to free a buffer to the per-CPU cache.
2664  * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2665  * caller should try freeing to the per-CPU cache again.
2666  * Note that we don't directly install the magazine in the cpu cache,
2667  * since its state may have changed wildly while the lock was dropped.
2668  */
2669 static int
2670 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2671 {
2672         kmem_magazine_t *emp;
2673         kmem_magtype_t *mtp;
2674 
2675         ASSERT(MUTEX_HELD(&ccp->cc_lock));
2676         ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2677             ((uint_t)ccp->cc_rounds == -1)) &&
2678             ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2679             ((uint_t)ccp->cc_prounds == -1)));
2680 
2681         emp = kmem_depot_alloc(cp, &cp->cache_empty);
2682         if (emp != NULL) {
2683                 if (ccp->cc_ploaded != NULL)
2684                         kmem_depot_free(cp, &cp->cache_full,
2685                             ccp->cc_ploaded);
2686                 kmem_cpu_reload(ccp, emp, 0);
2687                 return (1);
2688         }
2689         /*
2690          * There are no empty magazines in the depot,
2691          * so try to allocate a new one.  We must drop all locks
2692          * across kmem_cache_alloc() because lower layers may
2693          * attempt to allocate from this cache.
2694          */
2695         mtp = cp->cache_magtype;
2696         mutex_exit(&ccp->cc_lock);
2697         emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2698         mutex_enter(&ccp->cc_lock);
2699 
2700         if (emp != NULL) {
2701                 /*
2702                  * We successfully allocated an empty magazine.
2703                  * However, we had to drop ccp->cc_lock to do it,
2704                  * so the cache's magazine size may have changed.
2705                  * If so, free the magazine and try again.
2706                  */
2707                 if (ccp->cc_magsize != mtp->mt_magsize) {
2708                         mutex_exit(&ccp->cc_lock);
2709                         kmem_cache_free(mtp->mt_cache, emp);
2710                         mutex_enter(&ccp->cc_lock);
2711                         return (1);
2712                 }
2713 
2714                 /*
2715                  * We got a magazine of the right size.  Add it to
2716                  * the depot and try the whole dance again.
2717                  */
2718                 kmem_depot_free(cp, &cp->cache_empty, emp);
2719                 return (1);
2720         }
2721 
2722         /*
2723          * We couldn't allocate an empty magazine,
2724          * so fall through to the slab layer.
2725          */
2726         return (0);
2727 }
2728 
2729 /*
2730  * Free a constructed object to cache cp.
2731  */
2732 void
2733 kmem_cache_free(kmem_cache_t *cp, void *buf)
2734 {
2735         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2736 
2737         /*
2738          * The client must not free either of the buffers passed to the move
2739          * callback function.
2740          */
2741         ASSERT(cp->cache_defrag == NULL ||
2742             cp->cache_defrag->kmd_thread != curthread ||
2743             (buf != cp->cache_defrag->kmd_from_buf &&
2744             buf != cp->cache_defrag->kmd_to_buf));
2745 
2746         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2747                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2748                         ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2749                         /* log it so that we can warn about it */
2750                         KDI_LOG(cp, kdl_unsafe);
2751                 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2752                         return;
2753                 }
2754                 if (ccp->cc_flags & KMF_BUFTAG) {
2755                         if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2756                                 return;
2757                 }
2758         }
2759 
2760         mutex_enter(&ccp->cc_lock);
2761         /*
2762          * Any changes to this logic should be reflected in kmem_slab_prefill()
2763          */
2764         for (;;) {
2765                 /*
2766                  * If there's a slot available in the current CPU's
2767                  * loaded magazine, just put the object there and return.
2768                  */
2769                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2770                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2771                         ccp->cc_free++;
2772                         mutex_exit(&ccp->cc_lock);
2773                         return;
2774                 }
2775 
2776                 /*
2777                  * The loaded magazine is full.  If the previously loaded
2778                  * magazine was empty, exchange them and try again.
2779                  */
2780                 if (ccp->cc_prounds == 0) {
2781                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2782                         continue;
2783                 }
2784 
2785                 /*
2786                  * If the magazine layer is disabled, break out now.
2787                  */
2788                 if (ccp->cc_magsize == 0)
2789                         break;
2790 
2791                 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2792                         /*
2793                          * We couldn't free our constructed object to the
2794                          * magazine layer, so apply its destructor and free it
2795                          * to the slab layer.
2796                          */
2797                         break;
2798                 }
2799         }
2800         mutex_exit(&ccp->cc_lock);
2801         kmem_slab_free_constructed(cp, buf, B_TRUE);
2802 }
2803 
2804 static void
2805 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2806 {
2807         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2808         int cache_flags = cp->cache_flags;
2809 
2810         kmem_bufctl_t *next, *head;
2811         size_t nbufs;
2812 
2813         /*
2814          * Completely allocate the newly created slab and put the pre-allocated
2815          * buffers in magazines. Any of the buffers that cannot be put in
2816          * magazines must be returned to the slab.
2817          */
2818         ASSERT(MUTEX_HELD(&cp->cache_lock));
2819         ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2820         ASSERT(cp->cache_constructor == NULL);
2821         ASSERT(sp->slab_cache == cp);
2822         ASSERT(sp->slab_refcnt == 1);
2823         ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2824         ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2825 
2826         head = sp->slab_head;
2827         nbufs = (sp->slab_chunks - sp->slab_refcnt);
2828         sp->slab_head = NULL;
2829         sp->slab_refcnt += nbufs;
2830         cp->cache_bufslab -= nbufs;
2831         cp->cache_slab_alloc += nbufs;
2832         list_insert_head(&cp->cache_complete_slabs, sp);
2833         cp->cache_complete_slab_count++;
2834         mutex_exit(&cp->cache_lock);
2835         mutex_enter(&ccp->cc_lock);
2836 
2837         while (head != NULL) {
2838                 void *buf = KMEM_BUF(cp, head);
2839                 /*
2840                  * If there's a slot available in the current CPU's
2841                  * loaded magazine, just put the object there and
2842                  * continue.
2843                  */
2844                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2845                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2846                             buf;
2847                         ccp->cc_free++;
2848                         nbufs--;
2849                         head = head->bc_next;
2850                         continue;
2851                 }
2852 
2853                 /*
2854                  * The loaded magazine is full.  If the previously
2855                  * loaded magazine was empty, exchange them and try
2856                  * again.
2857                  */
2858                 if (ccp->cc_prounds == 0) {
2859                         kmem_cpu_reload(ccp, ccp->cc_ploaded,
2860                             ccp->cc_prounds);
2861                         continue;
2862                 }
2863 
2864                 /*
2865                  * If the magazine layer is disabled, break out now.
2866                  */
2867 
2868                 if (ccp->cc_magsize == 0) {
2869                         break;
2870                 }
2871 
2872                 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2873                         break;
2874         }
2875         mutex_exit(&ccp->cc_lock);
2876         if (nbufs != 0) {
2877                 ASSERT(head != NULL);
2878 
2879                 /*
2880                  * If there was a failure, return remaining objects to
2881                  * the slab
2882                  */
2883                 while (head != NULL) {
2884                         ASSERT(nbufs != 0);
2885                         next = head->bc_next;
2886                         head->bc_next = NULL;
2887                         kmem_slab_free(cp, KMEM_BUF(cp, head));
2888                         head = next;
2889                         nbufs--;
2890                 }
2891         }
2892         ASSERT(head == NULL);
2893         ASSERT(nbufs == 0);
2894         mutex_enter(&cp->cache_lock);
2895 }
2896 
2897 void *
2898 kmem_zalloc(size_t size, int kmflag)
2899 {
2900         size_t index;
2901         void *buf;
2902 
2903         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2904                 kmem_cache_t *cp = kmem_alloc_table[index];
2905                 buf = kmem_cache_alloc(cp, kmflag);
2906                 if (buf != NULL) {
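                        /*
                         * Record the requested size in the buffer tag and
                         * place a redzone byte just past the requested size
                         * so that kmem_free() can verify both.
                         */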
2907                         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2908                                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2909                                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2910                                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2911 
2912                                 if (cp->cache_flags & KMF_LITE) {
2913                                         KMEM_BUFTAG_LITE_ENTER(btp,
2914                                             kmem_lite_count, caller());
2915                                 }
2916                         }
2917                         bzero(buf, size);
2918                 }
2919         } else {
2920                 buf = kmem_alloc(size, kmflag);
2921                 if (buf != NULL)
2922                         bzero(buf, size);
2923         }
2924         return (buf);
2925 }
2926 
2927 void *
2928 kmem_alloc(size_t size, int kmflag)
2929 {
2930         size_t index;
2931         kmem_cache_t *cp;
2932         void *buf;
2933 
2934         if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2935                 cp = kmem_alloc_table[index];
2936                 /* fall through to kmem_cache_alloc() */
2937 
2938         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2939             kmem_big_alloc_table_max) {
2940                 cp = kmem_big_alloc_table[index];
2941                 /* fall through to kmem_cache_alloc() */
2942 
2943         } else {
2944                 if (size == 0)
2945                         return (NULL);
2946 
2947                 buf = vmem_alloc(kmem_oversize_arena, size,
2948                     kmflag & KM_VMFLAGS);
2949                 if (buf == NULL)
2950                         kmem_log_event(kmem_failure_log, NULL, NULL,
2951                             (void *)size);
2952                 else if (KMEM_DUMP(kmem_slab_cache)) {
2953                         /* stats for dump intercept */
2954                         kmem_dump_oversize_allocs++;
2955                         if (size > kmem_dump_oversize_max)
2956                                 kmem_dump_oversize_max = size;
2957                 }
2958                 return (buf);
2959         }
2960 
2961         buf = kmem_cache_alloc(cp, kmflag);
2962         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2963                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2964                 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2965                 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2966 
2967                 if (cp->cache_flags & KMF_LITE) {
2968                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2969                 }
2970         }
2971         return (buf);
2972 }
2973 
2974 void
2975 kmem_free(void *buf, size_t size)
2976 {
2977         size_t index;
2978         kmem_cache_t *cp;
2979 
2980         if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2981                 cp = kmem_alloc_table[index];
2982                 /* fall through to kmem_cache_free() */
2983 
2984         } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2985             kmem_big_alloc_table_max) {
2986                 cp = kmem_big_alloc_table[index];
2987                 /* fall through to kmem_cache_free() */
2988 
2989         } else {
2990                 EQUIV(buf == NULL, size == 0);
2991                 if (buf == NULL && size == 0)
2992                         return;
2993                 vmem_free(kmem_oversize_arena, buf, size);
2994                 return;
2995         }
2996 
2997         if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2998                 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2999                 uint32_t *ip = (uint32_t *)btp;
3000                 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
3001                         if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
3002                                 kmem_error(KMERR_DUPFREE, cp, buf);
3003                                 return;
3004                         }
3005                         if (KMEM_SIZE_VALID(ip[1])) {
3006                                 ip[0] = KMEM_SIZE_ENCODE(size);
3007                                 kmem_error(KMERR_BADSIZE, cp, buf);
3008                         } else {
3009                                 kmem_error(KMERR_REDZONE, cp, buf);
3010                         }
3011                         return;
3012                 }
3013                 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
3014                         kmem_error(KMERR_REDZONE, cp, buf);
3015                         return;
3016                 }
3017                 btp->bt_redzone = KMEM_REDZONE_PATTERN;
3018                 if (cp->cache_flags & KMF_LITE) {
3019                         KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
3020                             caller());
3021                 }
3022         }
3023         kmem_cache_free(cp, buf);
3024 }
3025 
3026 void *
3027 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
3028 {
3029         size_t realsize = size + vmp->vm_quantum;
3030         void *addr;
3031 
3032         /*
3033          * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
3034          * vm_quantum will cause integer wraparound.  Check for this, and
3035          * blow off the firewall page in this case.  Note that such a
3036          * giant allocation (the entire kernel address space) can never
3037          * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3038          * or sleep forever (VM_SLEEP).  Thus, there is no need for a
3039          * corresponding check in kmem_firewall_va_free().
3040          */
3041         if (realsize < size)
3042                 realsize = size;
3043 
3044         /*
3045          * While boot still owns resource management, make sure that this
3046          * redzone virtual address allocation is properly accounted for in
 * OBP's "virtual-memory" "available" lists because we're
3048          * effectively claiming them for a red zone.  If we don't do this,
3049          * the available lists become too fragmented and too large for the
3050          * current boot/kernel memory list interface.
3051          */
3052         addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3053 
3054         if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3055                 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3056 
3057         return (addr);
3058 }
3059 
3060 void
3061 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3062 {
3063         ASSERT((kvseg.s_base == NULL ?
3064             va_to_pfn((char *)addr + size) :
3065             hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3066 
3067         vmem_free(vmp, addr, size + vmp->vm_quantum);
3068 }
3069 
3070 /*
3071  * Try to allocate at least `size' bytes of memory without sleeping or
 * panicking. Return the actual allocated size in `asize'. If every such
 * attempt fails, make a final attempt with sleeping or panicking allowed,
 * as permitted by the caller's kmflag.
3074  */
3075 void *
3076 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3077 {
3078         void *p;
3079 
3080         *asize = P2ROUNDUP(size, KMEM_ALIGN);
3081         do {
3082                 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3083                 if (p != NULL)
3084                         return (p);
3085                 *asize += KMEM_ALIGN;
3086         } while (*asize <= PAGESIZE);
3087 
3088         *asize = P2ROUNDUP(size, KMEM_ALIGN);
3089         return (kmem_alloc(*asize, kmflag));
3090 }
3091 
3092 /*
3093  * Reclaim all unused memory from a cache.
3094  */
3095 static void
3096 kmem_cache_reap(kmem_cache_t *cp)
3097 {
3098         ASSERT(taskq_member(kmem_taskq, curthread));
3099         cp->cache_reap++;
3100 
3101         /*
3102          * Ask the cache's owner to free some memory if possible.
3103          * The idea is to handle things like the inode cache, which
3104          * typically sits on a bunch of memory that it doesn't truly
3105          * *need*.  Reclaim policy is entirely up to the owner; this
3106          * callback is just an advisory plea for help.
3107          */
3108         if (cp->cache_reclaim != NULL) {
3109                 long delta;
3110 
3111                 /*
3112                  * Reclaimed memory should be reapable (not included in the
3113                  * depot's working set).
3114                  */
3115                 delta = cp->cache_full.ml_total;
3116                 cp->cache_reclaim(cp->cache_private);
3117                 delta = cp->cache_full.ml_total - delta;
3118                 if (delta > 0) {
3119                         mutex_enter(&cp->cache_depot_lock);
3120                         cp->cache_full.ml_reaplimit += delta;
3121                         cp->cache_full.ml_min += delta;
3122                         mutex_exit(&cp->cache_depot_lock);
3123                 }
3124         }
3125 
3126         kmem_depot_ws_reap(cp);
3127 
3128         if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3129                 kmem_cache_defrag(cp);
3130         }
3131 }
3132 
3133 static void
3134 kmem_reap_timeout(void *flag_arg)
3135 {
3136         uint32_t *flag = (uint32_t *)flag_arg;
3137 
3138         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3139         *flag = 0;
3140 }
3141 
3142 static void
3143 kmem_reap_done(void *flag)
3144 {
3145         if (!callout_init_done) {
3146                 /* can't schedule a timeout at this point */
3147                 kmem_reap_timeout(flag);
3148         } else {
3149                 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3150         }
3151 }
3152 
3153 static void
3154 kmem_reap_start(void *flag)
3155 {
3156         ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3157 
3158         if (flag == &kmem_reaping) {
3159                 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3160                 /*
3161                  * if we have segkp under heap, reap segkp cache.
3162                  */
3163                 if (segkp_fromheap)
3164                         segkp_cache_free();
3165         }
3166         else
3167                 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3168 
3169         /*
3170          * We use taskq_dispatch() to schedule a timeout to clear
3171          * the flag so that kmem_reap() becomes self-throttling:
3172          * we won't reap again until the current reap completes *and*
3173          * at least kmem_reap_interval ticks have elapsed.
3174          */
3175         if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3176                 kmem_reap_done(flag);
3177 }
3178 
3179 static void
3180 kmem_reap_common(void *flag_arg)
3181 {
3182         uint32_t *flag = (uint32_t *)flag_arg;
3183 
3184         if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3185             atomic_cas_32(flag, 0, 1) != 0)
3186                 return;
3187 
3188         /*
3189          * It may not be kosher to do memory allocation when a reap is called
3190          * (for example, if vmem_populate() is in the call chain).  So we
3191          * start the reap going with a TQ_NOALLOC dispatch.  If the dispatch
3192          * fails, we reset the flag, and the next reap will try again.
3193          */
3194         if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3195                 *flag = 0;
3196 }
3197 
3198 /*
3199  * Reclaim all unused memory from all caches.  Called from the VM system
3200  * when memory gets tight.
3201  */
3202 void
3203 kmem_reap(void)
3204 {
3205         kmem_reap_common(&kmem_reaping);
3206 }
3207 
3208 /*
3209  * Reclaim all unused memory from identifier arenas, called when a vmem
 * arena not backed by memory is exhausted.  Since reaping memory-backed
 * caches cannot help with identifier exhaustion, we avoid both a large
 * amount of work and unwanted side-effects from reclaim callbacks.
3213  */
3214 void
3215 kmem_reap_idspace(void)
3216 {
3217         kmem_reap_common(&kmem_reaping_idspace);
3218 }
3219 
3220 /*
3221  * Purge all magazines from a cache and set its magazine limit to zero.
3222  * All calls are serialized by the kmem_taskq lock, except for the final
3223  * call from kmem_cache_destroy().
3224  */
3225 static void
3226 kmem_cache_magazine_purge(kmem_cache_t *cp)
3227 {
3228         kmem_cpu_cache_t *ccp;
3229         kmem_magazine_t *mp, *pmp;
3230         int rounds, prounds, cpu_seqid;
3231 
3232         ASSERT(!list_link_active(&cp->cache_link) ||
3233             taskq_member(kmem_taskq, curthread));
3234         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3235 
3236         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3237                 ccp = &cp->cache_cpu[cpu_seqid];
3238 
3239                 mutex_enter(&ccp->cc_lock);
3240                 mp = ccp->cc_loaded;
3241                 pmp = ccp->cc_ploaded;
3242                 rounds = ccp->cc_rounds;
3243                 prounds = ccp->cc_prounds;
3244                 ccp->cc_loaded = NULL;
3245                 ccp->cc_ploaded = NULL;
3246                 ccp->cc_rounds = -1;
3247                 ccp->cc_prounds = -1;
3248                 ccp->cc_magsize = 0;
3249                 mutex_exit(&ccp->cc_lock);
3250 
3251                 if (mp)
3252                         kmem_magazine_destroy(cp, mp, rounds);
3253                 if (pmp)
3254                         kmem_magazine_destroy(cp, pmp, prounds);
3255         }
3256 
3257         kmem_depot_ws_zero(cp);
3258         kmem_depot_ws_reap(cp);
3259 }
3260 
3261 /*
3262  * Enable per-cpu magazines on a cache.
3263  */
3264 static void
3265 kmem_cache_magazine_enable(kmem_cache_t *cp)
3266 {
3267         int cpu_seqid;
3268 
3269         if (cp->cache_flags & KMF_NOMAGAZINE)
3270                 return;
3271 
3272         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3273                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3274                 mutex_enter(&ccp->cc_lock);
3275                 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3276                 mutex_exit(&ccp->cc_lock);
3277         }
3278 
3279 }
3280 
3281 /*
3282  * Reap (almost) everything right now.
3283  */
3284 void
3285 kmem_cache_reap_now(kmem_cache_t *cp)
3286 {
3287         ASSERT(list_link_active(&cp->cache_link));
3288 
3289         kmem_depot_ws_zero(cp);
3290 
3291         (void) taskq_dispatch(kmem_taskq,
3292             (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3293         taskq_wait(kmem_taskq);
3294 }
3295 
3296 /*
3297  * Recompute a cache's magazine size.  The trade-off is that larger magazines
3298  * provide a higher transfer rate with the depot, while smaller magazines
3299  * reduce memory consumption.  Magazine resizing is an expensive operation;
3300  * it should not be done frequently.
3301  *
3302  * Changes to the magazine size are serialized by the kmem_taskq lock.
3303  *
3304  * Note: at present this only grows the magazine size.  It might be useful
3305  * to allow shrinkage too.
3306  */
3307 static void
3308 kmem_cache_magazine_resize(kmem_cache_t *cp)
3309 {
3310         kmem_magtype_t *mtp = cp->cache_magtype;
3311 
3312         ASSERT(taskq_member(kmem_taskq, curthread));
3313 
3314         if (cp->cache_chunksize < mtp->mt_maxbuf) {
3315                 kmem_cache_magazine_purge(cp);
3316                 mutex_enter(&cp->cache_depot_lock);
3317                 cp->cache_magtype = ++mtp;
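                /*
                 * Bias the previous contention count far above the current
                 * one so that the signed delta computed by
                 * kmem_cache_update() stays negative and the cache is not
                 * resized again before the next update pass resets it.
                 */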
3318                 cp->cache_depot_contention_prev =
3319                     cp->cache_depot_contention + INT_MAX;
3320                 mutex_exit(&cp->cache_depot_lock);
3321                 kmem_cache_magazine_enable(cp);
3322         }
3323 }
3324 
3325 /*
3326  * Rescale a cache's hash table, so that the table size is roughly the
3327  * cache size.  We want the average lookup time to be extremely small.
3328  */
3329 static void
3330 kmem_hash_rescale(kmem_cache_t *cp)
3331 {
3332         kmem_bufctl_t **old_table, **new_table, *bcp;
3333         size_t old_size, new_size, h;
3334 
3335         ASSERT(taskq_member(kmem_taskq, curthread));
3336 
3337         new_size = MAX(KMEM_HASH_INITIAL,
3338             1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3339         old_size = cp->cache_hash_mask + 1;
3340 
3341         if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3342                 return;
3343 
3344         new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3345             VM_NOSLEEP);
3346         if (new_table == NULL)
3347                 return;
3348         bzero(new_table, new_size * sizeof (void *));
3349 
3350         mutex_enter(&cp->cache_lock);
3351 
3352         old_size = cp->cache_hash_mask + 1;
3353         old_table = cp->cache_hash_table;
3354 
3355         cp->cache_hash_mask = new_size - 1;
3356         cp->cache_hash_table = new_table;
3357         cp->cache_rescale++;
3358 
3359         for (h = 0; h < old_size; h++) {
3360                 bcp = old_table[h];
3361                 while (bcp != NULL) {
3362                         void *addr = bcp->bc_addr;
3363                         kmem_bufctl_t *next_bcp = bcp->bc_next;
3364                         kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3365                         bcp->bc_next = *hash_bucket;
3366                         *hash_bucket = bcp;
3367                         bcp = next_bcp;
3368                 }
3369         }
3370 
3371         mutex_exit(&cp->cache_lock);
3372 
3373         vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3374 }
3375 
3376 /*
3377  * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3378  * update, magazine resizing, and slab consolidation.
3379  */
3380 static void
3381 kmem_cache_update(kmem_cache_t *cp)
3382 {
3383         int need_hash_rescale = 0;
3384         int need_magazine_resize = 0;
3385 
3386         ASSERT(MUTEX_HELD(&kmem_cache_lock));
3387 
3388         /*
3389          * If the cache has become much larger or smaller than its hash table,
3390          * fire off a request to rescale the hash table.
3391          */
3392         mutex_enter(&cp->cache_lock);
3393 
3394         if ((cp->cache_flags & KMF_HASH) &&
3395             (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3396             (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3397             cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3398                 need_hash_rescale = 1;
3399 
3400         mutex_exit(&cp->cache_lock);
3401 
3402         /*
3403          * Update the depot working set statistics.
3404          */
3405         kmem_depot_ws_update(cp);
3406 
3407         /*
3408          * If there's a lot of contention in the depot,
3409          * increase the magazine size.
3410          */
3411         mutex_enter(&cp->cache_depot_lock);
3412 
3413         if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3414             (int)(cp->cache_depot_contention -
3415             cp->cache_depot_contention_prev) > kmem_depot_contention)
3416                 need_magazine_resize = 1;
3417 
3418         cp->cache_depot_contention_prev = cp->cache_depot_contention;
3419 
3420         mutex_exit(&cp->cache_depot_lock);
3421 
3422         if (need_hash_rescale)
3423                 (void) taskq_dispatch(kmem_taskq,
3424                     (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3425 
3426         if (need_magazine_resize)
3427                 (void) taskq_dispatch(kmem_taskq,
3428                     (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3429 
3430         if (cp->cache_defrag != NULL)
3431                 (void) taskq_dispatch(kmem_taskq,
3432                     (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3433 }
3434 
3435 static void kmem_update(void *);
3436 
3437 static void
3438 kmem_update_timeout(void *dummy)
3439 {
3440         (void) timeout(kmem_update, dummy, kmem_reap_interval);
3441 }
3442 
3443 static void
3444 kmem_update(void *dummy)
3445 {
3446         kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3447 
3448         /*
3449          * We use taskq_dispatch() to reschedule the timeout so that
3450          * kmem_update() becomes self-throttling: it won't schedule
3451          * new tasks until all previous tasks have completed.
3452          */
3453         if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3454                 kmem_update_timeout(NULL);
3455 }
3456 
3457 static int
3458 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3459 {
3460         struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3461         kmem_cache_t *cp = ksp->ks_private;
3462         uint64_t cpu_buf_avail;
3463         uint64_t buf_avail = 0;
3464         int cpu_seqid;
3465         long reap;
3466 
3467         ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3468 
3469         if (rw == KSTAT_WRITE)
3470                 return (EACCES);
3471 
3472         mutex_enter(&cp->cache_lock);
3473 
3474         kmcp->kmc_alloc_fail.value.ui64              = cp->cache_alloc_fail;
3475         kmcp->kmc_alloc.value.ui64           = cp->cache_slab_alloc;
3476         kmcp->kmc_free.value.ui64            = cp->cache_slab_free;
3477         kmcp->kmc_slab_alloc.value.ui64              = cp->cache_slab_alloc;
3478         kmcp->kmc_slab_free.value.ui64               = cp->cache_slab_free;
3479 
3480         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3481                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3482 
3483                 mutex_enter(&ccp->cc_lock);
3484 
3485                 cpu_buf_avail = 0;
3486                 if (ccp->cc_rounds > 0)
3487                         cpu_buf_avail += ccp->cc_rounds;
3488                 if (ccp->cc_prounds > 0)
3489                         cpu_buf_avail += ccp->cc_prounds;
3490 
3491                 kmcp->kmc_alloc.value.ui64   += ccp->cc_alloc;
3492                 kmcp->kmc_free.value.ui64    += ccp->cc_free;
3493                 buf_avail                       += cpu_buf_avail;
3494 
3495                 mutex_exit(&ccp->cc_lock);
3496         }
3497 
3498         mutex_enter(&cp->cache_depot_lock);
3499 
3500         kmcp->kmc_depot_alloc.value.ui64     = cp->cache_full.ml_alloc;
3501         kmcp->kmc_depot_free.value.ui64              = cp->cache_empty.ml_alloc;
3502         kmcp->kmc_depot_contention.value.ui64        = cp->cache_depot_contention;
3503         kmcp->kmc_full_magazines.value.ui64  = cp->cache_full.ml_total;
3504         kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3505         kmcp->kmc_magazine_size.value.ui64   =
3506             (cp->cache_flags & KMF_NOMAGAZINE) ?
3507             0 : cp->cache_magtype->mt_magsize;
3508 
3509         kmcp->kmc_alloc.value.ui64           += cp->cache_full.ml_alloc;
3510         kmcp->kmc_free.value.ui64            += cp->cache_empty.ml_alloc;
3511         buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3512 
3513         reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3514         reap = MIN(reap, cp->cache_full.ml_total);
3515 
3516         mutex_exit(&cp->cache_depot_lock);
3517 
3518         kmcp->kmc_buf_size.value.ui64        = cp->cache_bufsize;
3519         kmcp->kmc_align.value.ui64   = cp->cache_align;
3520         kmcp->kmc_chunk_size.value.ui64      = cp->cache_chunksize;
3521         kmcp->kmc_slab_size.value.ui64       = cp->cache_slabsize;
3522         kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3523         buf_avail += cp->cache_bufslab;
3524         kmcp->kmc_buf_avail.value.ui64       = buf_avail;
3525         kmcp->kmc_buf_inuse.value.ui64       = cp->cache_buftotal - buf_avail;
3526         kmcp->kmc_buf_total.value.ui64       = cp->cache_buftotal;
3527         kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3528         kmcp->kmc_slab_create.value.ui64     = cp->cache_slab_create;
3529         kmcp->kmc_slab_destroy.value.ui64    = cp->cache_slab_destroy;
3530         kmcp->kmc_hash_size.value.ui64       = (cp->cache_flags & KMF_HASH) ?
3531             cp->cache_hash_mask + 1 : 0;
3532         kmcp->kmc_hash_lookup_depth.value.ui64       = cp->cache_lookup_depth;
3533         kmcp->kmc_hash_rescale.value.ui64    = cp->cache_rescale;
3534         kmcp->kmc_vmem_source.value.ui64     = cp->cache_arena->vm_id;
3535         kmcp->kmc_reap.value.ui64    = cp->cache_reap;
3536 
3537         if (cp->cache_defrag == NULL) {
3538                 kmcp->kmc_move_callbacks.value.ui64  = 0;
3539                 kmcp->kmc_move_yes.value.ui64                = 0;
3540                 kmcp->kmc_move_no.value.ui64         = 0;
3541                 kmcp->kmc_move_later.value.ui64              = 0;
3542                 kmcp->kmc_move_dont_need.value.ui64  = 0;
3543                 kmcp->kmc_move_dont_know.value.ui64  = 0;
3544                 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3545                 kmcp->kmc_move_slabs_freed.value.ui64        = 0;
3546                 kmcp->kmc_defrag.value.ui64          = 0;
3547                 kmcp->kmc_scan.value.ui64            = 0;
3548                 kmcp->kmc_move_reclaimable.value.ui64        = 0;
3549         } else {
3550                 int64_t reclaimable;
3551 
3552                 kmem_defrag_t *kd = cp->cache_defrag;
3553                 kmcp->kmc_move_callbacks.value.ui64  = kd->kmd_callbacks;
3554                 kmcp->kmc_move_yes.value.ui64                = kd->kmd_yes;
3555                 kmcp->kmc_move_no.value.ui64         = kd->kmd_no;
3556                 kmcp->kmc_move_later.value.ui64              = kd->kmd_later;
3557                 kmcp->kmc_move_dont_need.value.ui64  = kd->kmd_dont_need;
3558                 kmcp->kmc_move_dont_know.value.ui64  = kd->kmd_dont_know;
3559                 kmcp->kmc_move_hunt_found.value.ui64 = kd->kmd_hunt_found;
3560                 kmcp->kmc_move_slabs_freed.value.ui64        = kd->kmd_slabs_freed;
3561                 kmcp->kmc_defrag.value.ui64          = kd->kmd_defrags;
3562                 kmcp->kmc_scan.value.ui64            = kd->kmd_scans;
3563 
3564                 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3565                 reclaimable = MAX(reclaimable, 0);
3566                 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3567                 kmcp->kmc_move_reclaimable.value.ui64        = reclaimable;
3568         }
3569 
3570         mutex_exit(&cp->cache_lock);
3571         return (0);
3572 }
3573 
3574 /*
3575  * Return a named statistic about a particular cache.
3576  * This shouldn't be called very often, so it's currently designed for
3577  * simplicity (leverages existing kstat support) rather than efficiency.
3578  */
3579 uint64_t
3580 kmem_cache_stat(kmem_cache_t *cp, char *name)
3581 {
3582         int i;
3583         kstat_t *ksp = cp->cache_kstat;
3584         kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3585         uint64_t value = 0;
3586 
3587         if (ksp != NULL) {
3588                 mutex_enter(&kmem_cache_kstat_lock);
3589                 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3590                 for (i = 0; i < ksp->ks_ndata; i++) {
3591                         if (strcmp(knp[i].name, name) == 0) {
3592                                 value = knp[i].value.ui64;
3593                                 break;
3594                         }
3595                 }
3596                 mutex_exit(&kmem_cache_kstat_lock);
3597         }
3598         return (value);
3599 }
3600 
3601 /*
3602  * Return an estimate of currently available kernel heap memory.
 * On 32-bit systems, where physical memory may exceed virtual memory,
 * we just truncate the result at 1GB.
3605  */
3606 size_t
3607 kmem_avail(void)
3608 {
3609         spgcnt_t rmem = availrmem - tune.t_minarmem;
3610         spgcnt_t fmem = freemem - minfree;
3611 
3612         return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3613             1 << (30 - PAGESHIFT))));
3614 }
3615 
3616 /*
3617  * Return the maximum amount of memory that is (in theory) allocatable
 * from the heap. This may be used as an estimate only, since there is
 * no guarantee this space will still be available when an allocation
 * request is made, nor that the space can be allocated in one large
 * request, due to kernel heap fragmentation.
3622  */
3623 size_t
3624 kmem_maxavail(void)
3625 {
3626         spgcnt_t pmem = availrmem - tune.t_minarmem;
3627         spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3628 
3629         return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3630 }
3631 
3632 /*
3633  * Indicate whether memory-intensive kmem debugging is enabled.
3634  */
3635 int
3636 kmem_debugging(void)
3637 {
3638         return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3639 }
3640 
3641 /* binning function, sorts finely at the two extremes */
3642 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift)                          \
3643         ((((sp)->slab_refcnt <= (binshift)) ||                            \
3644             (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift)))   \
3645             ? -(sp)->slab_refcnt                                     \
3646             : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3647 
3648 /*
 * Minimizing the number of partial slabs on the freelist minimizes
 * fragmentation (the ratio of unused buffers held by the slab layer). There
 * are two ways to get a slab off of the freelist: 1) free all the buffers on
 * the slab, and 2) allocate all the buffers on the slab. It follows that we
 * want the most-used slabs at the front of the list where they have the best
 * chance of being completely allocated, and the least-used slabs at a safe
 * distance from the front to improve the odds that the few remaining buffers
 * will all be freed before another allocation can tie up the slab. For that
 * reason a slab with a higher slab_refcnt sorts less than a slab with a
 * lower slab_refcnt.
3659  *
3660  * However, if a slab has at least one buffer that is deemed unfreeable, we
3661  * would rather have that slab at the front of the list regardless of
3662  * slab_refcnt, since even one unfreeable buffer makes the entire slab
3663  * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3664  * callback, the slab is marked unfreeable for as long as it remains on the
3665  * freelist.
3666  */
3667 static int
3668 kmem_partial_slab_cmp(const void *p0, const void *p1)
3669 {
3670         const kmem_cache_t *cp;
3671         const kmem_slab_t *s0 = p0;
3672         const kmem_slab_t *s1 = p1;
3673         int w0, w1;
3674         size_t binshift;
3675 
3676         ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3677         ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3678         ASSERT(s0->slab_cache == s1->slab_cache);
3679         cp = s1->slab_cache;
3680         ASSERT(MUTEX_HELD(&cp->cache_lock));
3681         binshift = cp->cache_partial_binshift;
3682 
3683         /* weight of first slab */
3684         w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3685         if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3686                 w0 -= cp->cache_maxchunks;
3687         }
3688 
3689         /* weight of second slab */
3690         w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3691         if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3692                 w1 -= cp->cache_maxchunks;
3693         }
3694 
3695         if (w0 < w1)
3696                 return (-1);
3697         if (w0 > w1)
3698                 return (1);
3699 
3700         /* compare pointer values */
3701         if ((uintptr_t)s0 < (uintptr_t)s1)
3702                 return (-1);
3703         if ((uintptr_t)s0 > (uintptr_t)s1)
3704                 return (1);
3705 
3706         return (0);
3707 }
3708 
3709 /*
3710  * It must be valid to call the destructor (if any) on a newly created object.
3711  * That is, the constructor (if any) must leave the object in a valid state for
3712  * the destructor.
3713  */
3714 kmem_cache_t *
3715 kmem_cache_create(
3716         char *name,             /* descriptive name for this cache */
3717         size_t bufsize,         /* size of the objects it manages */
3718         size_t align,           /* required object alignment */
3719         int (*constructor)(void *, void *, int), /* object constructor */
3720         void (*destructor)(void *, void *),     /* object destructor */
3721         void (*reclaim)(void *), /* memory reclaim callback */
3722         void *private,          /* pass-thru arg for constr/destr/reclaim */
3723         vmem_t *vmp,            /* vmem source for slab allocation */
3724         int cflags)             /* cache creation flags */
3725 {
3726         int cpu_seqid;
3727         size_t chunksize;
3728         kmem_cache_t *cp;
3729         kmem_magtype_t *mtp;
3730         size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3731 
3732 #ifdef  DEBUG
3733         /*
3734          * Cache names should conform to the rules for valid C identifiers
3735          */
3736         if (!strident_valid(name)) {
3737                 cmn_err(CE_CONT,
3738                     "kmem_cache_create: '%s' is an invalid cache name\n"
3739                     "cache names must conform to the rules for "
3740                     "C identifiers\n", name);
3741         }
3742 #endif  /* DEBUG */
3743 
3744         if (vmp == NULL)
3745                 vmp = kmem_default_arena;
3746 
3747         /*
3748          * If this kmem cache has an identifier vmem arena as its source, mark
3749          * it such to allow kmem_reap_idspace().
3750          */
3751         ASSERT(!(cflags & KMC_IDENTIFIER));   /* consumer should not set this */
3752         if (vmp->vm_cflags & VMC_IDENTIFIER)
3753                 cflags |= KMC_IDENTIFIER;
3754 
3755         /*
3756          * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
3757          * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3758          * false sharing of per-CPU data.
3759          */
3760         cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3761             P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3762         bzero(cp, csize);
3763         list_link_init(&cp->cache_link);
3764 
3765         if (align == 0)
3766                 align = KMEM_ALIGN;
3767 
3768         /*
3769          * If we're not at least KMEM_ALIGN aligned, we can't use free
3770          * memory to hold bufctl information (because we can't safely
3771          * perform word loads and stores on it).
3772          */
3773         if (align < KMEM_ALIGN)
3774                 cflags |= KMC_NOTOUCH;
3775 
3776         if (!ISP2(align) || align > vmp->vm_quantum)
3777                 panic("kmem_cache_create: bad alignment %lu", align);
3778 
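	/*
	 * If KMF_RANDOMIZE is set, step the global kmem_flags to the next
	 * combination of the KMF_RANDOM debug flags before sampling it below,
	 * so that successive cache creations exercise different debug
	 * settings.
	 */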
3779         mutex_enter(&kmem_flags_lock);
3780         if (kmem_flags & KMF_RANDOMIZE)
3781                 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3782                     KMF_RANDOMIZE;
3783         cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3784         mutex_exit(&kmem_flags_lock);
3785 
3786         /*
3787          * Make sure all the various flags are reasonable.
3788          */
3789         ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3790 
3791         if (cp->cache_flags & KMF_LITE) {
3792                 if (bufsize >= kmem_lite_minsize &&
3793                     align <= kmem_lite_maxalign &&
3794                     P2PHASE(bufsize, kmem_lite_maxalign) == 0) {
3795                         cp->cache_flags |= KMF_BUFTAG;
3796                         cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3797                 } else {
3798                         cp->cache_flags &= ~KMF_DEBUG;
3799                 }
3800         }
3801 
3802         if (cp->cache_flags & KMF_DEADBEEF)
3803                 cp->cache_flags |= KMF_REDZONE;
3804 
3805         if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3806                 cp->cache_flags |= KMF_NOMAGAZINE;
3807 
3808         if (cflags & KMC_NODEBUG)
3809                 cp->cache_flags &= ~KMF_DEBUG;
3810 
3811         if (cflags & KMC_NOTOUCH)
3812                 cp->cache_flags &= ~KMF_TOUCH;
3813 
3814         if (cflags & KMC_PREFILL)
3815                 cp->cache_flags |= KMF_PREFILL;
3816 
3817         if (cflags & KMC_NOHASH)
3818                 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3819 
3820         if (cflags & KMC_NOMAGAZINE)
3821                 cp->cache_flags |= KMF_NOMAGAZINE;
3822 
3823         if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3824                 cp->cache_flags |= KMF_REDZONE;
3825 
3826         if (!(cp->cache_flags & KMF_AUDIT))
3827                 cp->cache_flags &= ~KMF_CONTENTS;
3828 
3829         if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3830             !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3831                 cp->cache_flags |= KMF_FIREWALL;
3832 
3833         if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3834                 cp->cache_flags &= ~KMF_FIREWALL;
3835 
3836         if (cp->cache_flags & KMF_FIREWALL) {
3837                 cp->cache_flags &= ~KMF_BUFTAG;
3838                 cp->cache_flags |= KMF_NOMAGAZINE;
3839                 ASSERT(vmp == kmem_default_arena);
3840                 vmp = kmem_firewall_arena;
3841         }
3842 
3843         /*
3844          * Set cache properties.
3845          */
3846         (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3847         strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3848         cp->cache_bufsize = bufsize;
3849         cp->cache_align = align;
3850         cp->cache_constructor = constructor;
3851         cp->cache_destructor = destructor;
3852         cp->cache_reclaim = reclaim;
3853         cp->cache_private = private;
3854         cp->cache_arena = vmp;
3855         cp->cache_cflags = cflags;
3856 
3857         /*
3858          * Determine the chunk size.
3859          */
3860         chunksize = bufsize;
3861 
3862         if (align >= KMEM_ALIGN) {
3863                 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3864                 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3865         }
3866 
3867         if (cp->cache_flags & KMF_BUFTAG) {
3868                 cp->cache_bufctl = chunksize;
3869                 cp->cache_buftag = chunksize;
3870                 if (cp->cache_flags & KMF_LITE)
3871                         chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3872                 else
3873                         chunksize += sizeof (kmem_buftag_t);
3874         }
3875 
3876         if (cp->cache_flags & KMF_DEADBEEF) {
3877                 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3878                 if (cp->cache_flags & KMF_LITE)
3879                         cp->cache_verify = sizeof (uint64_t);
3880         }
3881 
3882         cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3883 
3884         cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3885 
3886         /*
3887          * Now that we know the chunk size, determine the optimal slab size.
3888          */
3889         if (vmp == kmem_firewall_arena) {
3890                 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3891                 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3892                 cp->cache_maxcolor = cp->cache_mincolor;
3893                 cp->cache_flags |= KMF_HASH;
3894                 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3895         } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3896             !(cp->cache_flags & KMF_AUDIT) &&
3897             chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3898                 cp->cache_slabsize = vmp->vm_quantum;
3899                 cp->cache_mincolor = 0;
3900                 cp->cache_maxcolor =
3901                     (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3902                 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3903                 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3904         } else {
3905                 size_t chunks, bestfit, waste, slabsize;
3906                 size_t minwaste = LONG_MAX;
3907 
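		/*
		 * Try slab sizes that hold from one up to KMEM_VOID_FRACTION
		 * chunks, each rounded up to the arena quantum, and remember
		 * the size that wastes the least space per chunk.
		 */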
3908                 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3909                         slabsize = P2ROUNDUP(chunksize * chunks,
3910                             vmp->vm_quantum);
3911                         chunks = slabsize / chunksize;
3912                         waste = (slabsize % chunksize) / chunks;
3913                         if (waste < minwaste) {
3914                                 minwaste = waste;
3915                                 bestfit = slabsize;
3916                         }
3917                 }
3918                 if (cflags & KMC_QCACHE)
3919                         bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3920                 cp->cache_slabsize = bestfit;
3921                 cp->cache_mincolor = 0;
3922                 cp->cache_maxcolor = bestfit % chunksize;
3923                 cp->cache_flags |= KMF_HASH;
3924         }
3925 
3926         cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3927         cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3928 
3929         /*
3930          * Disallowing prefill when either the DEBUG or HASH flag is set or when
3931          * there is a constructor avoids some tricky issues with debug setup
3932          * that may be revisited later. We cannot allow prefill in a
3933          * metadata cache because of potential recursion.
3934          */
3935         if (vmp == kmem_msb_arena ||
3936             cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3937             cp->cache_constructor != NULL)
3938                 cp->cache_flags &= ~KMF_PREFILL;
3939 
3940         if (cp->cache_flags & KMF_HASH) {
3941                 ASSERT(!(cflags & KMC_NOHASH));
3942                 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3943                     kmem_bufctl_audit_cache : kmem_bufctl_cache;
3944         }
3945 
3946         if (cp->cache_maxcolor >= vmp->vm_quantum)
3947                 cp->cache_maxcolor = vmp->vm_quantum - 1;
3948 
3949         cp->cache_color = cp->cache_mincolor;
3950 
3951         /*
3952          * Initialize the rest of the slab layer.
3953          */
3954         mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3955 
3956         avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3957             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3958         /* LINTED: E_TRUE_LOGICAL_EXPR */
3959         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3960         /* reuse partial slab AVL linkage for complete slab list linkage */
3961         list_create(&cp->cache_complete_slabs,
3962             sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3963 
3964         if (cp->cache_flags & KMF_HASH) {
3965                 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3966                     KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3967                 bzero(cp->cache_hash_table,
3968                     KMEM_HASH_INITIAL * sizeof (void *));
3969                 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3970                 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3971         }
3972 
3973         /*
3974          * Initialize the depot.
3975          */
3976         mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3977 
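	/*
	 * Select the magazine type for this chunk size: skip entries whose
	 * mt_minbuf threshold is at or above the chunk size, so that larger
	 * buffers end up with smaller magazines.
	 */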
3978         for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3979                 continue;
3980 
3981         cp->cache_magtype = mtp;
3982 
3983         /*
3984          * Initialize the CPU layer.
3985          */
3986         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3987                 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3988                 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3989                 ccp->cc_flags = cp->cache_flags;
3990                 ccp->cc_rounds = -1;
3991                 ccp->cc_prounds = -1;
3992         }
3993 
3994         /*
3995          * Create the cache's kstats.
3996          */
3997         if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3998             "kmem_cache", KSTAT_TYPE_NAMED,
3999             sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
4000             KSTAT_FLAG_VIRTUAL)) != NULL) {
4001                 cp->cache_kstat->ks_data = &kmem_cache_kstat;
4002                 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
4003                 cp->cache_kstat->ks_private = cp;
4004                 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
4005                 kstat_install(cp->cache_kstat);
4006         }
4007 
4008         /*
4009          * Add the cache to the global list.  This makes it visible
4010          * to kmem_update(), so the cache must be ready for business.
4011          */
4012         mutex_enter(&kmem_cache_lock);
4013         list_insert_tail(&kmem_caches, cp);
4014         mutex_exit(&kmem_cache_lock);
4015 
4016         if (kmem_ready)
4017                 kmem_cache_magazine_enable(cp);
4018 
4019         return (cp);
4020 }
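
/*
 * Illustrative usage sketch (hypothetical, not part of the allocator): a
 * client managing fixed-size foo_t objects might use the cache interfaces as
 * follows. The names foo_t, foo_cache, foo_constructor and foo_destructor are
 * invented for this example; note that foo_constructor leaves the object in a
 * state on which foo_destructor can safely be called, as required above.
 *
 *	static kmem_cache_t *foo_cache;
 *
 *	static int
 *	foo_constructor(void *buf, void *arg, int kmflags)
 *	{
 *		foo_t *foo = buf;
 *
 *		mutex_init(&foo->foo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	foo_destructor(void *buf, void *arg)
 *	{
 *		foo_t *foo = buf;
 *
 *		mutex_destroy(&foo->foo_lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *	    foo_constructor, foo_destructor, NULL, NULL, NULL, 0);
 *
 *	foo_t *foo = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(foo_cache, foo);
 */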
4021 
4022 static int
4023 kmem_move_cmp(const void *buf, const void *p)
4024 {
4025         const kmem_move_t *kmm = p;
4026         uintptr_t v1 = (uintptr_t)buf;
4027         uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
4028         return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4029 }
4030 
4031 static void
4032 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4033 {
4034         kmd->kmd_reclaim_numer = 1;
4035 }
4036 
4037 /*
4038  * Initially, when choosing candidate slabs for buffers to move, we want to be
4039  * very selective and take only slabs that are less than
4040  * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4041  * slabs, then we raise the allocation ceiling incrementally. The reclaim
4042  * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4043  * longer fragmented.
4044  */
4045 static void
4046 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4047 {
4048         if (direction > 0) {
4049                 /* make it easier to find a candidate slab */
4050                 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4051                         kmd->kmd_reclaim_numer++;
4052                 }
4053         } else {
4054                 /* be more selective */
4055                 if (kmd->kmd_reclaim_numer > 1) {
4056                         kmd->kmd_reclaim_numer--;
4057                 }
4058         }
4059 }
4060 
4061 void
4062 kmem_cache_set_move(kmem_cache_t *cp,
4063     kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4064 {
4065         kmem_defrag_t *defrag;
4066 
4067         ASSERT(move != NULL);
4068         /*
4069          * The consolidator does not support NOTOUCH caches because kmem cannot
4070          * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4071          * a low order bit usable by clients to distinguish uninitialized memory
4072          * from known objects (see kmem_slab_create).
4073          */
4074         ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4075         ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4076 
4077         /*
4078          * We should not be holding anyone's cache lock when calling
4079          * kmem_cache_alloc(), so allocate in all cases before acquiring the
4080          * lock.
4081          */
4082         defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4083 
4084         mutex_enter(&cp->cache_lock);
4085 
4086         if (KMEM_IS_MOVABLE(cp)) {
4087                 if (cp->cache_move == NULL) {
4088                         ASSERT(cp->cache_slab_alloc == 0);
4089 
4090                         cp->cache_defrag = defrag;
4091                         defrag = NULL; /* nothing to free */
4092                         bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4093                         avl_create(&cp->cache_defrag->kmd_moves_pending,
4094                             kmem_move_cmp, sizeof (kmem_move_t),
4095                             offsetof(kmem_move_t, kmm_entry));
4096                         /* LINTED: E_TRUE_LOGICAL_EXPR */
4097                         ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4098                         /* reuse the slab's AVL linkage for deadlist linkage */
4099                         list_create(&cp->cache_defrag->kmd_deadlist,
4100                             sizeof (kmem_slab_t),
4101                             offsetof(kmem_slab_t, slab_link));
4102                         kmem_reset_reclaim_threshold(cp->cache_defrag);
4103                 }
4104                 cp->cache_move = move;
4105         }
4106 
4107         mutex_exit(&cp->cache_lock);
4108 
4109         if (defrag != NULL) {
4110                 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4111         }
4112 }
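
/*
 * Illustrative sketch of a client move callback (hypothetical, not part of
 * the allocator): the names foo_t, foo_cache, foo_move, foo_hold, foo_rele
 * and foo_switch_references are invented here. The callback must decide on
 * its own whether the old buffer is still one of its live objects, copy it to
 * the new buffer and switch its own references if so, and answer with one of
 * the kmem_cbrc_t responses enumerated in the comment above
 * kmem_move_buffer() later in this file. It must not free either buffer.
 *
 *	static kmem_cbrc_t
 *	foo_move(void *buf, void *newbuf, size_t size, void *arg)
 *	{
 *		foo_t *from = buf;
 *		foo_t *to = newbuf;
 *
 *		if (!foo_hold(from))
 *			return (KMEM_CBRC_DONT_KNOW);
 *		bcopy(from, to, size);
 *		foo_switch_references(from, to);
 *		foo_rele(from);
 *		return (KMEM_CBRC_YES);
 *	}
 *
 *	kmem_cache_set_move(foo_cache, foo_move);
 */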
4113 
4114 void
4115 kmem_cache_destroy(kmem_cache_t *cp)
4116 {
4117         int cpu_seqid;
4118 
4119         /*
4120          * Remove the cache from the global cache list so that no one else
4121          * can schedule tasks on its behalf, wait for any pending tasks to
4122          * complete, purge the cache, and then destroy it.
4123          */
4124         mutex_enter(&kmem_cache_lock);
4125         list_remove(&kmem_caches, cp);
4126         mutex_exit(&kmem_cache_lock);
4127 
4128         if (kmem_taskq != NULL)
4129                 taskq_wait(kmem_taskq);
4130         if (kmem_move_taskq != NULL)
4131                 taskq_wait(kmem_move_taskq);
4132 
4133         kmem_cache_magazine_purge(cp);
4134 
4135         mutex_enter(&cp->cache_lock);
4136         if (cp->cache_buftotal != 0)
4137                 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4138                     cp->cache_name, (void *)cp);
4139         if (cp->cache_defrag != NULL) {
4140                 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4141                 list_destroy(&cp->cache_defrag->kmd_deadlist);
4142                 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4143                 cp->cache_defrag = NULL;
4144         }
4145         /*
4146          * The cache is now dead.  There should be no further activity.  We
4147          * enforce this by setting land mines in the constructor, destructor,
4148          * reclaim, and move routines that induce a kernel text fault if
4149          * invoked.
4150          */
4151         cp->cache_constructor = (int (*)(void *, void *, int))1;
4152         cp->cache_destructor = (void (*)(void *, void *))2;
4153         cp->cache_reclaim = (void (*)(void *))3;
4154         cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4155         mutex_exit(&cp->cache_lock);
4156 
4157         kstat_delete(cp->cache_kstat);
4158 
4159         if (cp->cache_hash_table != NULL)
4160                 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4161                     (cp->cache_hash_mask + 1) * sizeof (void *));
4162 
4163         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4164                 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4165 
4166         mutex_destroy(&cp->cache_depot_lock);
4167         mutex_destroy(&cp->cache_lock);
4168 
4169         vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4170 }
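
/*
 * Note for callers, summarizing the checks above: every object allocated from
 * the cache must already have been freed, and nothing may allocate from or
 * free to the cache once destruction begins; otherwise the buftotal warning
 * fires and any later use of the cache hits the land-mine function pointers
 * installed above.
 */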
4171 
4172 /*ARGSUSED*/
4173 static int
4174 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4175 {
4176         ASSERT(MUTEX_HELD(&cpu_lock));
4177         if (what == CPU_UNCONFIG) {
4178                 kmem_cache_applyall(kmem_cache_magazine_purge,
4179                     kmem_taskq, TQ_SLEEP);
4180                 kmem_cache_applyall(kmem_cache_magazine_enable,
4181                     kmem_taskq, TQ_SLEEP);
4182         }
4183         return (0);
4184 }
4185 
4186 static void
4187 kmem_alloc_caches_create(const int *array, size_t count,
4188     kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4189 {
4190         char name[KMEM_CACHE_NAMELEN + 1];
4191         size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4192         size_t size = table_unit;
4193         int i;
4194 
4195         for (i = 0; i < count; i++) {
4196                 size_t cache_size = array[i];
4197                 size_t align = KMEM_ALIGN;
4198                 kmem_cache_t *cp;
4199 
4200                 /* if the table has an entry for maxbuf, we're done */
4201                 if (size > maxbuf)
4202                         break;
4203 
4204                 /* cache size must be a multiple of the table unit */
4205                 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4206 
4207                 /*
4208                  * If they allocate a multiple of the coherency granularity,
4209                  * they get a coherency-granularity-aligned address.
4210                  */
4211                 if (IS_P2ALIGNED(cache_size, 64))
4212                         align = 64;
4213                 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4214                         align = PAGESIZE;
4215                 (void) snprintf(name, sizeof (name),
4216                     "kmem_alloc_%lu", cache_size);
4217                 cp = kmem_cache_create(name, cache_size, align,
4218                     NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4219 
4220                 while (size <= cache_size) {
4221                         alloc_table[(size - 1) >> shift] = cp;
4222                         size += table_unit;
4223                 }
4224         }
4225 
4226         ASSERT(size > maxbuf);               /* i.e. maxbuf <= max(cache_size) */
4227 }
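
/*
 * Worked example (sizes assumed for illustration only): with a shift of 3 the
 * table unit is 8 bytes, so creating caches of size 8, 16 and 24 fills table
 * slots 0, 1 and 2 respectively. A later lookup of the form
 * alloc_table[(size - 1) >> shift] then maps a 20-byte request to slot 2,
 * i.e. the 24-byte cache, the smallest cache that can satisfy it.
 */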
4228 
4229 static void
4230 kmem_cache_init(int pass, int use_large_pages)
4231 {
4232         int i;
4233         size_t maxbuf;
4234         kmem_magtype_t *mtp;
4235 
4236         for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4237                 char name[KMEM_CACHE_NAMELEN + 1];
4238 
4239                 mtp = &kmem_magtype[i];
4240                 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4241                 mtp->mt_cache = kmem_cache_create(name,
4242                     (mtp->mt_magsize + 1) * sizeof (void *),
4243                     mtp->mt_align, NULL, NULL, NULL, NULL,
4244                     kmem_msb_arena, KMC_NOHASH);
4245         }
4246 
4247         kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4248             sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4249             kmem_msb_arena, KMC_NOHASH);
4250 
4251         kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4252             sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4253             kmem_msb_arena, KMC_NOHASH);
4254 
4255         kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4256             sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4257             kmem_msb_arena, KMC_NOHASH);
4258 
4259         if (pass == 2) {
4260                 kmem_va_arena = vmem_create("kmem_va",
4261                     NULL, 0, PAGESIZE,
4262                     vmem_alloc, vmem_free, heap_arena,
4263                     8 * PAGESIZE, VM_SLEEP);
4264 
4265                 if (use_large_pages) {
4266                         kmem_default_arena = vmem_xcreate("kmem_default",
4267                             NULL, 0, PAGESIZE,
4268                             segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4269                             0, VMC_DUMPSAFE | VM_SLEEP);
4270                 } else {
4271                         kmem_default_arena = vmem_create("kmem_default",
4272                             NULL, 0, PAGESIZE,
4273                             segkmem_alloc, segkmem_free, kmem_va_arena,
4274                             0, VMC_DUMPSAFE | VM_SLEEP);
4275                 }
4276 
4277                 /* Figure out what our maximum cache size is */
4278                 maxbuf = kmem_max_cached;
4279                 if (maxbuf <= KMEM_MAXBUF) {
4280                         maxbuf = 0;
4281                         kmem_max_cached = KMEM_MAXBUF;
4282                 } else {
4283                         size_t size = 0;
4284                         size_t max =
4285                             sizeof (kmem_big_alloc_sizes) / sizeof (int);
4286                         /*
4287                          * Round maxbuf up to an existing cache size.  If maxbuf
4288                          * is larger than the largest cache, we truncate it to
4289                          * the largest cache's size.
4290                          */
4291                         for (i = 0; i < max; i++) {
4292                                 size = kmem_big_alloc_sizes[i];
4293                                 if (maxbuf <= size)
4294                                         break;
4295                         }
4296                         kmem_max_cached = maxbuf = size;
4297                 }
4298 
4299                 /*
4300                  * The big alloc table may not be completely overwritten, so
4301                  * we clear out any stale cache pointers from the first pass.
4302                  */
4303                 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4304         } else {
4305                 /*
4306                  * During the first pass, the kmem_alloc_* caches
4307                  * are treated as metadata.
4308                  */
4309                 kmem_default_arena = kmem_msb_arena;
4310                 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4311         }
4312 
4313         /*
4314          * Set up the default caches to back kmem_alloc()
4315          */
4316         kmem_alloc_caches_create(
4317             kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4318             kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4319 
4320         kmem_alloc_caches_create(
4321             kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4322             kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4323 
4324         kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4325 }
4326 
4327 void
4328 kmem_init(void)
4329 {
4330         kmem_cache_t *cp;
4331         int old_kmem_flags = kmem_flags;
4332         int use_large_pages = 0;
4333         size_t maxverify, minfirewall;
4334 
4335         kstat_init();
4336 
4337         /*
4338          * Small-memory systems (< 24 MB) can't handle kmem_flags overhead.
4339          */
4340         if (physmem < btop(24 << 20) && !(old_kmem_flags & KMF_STICKY))
4341                 kmem_flags = 0;
4342 
4343         /*
4344          * Don't do firewalled allocations if the heap is less than 1TB
4345          * (i.e. on a 32-bit kernel).
4346          * The resulting VM_NEXTFIT allocations would create too much
4347          * fragmentation in a small heap.
4348          */
4349 #if defined(_LP64)
4350         maxverify = minfirewall = PAGESIZE / 2;
4351 #else
4352         maxverify = minfirewall = ULONG_MAX;
4353 #endif
4354 
4355         /* LINTED */
4356         ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4357 
4358         list_create(&kmem_caches, sizeof (kmem_cache_t),
4359             offsetof(kmem_cache_t, cache_link));
4360 
4361         kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4362             vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4363             VM_SLEEP | VMC_NO_QCACHE);
4364 
4365         kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4366             PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4367             VMC_DUMPSAFE | VM_SLEEP);
4368 
4369         kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4370             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4371 
4372         kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4373             segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4374 
4375         kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4376             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4377 
4378         kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4379             NULL, 0, PAGESIZE,
4380             kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4381             0, VM_SLEEP);
4382 
4383         kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4384             segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4385             VMC_DUMPSAFE | VM_SLEEP);
4386 
4387         /* temporary oversize arena for mod_read_system_file */
4388         kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4389             segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4390 
4391         kmem_reap_interval = 15 * hz;
4392 
4393         /*
4394          * Read /etc/system.  This is a chicken-and-egg problem because
4395          * kmem_flags may be set in /etc/system, but mod_read_system_file()
4396          * needs to use the allocator.  The simplest solution is to create
4397          * all the standard kmem caches, read /etc/system, destroy all the
4398          * caches we just created, and then create them all again in light
4399          * of the (possibly) new kmem_flags and other kmem tunables.
4400          */
4401         kmem_cache_init(1, 0);
4402 
4403         mod_read_system_file(boothowto & RB_ASKNAME);
4404 
4405         while ((cp = list_tail(&kmem_caches)) != NULL)
4406                 kmem_cache_destroy(cp);
4407 
4408         vmem_destroy(kmem_oversize_arena);
4409 
4410         if (old_kmem_flags & KMF_STICKY)
4411                 kmem_flags = old_kmem_flags;
4412 
4413         if (!(kmem_flags & KMF_AUDIT))
4414                 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4415 
4416         if (kmem_maxverify == 0)
4417                 kmem_maxverify = maxverify;
4418 
4419         if (kmem_minfirewall == 0)
4420                 kmem_minfirewall = minfirewall;
4421 
4422         /*
4423          * give segkmem a chance to figure out if we are using large pages
4424          * for the kernel heap
4425          */
4426         use_large_pages = segkmem_lpsetup();
4427 
4428         /*
4429          * To protect against corruption, we keep the actual number of callers
4430          * that KMF_LITE records separate from the tunable.  We arbitrarily clamp
4431          * to 16, since the overhead for small buffers quickly gets out of
4432          * hand.
4433          *
4434          * The real limit would depend on the needs of the largest KMC_NOHASH
4435          * cache.
4436          */
4437         kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4438         kmem_lite_pcs = kmem_lite_count;
4439 
4440         /*
4441          * Normally, we firewall oversized allocations when possible, but
4442          * if we are using large pages for kernel memory, and we don't have
4443          * any non-LITE debugging flags set, we want to allocate oversized
4444          * buffers from large pages, and so skip the firewalling.
4445          */
4446         if (use_large_pages &&
4447             ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4448                 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4449                     PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4450                     0, VMC_DUMPSAFE | VM_SLEEP);
4451         } else {
4452                 kmem_oversize_arena = vmem_create("kmem_oversize",
4453                     NULL, 0, PAGESIZE,
4454                     segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX ?
4455                     kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4456                     VM_SLEEP);
4457         }
4458 
4459         kmem_cache_init(2, use_large_pages);
4460 
4461         if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4462                 if (kmem_transaction_log_size == 0)
4463                         kmem_transaction_log_size = kmem_maxavail() / 50;
4464                 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4465         }
4466 
4467         if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4468                 if (kmem_content_log_size == 0)
4469                         kmem_content_log_size = kmem_maxavail() / 50;
4470                 kmem_content_log = kmem_log_init(kmem_content_log_size);
4471         }
4472 
4473         kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4474 
4475         kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4476 
4477         /*
4478          * Initialize STREAMS message caches so allocb() is available.
4479          * This allows us to initialize the logging framework (cmn_err(9F),
4480          * strlog(9F), etc) so we can start recording messages.
4481          */
4482         streams_msg_init();
4483 
4484         /*
4485          * Initialize the ZSD framework in Zones so modules loaded henceforth
4486          * can register their callbacks.
4487          */
4488         zone_zsd_init();
4489 
4490         log_init();
4491         taskq_init();
4492 
4493         /*
4494          * Warn about invalid or dangerous values of kmem_flags.
4495          * Always warn about unsupported values.
4496          */
4497         if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4498             KMF_CONTENTS | KMF_LITE)) != 0) ||
4499             ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4500                 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4501                     "See the Solaris Tunable Parameters Reference Manual.",
4502                     kmem_flags);
4503 
4504 #ifdef DEBUG
4505         if ((kmem_flags & KMF_DEBUG) == 0)
4506                 cmn_err(CE_NOTE, "kmem debugging disabled.");
4507 #else
4508         /*
4509          * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4510          * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4511          * if KMF_AUDIT is set). We should warn the user about the performance
4512          * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4513          * isn't set (since that disables AUDIT).
4514          */
4515         if (!(kmem_flags & KMF_LITE) &&
4516             (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4517                 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4518                     "enabled (kmem_flags = 0x%x).  Performance degradation "
4519                     "and large memory overhead possible. See the Solaris "
4520                     "Tunable Parameters Reference Manual.", kmem_flags);
4521 #endif /* not DEBUG */
4522 
4523         kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4524 
4525         kmem_ready = 1;
4526 
4527         /*
4528          * Initialize the platform-specific aligned/DMA memory allocator.
4529          */
4530         ka_init();
4531 
4532         /*
4533          * Initialize 32-bit ID cache.
4534          */
4535         id32_init();
4536 
4537         /*
4538          * Initialize the networking stack so that loaded modules can
4539          * register their callbacks.
4540          */
4541         netstack_init();
4542 }
4543 
4544 static void
4545 kmem_move_init(void)
4546 {
4547         kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4548             sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4549             kmem_msb_arena, KMC_NOHASH);
4550         kmem_move_cache = kmem_cache_create("kmem_move_cache",
4551             sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4552             kmem_msb_arena, KMC_NOHASH);
4553 
4554         /*
4555          * kmem guarantees that move callbacks are sequential and that even
4556          * across multiple caches no two moves ever execute simultaneously.
4557          * Move callbacks are processed on a separate taskq so that client code
4558          * does not interfere with internal maintenance tasks.
4559          */
4560         kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4561             minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4562 }
4563 
4564 void
4565 kmem_thread_init(void)
4566 {
4567         kmem_move_init();
4568         kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4569             300, INT_MAX, TASKQ_PREPOPULATE);
4570 }
4571 
4572 void
4573 kmem_mp_init(void)
4574 {
4575         mutex_enter(&cpu_lock);
4576         register_cpu_setup_func(kmem_cpu_setup, NULL);
4577         mutex_exit(&cpu_lock);
4578 
4579         kmem_update_timeout(NULL);
4580 
4581         taskq_mp_init();
4582 }
4583 
4584 /*
4585  * Return the slab of the allocated buffer, or NULL if the buffer is not
4586  * allocated. This function may be called with a known slab address to determine
4587  * whether or not the buffer is allocated, or with a NULL slab address to obtain
4588  * an allocated buffer's slab.
4589  */
4590 static kmem_slab_t *
4591 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4592 {
4593         kmem_bufctl_t *bcp, *bufbcp;
4594 
4595         ASSERT(MUTEX_HELD(&cp->cache_lock));
4596         ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4597 
4598         if (cp->cache_flags & KMF_HASH) {
4599                 for (bcp = *KMEM_HASH(cp, buf);
4600                     (bcp != NULL) && (bcp->bc_addr != buf);
4601                     bcp = bcp->bc_next) {
4602                         continue;
4603                 }
4604                 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4605                 return (bcp == NULL ? NULL : bcp->bc_slab);
4606         }
4607 
4608         if (sp == NULL) {
4609                 sp = KMEM_SLAB(cp, buf);
4610         }
4611         bufbcp = KMEM_BUFCTL(cp, buf);
4612         for (bcp = sp->slab_head;
4613             (bcp != NULL) && (bcp != bufbcp);
4614             bcp = bcp->bc_next) {
4615                 continue;
4616         }
4617         return (bcp == NULL ? sp : NULL);
4618 }
4619 
4620 static boolean_t
4621 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4622 {
4623         long refcnt = sp->slab_refcnt;
4624 
4625         ASSERT(cp->cache_defrag != NULL);
4626 
4627         /*
4628          * For code coverage we want to be able to move an object within the
4629          * same slab (the only partial slab) even if allocating the destination
4630          * buffer resulted in a completely allocated slab.
4631          */
4632         if (flags & KMM_DEBUG) {
4633                 return ((flags & KMM_DESPERATE) ||
4634                     ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4635         }
4636 
4637         /* If we're desperate, we don't care if the client said NO. */
4638         if (flags & KMM_DESPERATE) {
4639                 return (refcnt < sp->slab_chunks); /* any partial */
4640         }
4641 
4642         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4643                 return (B_FALSE);
4644         }
4645 
4646         if ((refcnt == 1) || kmem_move_any_partial) {
4647                 return (refcnt < sp->slab_chunks);
4648         }
4649 
4650         /*
4651          * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4652          * slabs with a progressively higher percentage of used buffers can be
4653          * reclaimed until the cache as a whole is no longer fragmented.
4654          *
4655          *      sp->slab_refcnt   kmd_reclaim_numer
4656          *      --------------- < ------------------
4657          *      sp->slab_chunks   KMEM_VOID_FRACTION
4658          */
4659         return ((refcnt * KMEM_VOID_FRACTION) <
4660             (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4661 }
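
/*
 * Worked example of the threshold above, assuming KMEM_VOID_FRACTION is 8:
 * with kmd_reclaim_numer at its initial value of 1, a 64-chunk slab is a
 * candidate only while fewer than 8 of its buffers are allocated; each time
 * kmem_adjust_reclaim_threshold() raises the numerator, that ceiling rises by
 * another eighth of the slab (another 8 buffers here), up to 7/8 of the slab.
 */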
4662 
4663 static void *
4664 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4665     void *tbuf)
4666 {
4667         int i;          /* magazine round index */
4668 
4669         for (i = 0; i < n; i++) {
4670                 if (buf == m->mag_round[i]) {
4671                         if (cp->cache_flags & KMF_BUFTAG) {
4672                                 (void) kmem_cache_free_debug(cp, tbuf,
4673                                     caller());
4674                         }
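			/*
			 * Swap tbuf into the magazine in place of the buffer
			 * being extracted so the magazine's round count is
			 * unchanged.
			 */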
4675                         m->mag_round[i] = tbuf;
4676                         return (buf);
4677                 }
4678         }
4679 
4680         return (NULL);
4681 }
4682 
4683 /*
4684  * Hunt the magazine layer for the given buffer. If found, the buffer is
4685  * removed from the magazine layer and returned; otherwise NULL is returned.
4686  * The returned buffer is in the freed, constructed state.
4687  */
4688 static void *
4689 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4690 {
4691         kmem_cpu_cache_t *ccp;
4692         kmem_magazine_t *m;
4693         int cpu_seqid;
4694         int n;          /* magazine rounds */
4695         void *tbuf;     /* temporary swap buffer */
4696 
4697         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4698 
4699         /*
4700          * Allocate a buffer to swap with the one we hope to pull out of a
4701          * magazine when found.
4702          */
4703         tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4704         if (tbuf == NULL) {
4705                 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
4706                 return (NULL);
4707         }
4708         if (tbuf == buf) {
4709                 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
4710                 if (cp->cache_flags & KMF_BUFTAG) {
4711                         (void) kmem_cache_free_debug(cp, buf, caller());
4712                 }
4713                 return (buf);
4714         }
4715 
4716         /* Hunt the depot. */
4717         mutex_enter(&cp->cache_depot_lock);
4718         n = cp->cache_magtype->mt_magsize;
4719         for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4720                 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4721                         mutex_exit(&cp->cache_depot_lock);
4722                         return (buf);
4723                 }
4724         }
4725         mutex_exit(&cp->cache_depot_lock);
4726 
4727         /* Hunt the per-CPU magazines. */
4728         for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4729                 ccp = &cp->cache_cpu[cpu_seqid];
4730 
4731                 mutex_enter(&ccp->cc_lock);
4732                 m = ccp->cc_loaded;
4733                 n = ccp->cc_rounds;
4734                 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4735                         mutex_exit(&ccp->cc_lock);
4736                         return (buf);
4737                 }
4738                 m = ccp->cc_ploaded;
4739                 n = ccp->cc_prounds;
4740                 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4741                         mutex_exit(&ccp->cc_lock);
4742                         return (buf);
4743                 }
4744                 mutex_exit(&ccp->cc_lock);
4745         }
4746 
4747         kmem_cache_free(cp, tbuf);
4748         return (NULL);
4749 }
4750 
4751 /*
4752  * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4753  * or when the buffer is freed.
4754  */
4755 static void
4756 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4757 {
4758         ASSERT(MUTEX_HELD(&cp->cache_lock));
4759         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4760 
4761         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4762                 return;
4763         }
4764 
4765         if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4766                 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4767                         avl_remove(&cp->cache_partial_slabs, sp);
4768                         sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4769                         sp->slab_stuck_offset = (uint32_t)-1;
4770                         avl_add(&cp->cache_partial_slabs, sp);
4771                 }
4772         } else {
4773                 sp->slab_later_count = 0;
4774                 sp->slab_stuck_offset = (uint32_t)-1;
4775         }
4776 }
4777 
4778 static void
4779 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4780 {
4781         ASSERT(taskq_member(kmem_move_taskq, curthread));
4782         ASSERT(MUTEX_HELD(&cp->cache_lock));
4783         ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4784 
4785         if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4786                 return;
4787         }
4788 
4789         avl_remove(&cp->cache_partial_slabs, sp);
4790         sp->slab_later_count = 0;
4791         sp->slab_flags |= KMEM_SLAB_NOMOVE;
4792         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4793         avl_add(&cp->cache_partial_slabs, sp);
4794 }
4795 
4796 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4797 
4798 /*
4799  * The move callback takes two buffer addresses, the buffer to be moved, and a
4800  * newly allocated and constructed buffer selected by kmem as the destination.
4801  * It also takes the size of the buffer and an optional user argument specified
4802  * at cache creation time. kmem guarantees that the buffer to be moved has not
4803  * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4804  * guarantee the present whereabouts of the buffer to be moved, so it is up to
4805  * the client to safely determine whether or not it is still using the buffer.
4806  * The client must not free either of the buffers passed to the move callback,
4807  * since kmem wants to free them directly to the slab layer. The client response
4808  * tells kmem which of the two buffers to free:
4809  *
4810  * YES          kmem frees the old buffer (the move was successful)
4811  * NO           kmem frees the new buffer, marks the slab of the old buffer
4812  *              non-reclaimable to avoid bothering the client again
4813  * LATER        kmem frees the new buffer, increments slab_later_count
4814  * DONT_KNOW    kmem frees the new buffer, searches mags for the old buffer
4815  * DONT_NEED    kmem frees both the old buffer and the new buffer
4816  *
4817  * The pending callback argument now being processed contains both of the
4818  * buffers (old and new) passed to the move callback function, the slab of the
4819  * old buffer, and flags related to the move request, such as whether or not the
4820  * system was desperate for memory.
4821  *
4822  * Slabs are not freed while there is a pending callback, but instead are kept
4823  * on a deadlist, which is drained after the last callback completes. This means
4824  * that slabs are safe to access until kmem_move_end(), no matter how many of
4825  * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4826  * zero for as long as the slab remains on the deadlist and until the slab is
4827  * freed.
4828  */
4829 static void
4830 kmem_move_buffer(kmem_move_t *callback)
4831 {
4832         kmem_cbrc_t response;
4833         kmem_slab_t *sp = callback->kmm_from_slab;
4834         kmem_cache_t *cp = sp->slab_cache;
4835         boolean_t free_on_slab;
4836 
4837         ASSERT(taskq_member(kmem_move_taskq, curthread));
4838         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4839         ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4840 
4841         /*
4842          * The number of allocated buffers on the slab may have changed since we
4843          * last checked the slab's reclaimability (when the pending move was
4844          * enqueued), or the client may have responded NO when asked to move
4845          * another buffer on the same slab.
4846          */
4847         if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4848                 KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
4849                 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4850                     kmem_move_stats.kms_notify_no_longer_reclaimable);
4851                 kmem_slab_free(cp, callback->kmm_to_buf);
4852                 kmem_move_end(cp, callback);
4853                 return;
4854         }
4855 
4856         /*
4857          * Hunting magazines is expensive, so we'll wait to do that until the
4858          * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer
4859          * is cheap, so we might as well do that here in case we can avoid
4860          * bothering the client.
4861          */
4862         mutex_enter(&cp->cache_lock);
4863         free_on_slab = (kmem_slab_allocated(cp, sp,
4864             callback->kmm_from_buf) == NULL);
4865         mutex_exit(&cp->cache_lock);
4866 
4867         if (free_on_slab) {
4868                 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4869                 kmem_slab_free(cp, callback->kmm_to_buf);
4870                 kmem_move_end(cp, callback);
4871                 return;
4872         }
4873 
4874         if (cp->cache_flags & KMF_BUFTAG) {
4875                 /*
4876                  * Make kmem_cache_alloc_debug() apply the constructor for us.
4877                  */
4878                 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4879                     KM_NOSLEEP, 1, caller()) != 0) {
4880                         KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4881                         kmem_move_end(cp, callback);
4882                         return;
4883                 }
4884         } else if (cp->cache_constructor != NULL &&
4885             cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4886             KM_NOSLEEP) != 0) {
4887                 atomic_inc_64(&cp->cache_alloc_fail);
4888                 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4889                 kmem_slab_free(cp, callback->kmm_to_buf);
4890                 kmem_move_end(cp, callback);
4891                 return;
4892         }
4893 
4894         KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4895         KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4896             kmem_move_stats.kms_notify_callbacks);
4897         cp->cache_defrag->kmd_callbacks++;
4898         cp->cache_defrag->kmd_thread = curthread;
4899         cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4900         cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4901         DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4902             callback);
4903 
4904         response = cp->cache_move(callback->kmm_from_buf,
4905             callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4906 
4907         DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4908             callback, kmem_cbrc_t, response);
4909         cp->cache_defrag->kmd_thread = NULL;
4910         cp->cache_defrag->kmd_from_buf = NULL;
4911         cp->cache_defrag->kmd_to_buf = NULL;
4912 
4913         if (response == KMEM_CBRC_YES) {
4914                 KMEM_STAT_ADD(kmem_move_stats.kms_yes);
4915                 cp->cache_defrag->kmd_yes++;
4916                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4917                 /* slab safe to access until kmem_move_end() */
4918                 if (sp->slab_refcnt == 0)
4919                         cp->cache_defrag->kmd_slabs_freed++;
4920                 mutex_enter(&cp->cache_lock);
4921                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4922                 mutex_exit(&cp->cache_lock);
4923                 kmem_move_end(cp, callback);
4924                 return;
4925         }
4926 
4927         switch (response) {
4928         case KMEM_CBRC_NO:
4929                 KMEM_STAT_ADD(kmem_move_stats.kms_no);
4930                 cp->cache_defrag->kmd_no++;
4931                 mutex_enter(&cp->cache_lock);
4932                 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4933                 mutex_exit(&cp->cache_lock);
4934                 break;
4935         case KMEM_CBRC_LATER:
4936                 KMEM_STAT_ADD(kmem_move_stats.kms_later);
4937                 cp->cache_defrag->kmd_later++;
4938                 mutex_enter(&cp->cache_lock);
4939                 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4940                         mutex_exit(&cp->cache_lock);
4941                         break;
4942                 }
4943 
4944                 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4945                         KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
4946                         kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4947                 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4948                         sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4949                             callback->kmm_from_buf);
4950                 }
4951                 mutex_exit(&cp->cache_lock);
4952                 break;
4953         case KMEM_CBRC_DONT_NEED:
4954                 KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
4955                 cp->cache_defrag->kmd_dont_need++;
4956                 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4957                 if (sp->slab_refcnt == 0)
4958                         cp->cache_defrag->kmd_slabs_freed++;
4959                 mutex_enter(&cp->cache_lock);
4960                 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4961                 mutex_exit(&cp->cache_lock);
4962                 break;
4963         case KMEM_CBRC_DONT_KNOW:
4964                 KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
4965                 cp->cache_defrag->kmd_dont_know++;
4966                 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4967                         KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
4968                         cp->cache_defrag->kmd_hunt_found++;
4969                         kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4970                             B_TRUE);
4971                         if (sp->slab_refcnt == 0)
4972                                 cp->cache_defrag->kmd_slabs_freed++;
4973                         mutex_enter(&cp->cache_lock);
4974                         kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4975                         mutex_exit(&cp->cache_lock);
4976                 }
4977                 break;
4978         default:
4979                 panic("'%s' (%p) unexpected move callback response %d\n",
4980                     cp->cache_name, (void *)cp, response);
4981         }
4982 
4983         kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4984         kmem_move_end(cp, callback);
4985 }
4986 
4987 /* Return B_FALSE if there is insufficient memory for the move request. */
4988 static boolean_t
4989 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4990 {
4991         void *to_buf;
4992         avl_index_t index;
4993         kmem_move_t *callback, *pending;
4994         ulong_t n;
4995 
4996         ASSERT(taskq_member(kmem_taskq, curthread));
4997         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4998         ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4999 
5000         callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
5001         if (callback == NULL) {
5002                 KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
5003                 return (B_FALSE);
5004         }
5005 
5006         callback->kmm_from_slab = sp;
5007         callback->kmm_from_buf = buf;
5008         callback->kmm_flags = flags;
5009 
5010         mutex_enter(&cp->cache_lock);
5011 
5012         n = avl_numnodes(&cp->cache_partial_slabs);
5013         if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
5014                 mutex_exit(&cp->cache_lock);
5015                 kmem_cache_free(kmem_move_cache, callback);
5016                 return (B_TRUE); /* there is no need for the move request */
5017         }
5018 
5019         pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
5020         if (pending != NULL) {
5021                 /*
5022                  * If the move is already pending and we're desperate now,
5023                  * update the move flags.
5024                  */
5025                 if (flags & KMM_DESPERATE) {
5026                         pending->kmm_flags |= KMM_DESPERATE;
5027                 }
5028                 mutex_exit(&cp->cache_lock);
5029                 KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
5030                 kmem_cache_free(kmem_move_cache, callback);
5031                 return (B_TRUE);
5032         }
5033 
5034         to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
5035             B_FALSE);
5036         callback->kmm_to_buf = to_buf;
5037         avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
5038 
5039         mutex_exit(&cp->cache_lock);
5040 
5041         if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
5042             callback, TQ_NOSLEEP)) {
5043                 KMEM_STAT_ADD(kmem_move_stats.kms_callback_taskq_fail);
5044                 mutex_enter(&cp->cache_lock);
5045                 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5046                 mutex_exit(&cp->cache_lock);
5047                 kmem_slab_free(cp, to_buf);
5048                 kmem_cache_free(kmem_move_cache, callback);
5049                 return (B_FALSE);
5050         }
5051 
5052         return (B_TRUE);
5053 }
5054 
5055 static void
5056 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
5057 {
5058         avl_index_t index;
5059 
5060         ASSERT(cp->cache_defrag != NULL);
5061         ASSERT(taskq_member(kmem_move_taskq, curthread));
5062         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5063 
5064         mutex_enter(&cp->cache_lock);
5065         VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
5066             callback->kmm_from_buf, &index) != NULL);
5067         avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5068         if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
5069                 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5070                 kmem_slab_t *sp;
5071 
5072                 /*
5073                  * The last pending move completed. Release all slabs from the
5074                  * front of the dead list except for any slab at the tail that
5075                  * needs to be released from the context of kmem_move_buffers().
5076                  * kmem deferred unmapping the buffers on these slabs in order
5077                  * to guarantee that buffers passed to the move callback have
5078                  * been touched only by kmem or by the client itself.
5079                  */
5080                 while ((sp = list_remove_head(deadlist)) != NULL) {
5081                         if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
5082                                 list_insert_tail(deadlist, sp);
5083                                 break;
5084                         }
5085                         cp->cache_defrag->kmd_deadcount--;
5086                         cp->cache_slab_destroy++;
5087                         mutex_exit(&cp->cache_lock);
5088                         kmem_slab_destroy(cp, sp);
5089                         KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5090                         mutex_enter(&cp->cache_lock);
5091                 }
5092         }
5093         mutex_exit(&cp->cache_lock);
5094         kmem_cache_free(kmem_move_cache, callback);
5095 }
5096 
5097 /*
5098  * Move buffers from least used slabs first by scanning backwards from the end
5099  * of the partial slab list. Scan at most max_scan candidate slabs and move
5100  * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
5101  * If desperate to reclaim memory, move buffers from any partial slab, otherwise
5102  * skip slabs with a ratio of allocated buffers at or above the current
5103  * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
5104  * scan is aborted) so that the caller can adjust the reclaimability threshold
5105  * depending on how many reclaimable slabs it finds.
5106  *
5107  * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
5108  * move request, since it is not valid for kmem_move_begin() to call
5109  * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
5110  */
5111 static int
5112 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
5113     int flags)
5114 {
5115         kmem_slab_t *sp;
5116         void *buf;
5117         int i, j; /* slab index, buffer index */
5118         int s; /* reclaimable slabs */
5119         int b; /* allocated (movable) buffers on reclaimable slab */
5120         boolean_t success;
5121         int refcnt;
5122         int nomove;
5123 
5124         ASSERT(taskq_member(kmem_taskq, curthread));
5125         ASSERT(MUTEX_HELD(&cp->cache_lock));
5126         ASSERT(kmem_move_cache != NULL);
5127         ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5128         ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5129             avl_numnodes(&cp->cache_partial_slabs) > 1);
5130 
5131         if (kmem_move_blocked) {
5132                 return (0);
5133         }
5134 
5135         if (kmem_move_fulltilt) {
5136                 flags |= KMM_DESPERATE;
5137         }
5138 
5139         if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5140                 /*
5141                  * Scan as many slabs as needed to find the desired number of
5142                  * candidate slabs.
5143                  */
5144                 max_scan = (size_t)-1;
5145         }
5146 
5147         if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5148                 /* Find as many candidate slabs as possible. */
5149                 max_slabs = (size_t)-1;
5150         }
5151 
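             /*
              * The partial slab list keeps the most heavily allocated slabs at
              * the front, which is where destination buffers are allocated, so
              * avl_last() yields the emptiest slab and AVL_PREV() walks toward
              * fuller ones.
              */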
5152         sp = avl_last(&cp->cache_partial_slabs);
5153         ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5154         for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5155             ((sp != avl_first(&cp->cache_partial_slabs)) ||
5156             (flags & KMM_DEBUG));
5157             sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5158 
5159                 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5160                         continue;
5161                 }
5162                 s++;
5163 
5164                 /* Look for allocated buffers to move. */
5165                 for (j = 0, b = 0, buf = sp->slab_base;
5166                     (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5167                     buf = (((char *)buf) + cp->cache_chunksize), j++) {
5168 
5169                         if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5170                                 continue;
5171                         }
5172 
5173                         b++;
5174 
5175                         /*
5176                          * Prevent the slab from being destroyed while we drop
5177                          * cache_lock and while the pending move is not yet
5178                          * registered. Flag the pending move while
5179                          * kmd_moves_pending may still be empty, since we can't
5180                          * yet rely on a non-zero pending move count to prevent
5181                          * the slab from being destroyed.
5182                          */
5183                         ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5184                         sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5185                         /*
5186                          * Recheck refcnt and nomove after reacquiring the lock,
5187                          * since these control the order of partial slabs, and
5188                          * we want to know if we can pick up the scan where we
5189                          * left off.
5190                          */
5191                         refcnt = sp->slab_refcnt;
5192                         nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5193                         mutex_exit(&cp->cache_lock);
5194 
5195                         success = kmem_move_begin(cp, sp, buf, flags);
5196 
5197                         /*
5198                          * Now, before the lock is reacquired, kmem could
5199                          * process all pending move requests and purge the
5200                          * deadlist, so that upon reacquiring the lock, sp has
5201                          * been remapped. Or, the client may free all the
5202                          * objects on the slab while the pending moves are still
5203                          * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5204                          * flag causes the slab to be put at the end of the
5205                          * deadlist and prevents it from being destroyed, since
5206                          * we plan to destroy it here after reacquiring the
5207                          * lock.
5208                          */
5209                         mutex_enter(&cp->cache_lock);
5210                         ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5211                         sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5212 
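                             /*
                              * A zero refcnt means the client freed every buffer
                              * on the slab while the lock was dropped; the
                              * MOVE_PENDING flag kept the now-empty slab parked
                              * on the deadlist rather than letting it be
                              * destroyed.
                              */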
5213                         if (sp->slab_refcnt == 0) {
5214                                 list_t *deadlist =
5215                                     &cp->cache_defrag->kmd_deadlist;
5216                                 list_remove(deadlist, sp);
5217 
5218                                 if (!avl_is_empty(
5219                                     &cp->cache_defrag->kmd_moves_pending)) {
5220                                         /*
5221                                          * A pending move makes it unsafe to
5222                                          * destroy the slab, because even though
5223                                          * the move is no longer needed, the
5224                                          * context where that is determined
5225                                          * requires the slab to exist.
5226                                          * Fortunately, a pending move also
5227                                          * means we don't need to destroy the
5228                                          * slab here, since it will get
5229                                          * destroyed along with any other slabs
5230                                          * on the deadlist after the last
5231                                          * pending move completes.
5232                                          */
5233                                         list_insert_head(deadlist, sp);
5234                                         KMEM_STAT_ADD(kmem_move_stats.
5235                                             kms_endscan_slab_dead);
5236                                         return (-1);
5237                                 }
5238 
5239                                 /*
5240                                  * Destroy the slab now if it was completely
5241                                  * freed while we dropped cache_lock and there
5242                                  * are no pending moves. Since slab_refcnt
5243                                  * cannot change once it reaches zero, no new
5244                                  * pending moves from that slab are possible.
5245                                  */
5246                                 cp->cache_defrag->kmd_deadcount--;
5247                                 cp->cache_slab_destroy++;
5248                                 mutex_exit(&cp->cache_lock);
5249                                 kmem_slab_destroy(cp, sp);
5250                                 KMEM_STAT_ADD(kmem_move_stats.
5251                                     kms_dead_slabs_freed);
5252                                 KMEM_STAT_ADD(kmem_move_stats.
5253                                     kms_endscan_slab_destroyed);
5254                                 mutex_enter(&cp->cache_lock);
5255                                 /*
5256                                  * Since we can't pick up the scan where we left
5257                                  * off, abort the scan and say nothing about the
5258                                  * number of reclaimable slabs.
5259                                  */
5260                                 return (-1);
5261                         }
5262 
5263                         if (!success) {
5264                                 /*
5265                                  * Abort the scan if there is not enough memory
5266                                  * for the request and say nothing about the
5267                                  * number of reclaimable slabs.
5268                                  */
5269                                 KMEM_STAT_COND_ADD(s < max_slabs,
5270                                     kmem_move_stats.kms_endscan_nomem);
5271                                 return (-1);
5272                         }
5273 
5274                         /*
5275                          * If the slab's position in the partial slab list
5276                          * changed while the lock was dropped, we no longer
5277                          * know where we are in the scan, so abort.
5278                          */
5279                         if (sp->slab_refcnt != refcnt) {
5280                                 /*
5281                                  * If this is a KMM_DEBUG move, the slab_refcnt
5282                                  * may have changed because we allocated a
5283                                  * destination buffer on the same slab. In that
5284                                  * case, we're not interested in counting it.
5285                                  */
5286                                 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5287                                     (s < max_slabs),
5288                                     kmem_move_stats.kms_endscan_refcnt_changed);
5289                                 return (-1);
5290                         }
5291                         if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
5292                                 KMEM_STAT_COND_ADD(s < max_slabs,
5293                                     kmem_move_stats.kms_endscan_nomove_changed);
5294                                 return (-1);
5295                         }
5296 
5297                         /*
5298                          * Generating a move request allocates a destination
5299                          * buffer from the slab layer, bumping the first partial
5300                          * slab if it is completely allocated. If the current
5301                          * slab becomes the first partial slab as a result, we
5302                          * can't continue to scan backwards.
5303                          *
5304                          * If this is a KMM_DEBUG move and we allocated the
5305                          * destination buffer from the last partial slab, then
5306                          * the buffer we're moving is on the same slab and our
5307                          * slab_refcnt has changed, causing us to return before
5308                          * reaching here if there are no partial slabs left.
5309                          */
5310                         ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5311                         if (sp == avl_first(&cp->cache_partial_slabs)) {
5312                                 /*
5313                                  * We're not interested in a second KMM_DEBUG
5314                                  * move.
5315                                  */
5316                                 goto end_scan;
5317                         }
5318                 }
5319         }
5320 end_scan:
5321 
5322         KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5323             (s < max_slabs) &&
5324             (sp == avl_first(&cp->cache_partial_slabs)),
5325             kmem_move_stats.kms_endscan_freelist);
5326 
5327         return (s);
5328 }
5329 
5330 typedef struct kmem_move_notify_args {
5331         kmem_cache_t *kmna_cache;
5332         void *kmna_buf;
5333 } kmem_move_notify_args_t;
5334 
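     /*
      * Handle a single client notification (see kmem_cache_move_notify()) in
      * kmem_taskq context: if the named buffer is still allocated and its slab
      * was marked by an earlier refused or deferred move, try to move the
      * buffer now.
      */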
5335 static void
5336 kmem_cache_move_notify_task(void *arg)
5337 {
5338         kmem_move_notify_args_t *args = arg;
5339         kmem_cache_t *cp = args->kmna_cache;
5340         void *buf = args->kmna_buf;
5341         kmem_slab_t *sp;
5342 
5343         ASSERT(taskq_member(kmem_taskq, curthread));
5344         ASSERT(list_link_active(&cp->cache_link));
5345 
5346         kmem_free(args, sizeof (kmem_move_notify_args_t));
5347         mutex_enter(&cp->cache_lock);
5348         sp = kmem_slab_allocated(cp, NULL, buf);
5349 
5350         /* Ignore the notification if the buffer is no longer allocated. */
5351         if (sp == NULL) {
5352                 mutex_exit(&cp->cache_lock);
5353                 return;
5354         }
5355 
5356         /* Ignore the notification if there's no reason to move the buffer. */
5357         if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5358                 /*
5359                  * The notification is still of interest. Ignore it unless
5360                  * the slab was marked by an earlier refusal (or deferral)
5361                  * to move one of its buffers.
5362                  */
5363                 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5364                     (sp->slab_later_count == 0)) {
5365                         mutex_exit(&cp->cache_lock);
5366                         return;
5367                 }
5368 
5369                 kmem_slab_move_yes(cp, sp, buf);
5370                 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5371                 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5372                 mutex_exit(&cp->cache_lock);
5373                 /* see kmem_move_buffers() about dropping the lock */
5374                 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5375                 mutex_enter(&cp->cache_lock);
5376                 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5377                 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5378                 if (sp->slab_refcnt == 0) {
5379                         list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5380                         list_remove(deadlist, sp);
5381 
5382                         if (!avl_is_empty(
5383                             &cp->cache_defrag->kmd_moves_pending)) {
5384                                 list_insert_head(deadlist, sp);
5385                                 mutex_exit(&cp->cache_lock);
5386                                 KMEM_STAT_ADD(kmem_move_stats.
5387                                     kms_notify_slab_dead);
5388                                 return;
5389                         }
5390 
5391                         cp->cache_defrag->kmd_deadcount--;
5392                         cp->cache_slab_destroy++;
5393                         mutex_exit(&cp->cache_lock);
5394                         kmem_slab_destroy(cp, sp);
5395                         KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5396                         KMEM_STAT_ADD(kmem_move_stats.
5397                             kms_notify_slab_destroyed);
5398                         return;
5399                 }
5400         } else {
5401                 kmem_slab_move_yes(cp, sp, buf);
5402         }
5403         mutex_exit(&cp->cache_lock);
5404 }
5405 
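     /*
      * Client-facing entry point: the client calls this to tell kmem that a
      * buffer it previously declined to move has become movable. The work is
      * handed off to kmem_taskq, so this routine never blocks and takes no
      * cache locks in the caller's context.
      */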
5406 void
5407 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5408 {
5409         kmem_move_notify_args_t *args;
5410 
5411         KMEM_STAT_ADD(kmem_move_stats.kms_notify);
5412         args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5413         if (args != NULL) {
5414                 args->kmna_cache = cp;
5415                 args->kmna_buf = buf;
5416                 if (!taskq_dispatch(kmem_taskq,
5417                     (task_func_t *)kmem_cache_move_notify_task, args,
5418                     TQ_NOSLEEP))
5419                         kmem_free(args, sizeof (kmem_move_notify_args_t));
5420         }
5421 }
5422 
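     /*
      * Consolidate the cache unconditionally: whenever there is more than one
      * partial slab, move buffers out of every partial slab, ignoring the
      * usual reclaimability threshold (KMM_DESPERATE).
      */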
5423 static void
5424 kmem_cache_defrag(kmem_cache_t *cp)
5425 {
5426         size_t n;
5427 
5428         ASSERT(cp->cache_defrag != NULL);
5429 
5430         mutex_enter(&cp->cache_lock);
5431         n = avl_numnodes(&cp->cache_partial_slabs);
5432         if (n > 1) {
5433                 /* kmem_move_buffers() drops and reacquires cache_lock */
5434                 KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
5435                 cp->cache_defrag->kmd_defrags++;
5436                 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5437         }
5438         mutex_exit(&cp->cache_lock);
5439 }
5440 
5441 /* Is this cache above the fragmentation threshold? */
5442 static boolean_t
5443 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5444 {
5445         /*
5446          *      nfree           kmem_frag_numer
5447          * ------------------ > ---------------
5448          * cp->cache_buftotal   kmem_frag_denom
5449          */
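             /*
              * Cross-multiplying avoids integer division, which would truncate
              * the ratio toward zero.
              */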
5450         return ((nfree * kmem_frag_denom) >
5451             (cp->cache_buftotal * kmem_frag_numer));
5452 }
5453 
5454 static boolean_t
5455 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5456 {
5457         boolean_t fragmented;
5458         uint64_t nfree;
5459 
5460         ASSERT(MUTEX_HELD(&cp->cache_lock));
5461         *doreap = B_FALSE;
5462 
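             /*
              * When kmem_move_fulltilt is set, any cache with more than one
              * partial slab is treated as fragmented. Otherwise, a cache with
              * fewer than kmem_frag_minslabs total slabs is never considered
              * fragmented, presumably because consolidating such a small cache
              * is not worth the effort.
              */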
5463         if (kmem_move_fulltilt) {
5464                 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5465                         return (B_TRUE);
5466                 }
5467         } else {
5468                 if ((cp->cache_complete_slab_count + avl_numnodes(
5469                     &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5470                         return (B_FALSE);
5471                 }
5472         }
5473 
5474         nfree = cp->cache_bufslab;
5475         fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5476             kmem_cache_frag_threshold(cp, nfree));
5477 
5478         /*
5479          * Free buffers in the magazine layer appear allocated from the point of
5480          * view of the slab layer. We want to know if the slab layer would
5481          * appear fragmented if we included free buffers from magazines that
5482          * have fallen out of the working set.
5483          */
5484         if (!fragmented) {
5485                 long reap;
5486 
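                     /*
                      * Estimate the number of full magazines that have fallen
                      * out of the depot's working set (roughly, those not
                      * needed since the last working-set update); reaping them
                      * would return their buffers to the slab layer.
                      */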
5487                 mutex_enter(&cp->cache_depot_lock);
5488                 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5489                 reap = MIN(reap, cp->cache_full.ml_total);
5490                 mutex_exit(&cp->cache_depot_lock);
5491 
5492                 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5493                 if (kmem_cache_frag_threshold(cp, nfree)) {
5494                         *doreap = B_TRUE;
5495                 }
5496         }
5497 
5498         return (fragmented);
5499 }
5500 
5501 /* Called periodically from kmem_taskq */
5502 static void
5503 kmem_cache_scan(kmem_cache_t *cp)
5504 {
5505         boolean_t reap = B_FALSE;
5506         kmem_defrag_t *kmd;
5507 
5508         ASSERT(taskq_member(kmem_taskq, curthread));
5509 
5510         mutex_enter(&cp->cache_lock);
5511 
5512         kmd = cp->cache_defrag;
5513         if (kmd->kmd_consolidate > 0) {
5514                 kmd->kmd_consolidate--;
5515                 mutex_exit(&cp->cache_lock);
5516                 kmem_cache_reap(cp);
5517                 return;
5518         }
5519 
5520         if (kmem_cache_is_fragmented(cp, &reap)) {
5521                 int slabs_found;        /* may be -1: scan aborted */
5522 
5523                 /*
5524                  * Consolidate reclaimable slabs from the end of the partial
5525                  * slab list (scan at most kmem_reclaim_scan_range slabs to find
5526                  * reclaimable slabs). Keep track of how many candidate slabs we
5527                  * looked for and how many we actually found so we can adjust
5528                  * the definition of a candidate slab if we're having trouble
5529                  * finding them.
5530                  *
5531                  * kmem_move_buffers() drops and reacquires cache_lock.
5532                  */
5533                 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5534                 kmd->kmd_scans++;
5535                 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5536                     kmem_reclaim_max_slabs, 0);
5537                 if (slabs_found >= 0) {
5538                         kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5539                         kmd->kmd_slabs_found += slabs_found;
5540                 }
5541 
5542                 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5543                         kmd->kmd_tries = 0;
5544 
5545                         /*
5546                          * If every candidate slab sought was found, tighten
5547                          * the threshold; if fewer than half were found,
5548                          * loosen it so that candidates are easier to find.
5549                          */
5550                         if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5551                                 kmem_adjust_reclaim_threshold(kmd, -1);
5552                         } else if ((kmd->kmd_slabs_found * 2) <
5553                             kmd->kmd_slabs_sought) {
5554                                 kmem_adjust_reclaim_threshold(kmd, 1);
5555                         }
5556                         kmd->kmd_slabs_sought = 0;
5557                         kmd->kmd_slabs_found = 0;
5558                 }
5559         } else {
5560                 kmem_reset_reclaim_threshold(cp->cache_defrag);
5561 #ifdef  DEBUG
5562                 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5563                         /*
5564                          * In a debug kernel we want the consolidator to
5565                          * run occasionally even when there is plenty of
5566                          * memory.
5567                          */
5568                         uint16_t debug_rand;
5569 
5570                         (void) random_get_bytes((uint8_t *)&debug_rand, 2);
5571                         if (!kmem_move_noreap &&
5572                             ((debug_rand % kmem_mtb_reap) == 0)) {
5573                                 mutex_exit(&cp->cache_lock);
5574                                 KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps);
5575                                 kmem_cache_reap(cp);
5576                                 return;
5577                         } else if ((debug_rand % kmem_mtb_move) == 0) {
5578                                 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5579                                 KMEM_STAT_ADD(kmem_move_stats.kms_debug_scans);
5580                                 kmd->kmd_scans++;
5581                                 (void) kmem_move_buffers(cp,
5582                                     kmem_reclaim_scan_range, 1, KMM_DEBUG);
5583                         }
5584                 }
5585 #endif  /* DEBUG */
5586         }
5587 
5588         mutex_exit(&cp->cache_lock);
5589 
5590         if (reap) {
5591                 KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps);
5592                 kmem_depot_ws_reap(cp);
5593         }
5594 }