5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/common/os/kmem.c
+++ new/usr/src/uts/common/os/kmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1994, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Kernel memory allocator, as described in the following two papers and a
27 27 * statement about the consolidator:
28 28 *
29 29 * Jeff Bonwick,
30 30 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
31 31 * Proceedings of the Summer 1994 Usenix Conference.
32 32 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
33 33 *
34 34 * Jeff Bonwick and Jonathan Adams,
35 35 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
36 36 * Arbitrary Resources.
37 37 * Proceedings of the 2001 Usenix Conference.
38 38 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
39 39 *
40 40 * kmem Slab Consolidator Big Theory Statement:
41 41 *
42 42 * 1. Motivation
43 43 *
44 44 * As stated in Bonwick94, slabs provide the following advantages over other
45 45 * allocation structures in terms of memory fragmentation:
46 46 *
47 47 * - Internal fragmentation (per-buffer wasted space) is minimal.
48 48 * - Severe external fragmentation (unused buffers on the free list) is
49 49 * unlikely.
50 50 *
51 51 * Segregating objects by size eliminates one source of external fragmentation,
52 52 * and according to Bonwick:
53 53 *
54 54 * The other reason that slabs reduce external fragmentation is that all
55 55 * objects in a slab are of the same type, so they have the same lifetime
56 56 * distribution. The resulting segregation of short-lived and long-lived
57 57 * objects at slab granularity reduces the likelihood of an entire page being
58 58 * held hostage due to a single long-lived allocation [Barrett93, Hanson90].
59 59 *
60 60 * While unlikely, severe external fragmentation remains possible. Clients that
61 61 * allocate both short- and long-lived objects from the same cache cannot
62 62 * anticipate the distribution of long-lived objects within the allocator's slab
63 63 * implementation. Even a small percentage of long-lived objects distributed
64 64 * randomly across many slabs can lead to a worst case scenario where the client
65 65 * frees the majority of its objects and the system gets back almost none of the
66 66 * slabs. Despite the client doing what it reasonably can to help the system
67 67 * reclaim memory, the allocator cannot shake free enough slabs because of
68 68 * lonely allocations stubbornly hanging on. Although the allocator is in a
69 69 * position to diagnose the fragmentation, there is nothing that the allocator
70 70 * by itself can do about it. It only takes a single allocated object to prevent
71 71 * an entire slab from being reclaimed, and any object handed out by
72 72 * kmem_cache_alloc() is by definition in the client's control. Conversely,
73 73 * although the client is in a position to move a long-lived object, it has no
74 74 * way of knowing if the object is causing fragmentation, and if so, where to
75 75 * move it. A solution necessarily requires further cooperation between the
76 76 * allocator and the client.
77 77 *
78 78 * 2. Move Callback
79 79 *
80 80 * The kmem slab consolidator therefore adds a move callback to the
81 81 * allocator/client interface, improving worst-case external fragmentation in
82 82 * kmem caches that supply a function to move objects from one memory location
83 83  * to another. In a situation of low memory, kmem attempts to consolidate all of
84 84 * a cache's slabs at once; otherwise it works slowly to bring external
85 85 * fragmentation within the 1/8 limit guaranteed for internal fragmentation,
86 86 * thereby helping to avoid a low memory situation in the future.
87 87 *
88 88 * The callback has the following signature:
89 89 *
90 90 * kmem_cbrc_t move(void *old, void *new, size_t size, void *user_arg)
91 91 *
92 92 * It supplies the kmem client with two addresses: the allocated object that
93 93 * kmem wants to move and a buffer selected by kmem for the client to use as the
94 94 * copy destination. The callback is kmem's way of saying "Please get off of
95 95 * this buffer and use this one instead." kmem knows where it wants to move the
96 96 * object in order to best reduce fragmentation. All the client needs to know
97 97 * about the second argument (void *new) is that it is an allocated, constructed
98 98 * object ready to take the contents of the old object. When the move function
99 99 * is called, the system is likely to be low on memory, and the new object
100 100 * spares the client from having to worry about allocating memory for the
101 101 * requested move. The third argument supplies the size of the object, in case a
102 102 * single move function handles multiple caches whose objects differ only in
103 103 * size (such as zio_buf_512, zio_buf_1024, etc). Finally, the same optional
104 104 * user argument passed to the constructor, destructor, and reclaim functions is
105 105 * also passed to the move callback.
106 106 *
107 107 * 2.1 Setting the Move Callback
108 108 *
109 109 * The client sets the move callback after creating the cache and before
110 110 * allocating from it:
111 111 *
112 112 * object_cache = kmem_cache_create(...);
113 113 * kmem_cache_set_move(object_cache, object_move);
114 114 *
115 115 * 2.2 Move Callback Return Values
116 116 *
117 117 * Only the client knows about its own data and when is a good time to move it.
118 118 * The client is cooperating with kmem to return unused memory to the system,
119 119 * and kmem respectfully accepts this help at the client's convenience. When
120 120 * asked to move an object, the client can respond with any of the following:
121 121 *
122 122 * typedef enum kmem_cbrc {
123 123 * KMEM_CBRC_YES,
124 124 * KMEM_CBRC_NO,
125 125 * KMEM_CBRC_LATER,
126 126 * KMEM_CBRC_DONT_NEED,
127 127 * KMEM_CBRC_DONT_KNOW
128 128 * } kmem_cbrc_t;
129 129 *
130 130 * The client must not explicitly kmem_cache_free() either of the objects passed
131 131 * to the callback, since kmem wants to free them directly to the slab layer
132 132 * (bypassing the per-CPU magazine layer). The response tells kmem which of the
133 133 * objects to free:
134 134 *
135 135 * YES: (Did it) The client moved the object, so kmem frees the old one.
136 136 * NO: (Never) The client refused, so kmem frees the new object (the
137 137 * unused copy destination). kmem also marks the slab of the old
138 138 * object so as not to bother the client with further callbacks for
139 139 * that object as long as the slab remains on the partial slab list.
140 140 * (The system won't be getting the slab back as long as the
141 141 * immovable object holds it hostage, so there's no point in moving
142 142 * any of its objects.)
143 143 * LATER: The client is using the object and cannot move it now, so kmem
144 144 * frees the new object (the unused copy destination). kmem still
145 145 * attempts to move other objects off the slab, since it expects to
146 146 * succeed in clearing the slab in a later callback. The client
147 147 * should use LATER instead of NO if the object is likely to become
148 148 * movable very soon.
149 149 * DONT_NEED: The client no longer needs the object, so kmem frees the old along
150 150 * with the new object (the unused copy destination). This response
151 151 * is the client's opportunity to be a model citizen and give back as
152 152 * much as it can.
153 153 * DONT_KNOW: The client does not know about the object because
154 154 * a) the client has just allocated the object and not yet put it
155 155  *              wherever it expects to find known objects,
156 156 * b) the client has removed the object from wherever it expects to
157 157 * find known objects and is about to free it, or
158 158 * c) the client has freed the object.
159 159 * In all these cases (a, b, and c) kmem frees the new object (the
160 160 * unused copy destination) and searches for the old object in the
161 161 * magazine layer. If found, the object is removed from the magazine
162 162 * layer and freed to the slab layer so it will no longer hold the
163 163 * slab hostage.
164 164 *
165 165 * 2.3 Object States
166 166 *
167 167 * Neither kmem nor the client can be assumed to know the object's whereabouts
168 168 * at the time of the callback. An object belonging to a kmem cache may be in
169 169 * any of the following states:
170 170 *
171 171 * 1. Uninitialized on the slab
172 172 * 2. Allocated from the slab but not constructed (still uninitialized)
173 173 * 3. Allocated from the slab, constructed, but not yet ready for business
174 174 * (not in a valid state for the move callback)
175 175 * 4. In use (valid and known to the client)
176 176 * 5. About to be freed (no longer in a valid state for the move callback)
177 177 * 6. Freed to a magazine (still constructed)
178 178 * 7. Allocated from a magazine, not yet ready for business (not in a valid
179 179 * state for the move callback), and about to return to state #4
180 180 * 8. Deconstructed on a magazine that is about to be freed
181 181 * 9. Freed to the slab
182 182 *
183 183 * Since the move callback may be called at any time while the object is in any
184 184 * of the above states (except state #1), the client needs a safe way to
185 185 * determine whether or not it knows about the object. Specifically, the client
186 186 * needs to know whether or not the object is in state #4, the only state in
187 187 * which a move is valid. If the object is in any other state, the client should
188 188 * immediately return KMEM_CBRC_DONT_KNOW, since it is unsafe to access any of
189 189 * the object's fields.
190 190 *
191 191 * Note that although an object may be in state #4 when kmem initiates the move
192 192 * request, the object may no longer be in that state by the time kmem actually
193 193 * calls the move function. Not only does the client free objects
194 194  * asynchronously, but kmem itself also puts move requests on a queue where they are
195 195 * pending until kmem processes them from another context. Also, objects freed
196 196 * to a magazine appear allocated from the point of view of the slab layer, so
197 197 * kmem may even initiate requests for objects in a state other than state #4.
198 198 *
199 199 * 2.3.1 Magazine Layer
200 200 *
201 201 * An important insight revealed by the states listed above is that the magazine
202 202 * layer is populated only by kmem_cache_free(). Magazines of constructed
203 203 * objects are never populated directly from the slab layer (which contains raw,
204 204 * unconstructed objects). Whenever an allocation request cannot be satisfied
205 205 * from the magazine layer, the magazines are bypassed and the request is
206 206 * satisfied from the slab layer (creating a new slab if necessary). kmem calls
207 207 * the object constructor only when allocating from the slab layer, and only in
208 208 * response to kmem_cache_alloc() or to prepare the destination buffer passed in
209 209 * the move callback. kmem does not preconstruct objects in anticipation of
210 210 * kmem_cache_alloc().
211 211 *
212 212 * 2.3.2 Object Constructor and Destructor
213 213 *
214 214 * If the client supplies a destructor, it must be valid to call the destructor
215 215 * on a newly created object (immediately after the constructor).
216 216 *
217 217 * 2.4 Recognizing Known Objects
218 218 *
219 219 * There is a simple test to determine safely whether or not the client knows
220 220 * about a given object in the move callback. It relies on the fact that kmem
221 221 * guarantees that the object of the move callback has only been touched by the
222 222 * client itself or else by kmem. kmem does this by ensuring that none of the
223 223 * cache's slabs are freed to the virtual memory (VM) subsystem while a move
224 224 * callback is pending. When the last object on a slab is freed, if there is a
225 225 * pending move, kmem puts the slab on a per-cache dead list and defers freeing
226 226 * slabs on that list until all pending callbacks are completed. That way,
227 227 * clients can be certain that the object of a move callback is in one of the
228 228 * states listed above, making it possible to distinguish known objects (in
229 229 * state #4) using the two low order bits of any pointer member (with the
230 230 * exception of 'char *' or 'short *' which may not be 4-byte aligned on some
231 231 * platforms).
232 232 *
233 233 * The test works as long as the client always transitions objects from state #4
234 234 * (known, in use) to state #5 (about to be freed, invalid) by setting the low
235 235 * order bit of the client-designated pointer member. Since kmem only writes
236 236 * invalid memory patterns, such as 0xbaddcafe to uninitialized memory and
237 237 * 0xdeadbeef to freed memory, any scribbling on the object done by kmem is
238 238 * guaranteed to set at least one of the two low order bits. Therefore, given an
239 239 * object with a back pointer to a 'container_t *o_container', the client can
240 240 * test
241 241 *
242 242 * container_t *container = object->o_container;
243 243 * if ((uintptr_t)container & 0x3) {
244 244 * return (KMEM_CBRC_DONT_KNOW);
245 245 * }
246 246 *
247 247 * Typically, an object will have a pointer to some structure with a list or
248 248 * hash where objects from the cache are kept while in use. Assuming that the
249 249 * client has some way of knowing that the container structure is valid and will
250 250 * not go away during the move, and assuming that the structure includes a lock
251 251 * to protect whatever collection is used, then the client would continue as
252 252 * follows:
253 253 *
254 254 * // Ensure that the container structure does not go away.
255 255 * if (container_hold(container) == 0) {
256 256 * return (KMEM_CBRC_DONT_KNOW);
257 257 * }
258 258 * mutex_enter(&container->c_objects_lock);
259 259 * if (container != object->o_container) {
260 260 * mutex_exit(&container->c_objects_lock);
261 261 * container_rele(container);
262 262 * return (KMEM_CBRC_DONT_KNOW);
263 263 * }
264 264 *
265 265 * At this point the client knows that the object cannot be freed as long as
266 266 * c_objects_lock is held. Note that after acquiring the lock, the client must
267 267 * recheck the o_container pointer in case the object was removed just before
268 268 * acquiring the lock.
269 269 *
270 270 * When the client is about to free an object, it must first remove that object
271 271 * from the list, hash, or other structure where it is kept. At that time, to
272 272 * mark the object so it can be distinguished from the remaining, known objects,
273 273 * the client sets the designated low order bit:
274 274 *
275 275 * mutex_enter(&container->c_objects_lock);
276 276 * object->o_container = (void *)((uintptr_t)object->o_container | 0x1);
277 277 * list_remove(&container->c_objects, object);
278 278 * mutex_exit(&container->c_objects_lock);
279 279 *
280 280 * In the common case, the object is freed to the magazine layer, where it may
281 281 * be reused on a subsequent allocation without the overhead of calling the
282 282 * constructor. While in the magazine it appears allocated from the point of
283 283 * view of the slab layer, making it a candidate for the move callback. Most
284 284 * objects unrecognized by the client in the move callback fall into this
285 285 * category and are cheaply distinguished from known objects by the test
286 286 * described earlier. Since recognition is cheap for the client, and searching
287 287 * magazines is expensive for kmem, kmem defers searching until the client first
288 288 * returns KMEM_CBRC_DONT_KNOW. As long as the needed effort is reasonable, kmem
289 289 * elsewhere does what it can to avoid bothering the client unnecessarily.
290 290 *
291 291 * Invalidating the designated pointer member before freeing the object marks
292 292 * the object to be avoided in the callback, and conversely, assigning a valid
293 293 * value to the designated pointer member after allocating the object makes the
294 294 * object fair game for the callback:
295 295 *
296 296 * ... allocate object ...
297 297 * ... set any initial state not set by the constructor ...
298 298 *
299 299 * mutex_enter(&container->c_objects_lock);
300 300 * list_insert_tail(&container->c_objects, object);
301 301 * membar_producer();
302 302 * object->o_container = container;
303 303 * mutex_exit(&container->c_objects_lock);
304 304 *
305 305 * Note that everything else must be valid before setting o_container makes the
306 306 * object fair game for the move callback. The membar_producer() call ensures
307 307 * that all the object's state is written to memory before setting the pointer
308 308 * that transitions the object from state #3 or #7 (allocated, constructed, not
309 309 * yet in use) to state #4 (in use, valid). That's important because the move
310 310 * function has to check the validity of the pointer before it can safely
311 311 * acquire the lock protecting the collection where it expects to find known
312 312 * objects.
313 313 *
314 314 * This method of distinguishing known objects observes the usual symmetry:
315 315 * invalidating the designated pointer is the first thing the client does before
316 316 * freeing the object, and setting the designated pointer is the last thing the
317 317 * client does after allocating the object. Of course, the client is not
318 318 * required to use this method. Fundamentally, how the client recognizes known
319 319 * objects is completely up to the client, but this method is recommended as an
320 320 * efficient and safe way to take advantage of the guarantees made by kmem. If
321 321 * the entire object is arbitrary data without any markable bits from a suitable
322 322 * pointer member, then the client must find some other method, such as
323 323 * searching a hash table of known objects.
324 324 *
325 325 * 2.5 Preventing Objects From Moving
326 326 *
327 327 * Besides a way to distinguish known objects, the other thing that the client
328 328 * needs is a strategy to ensure that an object will not move while the client
329 329 * is actively using it. The details of satisfying this requirement tend to be
330 330 * highly cache-specific. It might seem that the same rules that let a client
331 331 * remove an object safely should also decide when an object can be moved
332 332 * safely. However, any object state that makes a removal attempt invalid is
333 333 * likely to be long-lasting for objects that the client does not expect to
334 334 * remove. kmem knows nothing about the object state and is equally likely (from
335 335 * the client's point of view) to request a move for any object in the cache,
336 336 * whether prepared for removal or not. Even a low percentage of objects stuck
337 337 * in place by unremovability will defeat the consolidator if the stuck objects
338 338 * are the same long-lived allocations likely to hold slabs hostage.
339 339 * Fundamentally, the consolidator is not aimed at common cases. Severe external
340 340 * fragmentation is a worst case scenario manifested as sparsely allocated
341 341 * slabs, by definition a low percentage of the cache's objects. When deciding
342 342 * what makes an object movable, keep in mind the goal of the consolidator: to
343 343 * bring worst-case external fragmentation within the limits guaranteed for
344 344 * internal fragmentation. Removability is a poor criterion if it is likely to
345 345 * exclude more than an insignificant percentage of objects for long periods of
346 346 * time.
347 347 *
348 348 * A tricky general solution exists, and it has the advantage of letting you
349 349 * move any object at almost any moment, practically eliminating the likelihood
350 350 * that an object can hold a slab hostage. However, if there is a cache-specific
351 351 * way to ensure that an object is not actively in use in the vast majority of
352 352 * cases, a simpler solution that leverages this cache-specific knowledge is
353 353 * preferred.
354 354 *
355 355 * 2.5.1 Cache-Specific Solution
356 356 *
357 357 * As an example of a cache-specific solution, the ZFS znode cache takes
358 358 * advantage of the fact that the vast majority of znodes are only being
359 359 * referenced from the DNLC. (A typical case might be a few hundred in active
360 360 * use and a hundred thousand in the DNLC.) In the move callback, after the ZFS
361 361 * client has established that it recognizes the znode and can access its fields
362 362 * safely (using the method described earlier), it then tests whether the znode
363 363 * is referenced by anything other than the DNLC. If so, it assumes that the
364 364 * znode may be in active use and is unsafe to move, so it drops its locks and
365 365 * returns KMEM_CBRC_LATER. The advantage of this strategy is that everywhere
366 366 * else znodes are used, no change is needed to protect against the possibility
367 367 * of the znode moving. The disadvantage is that it remains possible for an
368 368 * application to hold a znode slab hostage with an open file descriptor.
369 369 * However, this case ought to be rare and the consolidator has a way to deal
370 370 * with it: If the client responds KMEM_CBRC_LATER repeatedly for the same
371 371 * object, kmem eventually stops believing it and treats the slab as if the
372 372 * client had responded KMEM_CBRC_NO. Having marked the hostage slab, kmem can
373 373 * then focus on getting it off of the partial slab list by allocating rather
374 374 * than freeing all of its objects. (Either way of getting a slab off the
375 375 * free list reduces fragmentation.)
376 376 *
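 * A minimal sketch of the test just described, assuming hypothetical
 * field and count names (z_refcnt and refs_held_by_dnlc are illustrative,
 * not the actual ZFS implementation):
 *
 *      // The znode has already passed the known-object test, so its
 *      // fields can be read safely under the locks held here.
 *      if (zp->z_refcnt > refs_held_by_dnlc) {
 *              ... drop locks, release holds ...
 *              return (KMEM_CBRC_LATER);       // possibly in active use
 *      }
 *      ... copy the znode into the new buffer and fix up references ...
 *      return (KMEM_CBRC_YES);
 *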
377 377 * 2.5.2 General Solution
378 378 *
379 379 * The general solution, on the other hand, requires an explicit hold everywhere
380 380 * the object is used to prevent it from moving. To keep the client locking
381 381 * strategy as uncomplicated as possible, kmem guarantees the simplifying
382 382 * assumption that move callbacks are sequential, even across multiple caches.
383 383 * Internally, a global queue processed by a single thread supports all caches
384 384 * implementing the callback function. No matter how many caches supply a move
385 385 * function, the consolidator never moves more than one object at a time, so the
386 386 * client does not have to worry about tricky lock ordering involving several
387 387 * related objects from different kmem caches.
388 388 *
389 389 * The general solution implements the explicit hold as a read-write lock, which
390 390 * allows multiple readers to access an object from the cache simultaneously
391 391 * while a single writer is excluded from moving it. A single rwlock for the
392 392 * entire cache would lock out all threads from using any of the cache's objects
393 393 * even though only a single object is being moved, so to reduce contention,
394 394 * the client can fan out the single rwlock into an array of rwlocks hashed by
395 395 * the object address, making it probable that moving one object will not
396 396 * prevent other threads from using a different object. The rwlock cannot be a
397 397 * member of the object itself, because the possibility of the object moving
398 398 * makes it unsafe to access any of the object's fields until the lock is
399 399 * acquired.
400 400 *
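 * As a sketch of that fan-out (the array size and hash below are
 * illustrative choices, not part of the kmem interface), the client
 * might define the OBJECT_RWLOCK macro used in the examples that
 * follow as:
 *
 *      #define OBJECT_LOCK_COUNT       64      // power of two
 *      static krwlock_t object_rwlocks[OBJECT_LOCK_COUNT];
 *
 *      // Discard low-order bits (constant within a single object) and
 *      // hash the rest of the object address into the lock array.
 *      #define OBJECT_RWLOCK(op)       \
 *          (&object_rwlocks[((uintptr_t)(op) >> 8) & \
 *          (OBJECT_LOCK_COUNT - 1)])
 *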
401 401 * Assuming a small, fixed number of locks, it's possible that multiple objects
402 402 * will hash to the same lock. A thread that needs to use multiple objects in
403 403 * the same function may acquire the same lock multiple times. Since rwlocks are
404 404 * reentrant for readers, and since there is never more than a single writer at
405 405 * a time (assuming that the client acquires the lock as a writer only when
406 406 * moving an object inside the callback), there would seem to be no problem.
407 407 * However, a client locking multiple objects in the same function must handle
408 408 * one case of potential deadlock: Assume that thread A needs to prevent both
409 409 * object 1 and object 2 from moving, and thread B, the callback, meanwhile
410 410 * tries to move object 3. It's possible, if objects 1, 2, and 3 all hash to the
411 411 * same lock, that thread A will acquire the lock for object 1 as a reader
412 412 * before thread B sets the lock's write-wanted bit, preventing thread A from
413 413 * reacquiring the lock for object 2 as a reader. Unable to make forward
414 414 * progress, thread A will never release the lock for object 1, resulting in
415 415 * deadlock.
416 416 *
417 417 * There are two ways of avoiding the deadlock just described. The first is to
418 418 * use rw_tryenter() rather than rw_enter() in the callback function when
419 419 * attempting to acquire the lock as a writer. If tryenter discovers that the
420 420 * same object (or another object hashed to the same lock) is already in use, it
421 421 * aborts the callback and returns KMEM_CBRC_LATER. The second way is to use
422 422 * rprwlock_t (declared in common/fs/zfs/sys/rprwlock.h) instead of rwlock_t,
423 423 * since it allows a thread to acquire the lock as a reader in spite of a
424 424 * waiting writer. This second approach insists on moving the object now, no
425 425 * matter how many readers the move function must wait for in order to do so,
426 426 * and could delay the completion of the callback indefinitely (blocking
427 427 * callbacks to other clients). In practice, a less insistent callback using
428 428 * rw_tryenter() returns KMEM_CBRC_LATER infrequently enough that there seems
429 429 * little reason to use anything else.
430 430 *
431 431 * Avoiding deadlock is not the only problem that an implementation using an
432 432 * explicit hold needs to solve. Locking the object in the first place (to
433 433 * prevent it from moving) remains a problem, since the object could move
434 434 * between the time you obtain a pointer to the object and the time you acquire
435 435 * the rwlock hashed to that pointer value. Therefore the client needs to
436 436 * recheck the value of the pointer after acquiring the lock, drop the lock if
437 437 * the value has changed, and try again. This requires a level of indirection:
438 438 * something that points to the object rather than the object itself, that the
439 439 * client can access safely while attempting to acquire the lock. (The object
440 440 * itself cannot be referenced safely because it can move at any time.)
441 441 * The following lock-acquisition function takes whatever is safe to reference
442 442 * (arg), follows its pointer to the object (using function f), and tries as
443 443 * often as necessary to acquire the hashed lock and verify that the object
444 444 * still has not moved:
445 445 *
446 446 * object_t *
447 447 * object_hold(object_f f, void *arg)
448 448 * {
449 449 * object_t *op;
450 450 *
451 451 * op = f(arg);
452 452 * if (op == NULL) {
453 453 * return (NULL);
454 454 * }
455 455 *
456 456 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
457 457 * while (op != f(arg)) {
458 458 * rw_exit(OBJECT_RWLOCK(op));
459 459 * op = f(arg);
460 460 * if (op == NULL) {
461 461 * break;
462 462 * }
463 463 * rw_enter(OBJECT_RWLOCK(op), RW_READER);
464 464 * }
465 465 *
466 466 * return (op);
467 467 * }
468 468 *
469 469 * The OBJECT_RWLOCK macro hashes the object address to obtain the rwlock. The
470 470 * lock reacquisition loop, while necessary, almost never executes. The function
471 471 * pointer f (used to obtain the object pointer from arg) has the following type
472 472 * definition:
473 473 *
474 474 * typedef object_t *(*object_f)(void *arg);
475 475 *
476 476 * An object_f implementation is likely to be as simple as accessing a structure
477 477 * member:
478 478 *
479 479 * object_t *
480 480 * s_object(void *arg)
481 481 * {
482 482 * something_t *sp = arg;
483 483 * return (sp->s_object);
484 484 * }
485 485 *
486 486 * The flexibility of a function pointer allows the path to the object to be
487 487 * arbitrarily complex and also supports the notion that depending on where you
488 488 * are using the object, you may need to get it from someplace different.
489 489 *
490 490 * The function that releases the explicit hold is simpler because it does not
491 491 * have to worry about the object moving:
492 492 *
493 493 * void
494 494 * object_rele(object_t *op)
495 495 * {
496 496 * rw_exit(OBJECT_RWLOCK(op));
497 497 * }
498 498 *
499 499 * The caller is spared these details so that obtaining and releasing an
500 500 * explicit hold feels like a simple mutex_enter()/mutex_exit() pair. The caller
501 501 * of object_hold() only needs to know that the returned object pointer is valid
502 502 * if not NULL and that the object will not move until released.
503 503 *
504 504 * Although object_hold() prevents an object from moving, it does not prevent it
505 505 * from being freed. The caller must take measures before calling object_hold()
506 506 * (afterwards is too late) to ensure that the held object cannot be freed. The
507 507 * caller must do so without accessing the unsafe object reference, so any lock
508 508 * or reference count used to ensure the continued existence of the object must
509 509 * live outside the object itself.
510 510 *
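 * A typical use, assuming the illustrative something_t holder from the
 * s_object() example above (and some external guarantee, such as a hold
 * on sp itself, that keeps the object from being freed):
 *
 *      something_t *sp = ...;  // safe to reference; does not move
 *
 *      object_t *op = object_hold(s_object, sp);
 *      if (op != NULL) {
 *              ... use the object; it cannot move until released ...
 *              object_rele(op);
 *      }
 *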
511 511 * Obtaining a new object is a special case where an explicit hold is impossible
512 512 * for the caller. Any function that returns a newly allocated object (either as
513 513  * a return value, or as an in-out parameter) must return it already held; after
514 514  * the caller gets it, it is too late, since the object cannot be safely accessed
515 515 * without the level of indirection described earlier. The following
516 516 * object_alloc() example uses the same code shown earlier to transition a new
517 517 * object into the state of being recognized (by the client) as a known object.
518 518 * The function must acquire the hold (rw_enter) before that state transition
519 519 * makes the object movable:
520 520 *
521 521 * static object_t *
522 522 * object_alloc(container_t *container)
523 523 * {
524 524 * object_t *object = kmem_cache_alloc(object_cache, 0);
525 525 * ... set any initial state not set by the constructor ...
526 526 * rw_enter(OBJECT_RWLOCK(object), RW_READER);
527 527 * mutex_enter(&container->c_objects_lock);
528 528 * list_insert_tail(&container->c_objects, object);
529 529 * membar_producer();
530 530 * object->o_container = container;
531 531 * mutex_exit(&container->c_objects_lock);
532 532 * return (object);
533 533 * }
534 534 *
535 535 * Functions that implicitly acquire an object hold (any function that calls
536 536 * object_alloc() to supply an object for the caller) need to be carefully noted
537 537 * so that the matching object_rele() is not neglected. Otherwise, leaked holds
538 538 * prevent all objects hashed to the affected rwlocks from ever being moved.
539 539 *
540 540 * The pointer to a held object can be hashed to the holding rwlock even after
541 541 * the object has been freed. Although it is possible to release the hold
542 542 * after freeing the object, you may decide to release the hold implicitly in
543 543 * whatever function frees the object, so as to release the hold as soon as
544 544 * possible, and for the sake of symmetry with the function that implicitly
545 545 * acquires the hold when it allocates the object. Here, object_free() releases
546 546 * the hold acquired by object_alloc(). Its implicit object_rele() forms a
547 547 * matching pair with object_hold():
548 548 *
549 549 * void
550 550 * object_free(object_t *object)
551 551 * {
552 552 * container_t *container;
553 553 *
554 554 * ASSERT(object_held(object));
555 555 * container = object->o_container;
556 556 * mutex_enter(&container->c_objects_lock);
557 557 * object->o_container =
558 558 * (void *)((uintptr_t)object->o_container | 0x1);
559 559 * list_remove(&container->c_objects, object);
560 560 * mutex_exit(&container->c_objects_lock);
561 561 * object_rele(object);
562 562 * kmem_cache_free(object_cache, object);
563 563 * }
564 564 *
565 565 * Note that object_free() cannot safely accept an object pointer as an argument
566 566 * unless the object is already held. Any function that calls object_free()
567 567 * needs to be carefully noted since it similarly forms a matching pair with
568 568 * object_hold().
569 569 *
570 570 * To complete the picture, the following callback function implements the
571 571 * general solution by moving objects only if they are currently unheld:
572 572 *
573 573 * static kmem_cbrc_t
574 574 * object_move(void *buf, void *newbuf, size_t size, void *arg)
575 575 * {
576 576 * object_t *op = buf, *np = newbuf;
577 577 * container_t *container;
578 578 *
579 579 * container = op->o_container;
580 580 * if ((uintptr_t)container & 0x3) {
581 581 * return (KMEM_CBRC_DONT_KNOW);
582 582 * }
583 583 *
584 584 * // Ensure that the container structure does not go away.
585 585 * if (container_hold(container) == 0) {
586 586 * return (KMEM_CBRC_DONT_KNOW);
587 587 * }
588 588 *
589 589 * mutex_enter(&container->c_objects_lock);
590 590 * if (container != op->o_container) {
591 591 * mutex_exit(&container->c_objects_lock);
592 592 * container_rele(container);
593 593 * return (KMEM_CBRC_DONT_KNOW);
594 594 * }
595 595 *
596 596 * if (rw_tryenter(OBJECT_RWLOCK(op), RW_WRITER) == 0) {
597 597 * mutex_exit(&container->c_objects_lock);
598 598 * container_rele(container);
599 599 * return (KMEM_CBRC_LATER);
600 600 * }
601 601 *
602 602 * object_move_impl(op, np); // critical section
603 603 * rw_exit(OBJECT_RWLOCK(op));
604 604 *
605 605 * op->o_container = (void *)((uintptr_t)op->o_container | 0x1);
606 606 * list_link_replace(&op->o_link_node, &np->o_link_node);
607 607 * mutex_exit(&container->c_objects_lock);
608 608 * container_rele(container);
609 609 * return (KMEM_CBRC_YES);
610 610 * }
611 611 *
612 612 * Note that object_move() must invalidate the designated o_container pointer of
613 613 * the old object in the same way that object_free() does, since kmem will free
614 614 * the object in response to the KMEM_CBRC_YES return value.
615 615 *
616 616 * The lock order in object_move() differs from object_alloc(), which locks
617 617 * OBJECT_RWLOCK first and &container->c_objects_lock second, but as long as the
618 618 * callback uses rw_tryenter() (preventing the deadlock described earlier), it's
619 619 * not a problem. Holding the lock on the object list in the example above
620 620 * through the entire callback not only prevents the object from going away, it
621 621 * also allows you to lock the list elsewhere and know that none of its elements
622 622 * will move during iteration.
623 623 *
624 624 * Adding an explicit hold everywhere an object from the cache is used is tricky
625 625 * and involves much more change to client code than a cache-specific solution
626 626 * that leverages existing state to decide whether or not an object is
627 627 * movable. However, this approach has the advantage that no object remains
628 628 * immovable for any significant length of time, making it extremely unlikely
629 629 * that long-lived allocations can continue holding slabs hostage; and it works
630 630 * for any cache.
631 631 *
632 632 * 3. Consolidator Implementation
633 633 *
634 634 * Once the client supplies a move function that a) recognizes known objects and
635 635 * b) avoids moving objects that are actively in use, the remaining work is up
636 636 * to the consolidator to decide which objects to move and when to issue
637 637 * callbacks.
638 638 *
639 639 * The consolidator relies on the fact that a cache's slabs are ordered by
640 640 * usage. Each slab has a fixed number of objects. Depending on the slab's
641 641 * "color" (the offset of the first object from the beginning of the slab;
642 642 * offsets are staggered to mitigate false sharing of cache lines) it is either
643 643 * the maximum number of objects per slab determined at cache creation time or
644 644 * else the number closest to the maximum that fits within the space remaining
645 645 * after the initial offset. A completely allocated slab may contribute some
646 646 * internal fragmentation (per-slab overhead) but no external fragmentation, so
647 647 * it is of no interest to the consolidator. At the other extreme, slabs whose
648 648 * objects have all been freed to the slab are released to the virtual memory
649 649 * (VM) subsystem (objects freed to magazines are still allocated as far as the
650 650 * slab is concerned). External fragmentation exists when there are slabs
651 651 * somewhere between these extremes. A partial slab has at least one but not all
652 652 * of its objects allocated. The more partial slabs, and the fewer allocated
653 653 * objects on each of them, the higher the fragmentation. Hence the
654 654 * consolidator's overall strategy is to reduce the number of partial slabs by
655 655 * moving allocated objects from the least allocated slabs to the most allocated
656 656 * slabs.
657 657 *
658 658 * Partial slabs are kept in an AVL tree ordered by usage. Completely allocated
659 659 * slabs are kept separately in an unordered list. Since the majority of slabs
660 660 * tend to be completely allocated (a typical unfragmented cache may have
661 661 * thousands of complete slabs and only a single partial slab), separating
662 662 * complete slabs improves the efficiency of partial slab ordering, since the
663 663 * complete slabs do not affect the depth or balance of the AVL tree. This
664 664 * ordered sequence of partial slabs acts as a "free list" supplying objects for
665 665 * allocation requests.
666 666 *
667 667 * Objects are always allocated from the first partial slab in the free list,
668 668 * where the allocation is most likely to eliminate a partial slab (by
669 669 * completely allocating it). Conversely, when a single object from a completely
670 670 * allocated slab is freed to the slab, that slab is added to the front of the
671 671 * free list. Since most free list activity involves highly allocated slabs
672 672 * coming and going at the front of the list, slabs tend naturally toward the
673 673 * ideal order: highly allocated at the front, sparsely allocated at the back.
674 674 * Slabs with few allocated objects are likely to become completely free if they
675 675 * keep a safe distance away from the front of the free list. Slab misorders
676 676 * interfere with the natural tendency of slabs to become completely free or
677 677 * completely allocated. For example, a slab with a single allocated object
678 678 * needs only a single free to escape the cache; its natural desire is
679 679 * frustrated when it finds itself at the front of the list where a second
680 680 * allocation happens just before the free could have released it. Another slab
681 681 * with all but one object allocated might have supplied the buffer instead, so
682 682 * that both (as opposed to neither) of the slabs would have been taken off the
683 683 * free list.
684 684 *
685 685 * Although slabs tend naturally toward the ideal order, misorders allowed by a
686 686 * simple list implementation defeat the consolidator's strategy of merging
687 687 * least- and most-allocated slabs. Without an AVL tree to guarantee order, kmem
688 688 * needs another way to fix misorders to optimize its callback strategy. One
689 689 * approach is to periodically scan a limited number of slabs, advancing a
690 690 * marker to hold the current scan position, and to move extreme misorders to
691 691 * the front or back of the free list and to the front or back of the current
692 692 * scan range. By making consecutive scan ranges overlap by one slab, the least
693 693 * allocated slab in the current range can be carried along from the end of one
694 694 * scan to the start of the next.
695 695 *
696 696 * Maintaining partial slabs in an AVL tree relieves kmem of this additional
697 697 * task, however. Since most of the cache's activity is in the magazine layer,
698 698 * and allocations from the slab layer represent only a startup cost, the
699 699 * overhead of maintaining a balanced tree is not a significant concern compared
700 700 * to the opportunity of reducing complexity by eliminating the partial slab
701 701 * scanner just described. The overhead of an AVL tree is minimized by
702 702 * maintaining only partial slabs in the tree and keeping completely allocated
703 703 * slabs separately in a list. To avoid increasing the size of the slab
704 704  * structure, the AVL linkage pointers are reused for the slab's list linkage,
705 705 * since the slab will always be either partial or complete, never stored both
706 706  * ways at the same time. To further minimize the overhead of the AVL tree, the
707 707 * compare function that orders partial slabs by usage divides the range of
708 708 * allocated object counts into bins such that counts within the same bin are
709 709 * considered equal. Binning partial slabs makes it less likely that allocating
710 710 * or freeing a single object will change the slab's order, requiring a tree
711 711 * reinsertion (an avl_remove() followed by an avl_add(), both potentially
712 712 * requiring some rebalancing of the tree). Allocation counts closest to
713 713 * completely free and completely allocated are left unbinned (finely sorted) to
714 714 * better support the consolidator's strategy of merging slabs at either
715 715 * extreme.
716 716 *
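 * The binning idea can be sketched as follows (the function name, the
 * bin width, and the tie-break are illustrative, slab_refcnt is assumed
 * to be the slab's allocated-object count, and the real compare function
 * also leaves the extremes unbinned, as described above):
 *
 *      static int
 *      partial_slab_cmp(const void *l, const void *r)
 *      {
 *              const kmem_slab_t *ls = l, *rs = r;
 *              size_t lbin = ls->slab_refcnt >> 4;     // 16-buffer bins
 *              size_t rbin = rs->slab_refcnt >> 4;
 *
 *              if (lbin != rbin)       // more allocated sorts earlier
 *                      return (lbin > rbin ? -1 : 1);
 *              // Same bin: considered equally allocated, but the AVL
 *              // tree needs a total order, so break ties by address.
 *              if (ls != rs)
 *                      return (ls > rs ? 1 : -1);
 *              return (0);
 *      }
 *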
717 717 * 3.1 Assessing Fragmentation and Selecting Candidate Slabs
718 718 *
719 719 * The consolidator piggybacks on the kmem maintenance thread and is called on
720 720 * the same interval as kmem_cache_update(), once per cache every fifteen
721 721 * seconds. kmem maintains a running count of unallocated objects in the slab
722 722 * layer (cache_bufslab). The consolidator checks whether that number exceeds
723 723 * 12.5% (1/8) of the total objects in the cache (cache_buftotal), and whether
724 724 * there is a significant number of slabs in the cache (arbitrarily a minimum
725 725 * 101 total slabs). Unused objects that have fallen out of the magazine layer's
726 726 * working set are included in the assessment, and magazines in the depot are
727 727 * reaped if those objects would lift cache_bufslab above the fragmentation
728 728 * threshold. Once the consolidator decides that a cache is fragmented, it looks
729 729 * for a candidate slab to reclaim, starting at the end of the partial slab free
730 730 * list and scanning backwards. At first the consolidator is choosy: only a slab
731 731 * with fewer than 12.5% (1/8) of its objects allocated qualifies (or else a
732 732 * single allocated object, regardless of percentage). If there is difficulty
733 733 * finding a candidate slab, kmem raises the allocation threshold incrementally,
734 734 * up to a maximum 87.5% (7/8), so that eventually the consolidator will reduce
735 735 * external fragmentation (unused objects on the free list) below 12.5% (1/8),
736 736 * even in the worst case of every slab in the cache being almost 7/8 allocated.
737 737 * The threshold can also be lowered incrementally when candidate slabs are easy
738 738 * to find, and the threshold is reset to the minimum 1/8 as soon as the cache
739 739 * is no longer fragmented.
740 740 *
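 * A minimal sketch of the fragmentation test, using the tunables defined
 * later in this file (the helper name is illustrative and the total slab
 * count is elided):
 *
 *      static int
 *      cache_is_fragmented(kmem_cache_t *cp)   // caller holds cache_lock
 *      {
 *              size_t nslabs = ...;    // total slabs in the cache
 *
 *              if (nslabs < kmem_frag_minslabs)
 *                      return (0);     // too few slabs to bother
 *
 *              // Is more than numer/denom (1/8) of the cache's total
 *              // buffer count sitting unused in the slab layer?
 *              return (cp->cache_bufslab * kmem_frag_denom >
 *                  cp->cache_buftotal * kmem_frag_numer);
 *      }
 *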
741 741 * 3.2 Generating Callbacks
742 742 *
743 743 * Once an eligible slab is chosen, a callback is generated for every allocated
744 744 * object on the slab, in the hope that the client will move everything off the
745 745 * slab and make it reclaimable. Objects selected as move destinations are
746 746 * chosen from slabs at the front of the free list. Assuming slabs in the ideal
747 747 * order (most allocated at the front, least allocated at the back) and a
748 748 * cooperative client, the consolidator will succeed in removing slabs from both
749 749 * ends of the free list, completely allocating on the one hand and completely
750 750 * freeing on the other. Objects selected as move destinations are allocated in
751 751 * the kmem maintenance thread where move requests are enqueued. A separate
752 752 * callback thread removes pending callbacks from the queue and calls the
753 753 * client. The separate thread ensures that client code (the move function) does
754 754 * not interfere with internal kmem maintenance tasks. A map of pending
755 755 * callbacks keyed by object address (the object to be moved) is checked to
756 756 * ensure that duplicate callbacks are not generated for the same object.
757 757 * Allocating the move destination (the object to move to) prevents subsequent
758 758 * callbacks from selecting the same destination as an earlier pending callback.
759 759 *
760 760 * Move requests can also be generated by kmem_cache_reap() when the system is
761 761 * desperate for memory and by kmem_cache_move_notify(), called by the client to
762 762 * notify kmem that a move refused earlier with KMEM_CBRC_LATER is now possible.
763 763 * The map of pending callbacks is protected by the same lock that protects the
764 764 * slab layer.
765 765 *
766 766 * When the system is desperate for memory, kmem does not bother to determine
767 767 * whether or not the cache exceeds the fragmentation threshold, but tries to
768 768 * consolidate as many slabs as possible. Normally, the consolidator chews
769 769 * slowly, one sparsely allocated slab at a time during each maintenance
770 770 * interval that the cache is fragmented. When desperate, the consolidator
771 771 * starts at the last partial slab and enqueues callbacks for every allocated
772 772 * object on every partial slab, working backwards until it reaches the first
773 773 * partial slab. The first partial slab, meanwhile, advances in pace with the
774 774 * consolidator as allocations to supply move destinations for the enqueued
775 775 * callbacks use up the highly allocated slabs at the front of the free list.
776 776 * Ideally, the overgrown free list collapses like an accordion, starting at
777 777 * both ends and ending at the center with a single partial slab.
778 778 *
779 779 * 3.3 Client Responses
780 780 *
781 781 * When the client returns KMEM_CBRC_NO in response to the move callback, kmem
782 782 * marks the slab that supplied the stuck object non-reclaimable and moves it to
783 783  * the front of the free list. The slab remains marked as long as it remains on the
784 784 * free list, and it appears more allocated to the partial slab compare function
785 785 * than any unmarked slab, no matter how many of its objects are allocated.
786 786 * Since even one immovable object ties up the entire slab, the goal is to
787 787 * completely allocate any slab that cannot be completely freed. kmem does not
788 788 * bother generating callbacks to move objects from a marked slab unless the
789 789 * system is desperate.
790 790 *
791 791 * When the client responds KMEM_CBRC_LATER, kmem increments a count for the
792 792 * slab. If the client responds LATER too many times, kmem disbelieves and
793 793 * treats the response as a NO. The count is cleared when the slab is taken off
794 794 * the partial slab list or when the client moves one of the slab's objects.
795 795 *
796 796 * 4. Observability
797 797 *
798 798 * A kmem cache's external fragmentation is best observed with 'mdb -k' using
799 799 * the ::kmem_slabs dcmd. For a complete description of the command, enter
800 800 * '::help kmem_slabs' at the mdb prompt.
801 801 */
802 802
803 803 #include <sys/kmem_impl.h>
804 804 #include <sys/vmem_impl.h>
805 805 #include <sys/param.h>
806 806 #include <sys/sysmacros.h>
807 807 #include <sys/vm.h>
808 808 #include <sys/proc.h>
809 809 #include <sys/tuneable.h>
810 810 #include <sys/systm.h>
811 811 #include <sys/cmn_err.h>
812 812 #include <sys/debug.h>
813 813 #include <sys/sdt.h>
814 814 #include <sys/mutex.h>
815 815 #include <sys/bitmap.h>
816 816 #include <sys/atomic.h>
817 817 #include <sys/kobj.h>
818 818 #include <sys/disp.h>
819 819 #include <vm/seg_kmem.h>
820 820 #include <sys/log.h>
821 821 #include <sys/callb.h>
822 822 #include <sys/taskq.h>
823 823 #include <sys/modctl.h>
824 824 #include <sys/reboot.h>
825 825 #include <sys/id32.h>
826 826 #include <sys/zone.h>
827 827 #include <sys/netstack.h>
828 828 #ifdef DEBUG
829 829 #include <sys/random.h>
830 830 #endif
831 831
832 832 extern void streams_msg_init(void);
833 833 extern int segkp_fromheap;
834 834 extern void segkp_cache_free(void);
835 835 extern int callout_init_done;
836 836
837 837 struct kmem_cache_kstat {
838 838 kstat_named_t kmc_buf_size;
839 839 kstat_named_t kmc_align;
840 840 kstat_named_t kmc_chunk_size;
841 841 kstat_named_t kmc_slab_size;
842 842 kstat_named_t kmc_alloc;
843 843 kstat_named_t kmc_alloc_fail;
844 844 kstat_named_t kmc_free;
845 845 kstat_named_t kmc_depot_alloc;
846 846 kstat_named_t kmc_depot_free;
847 847 kstat_named_t kmc_depot_contention;
848 848 kstat_named_t kmc_slab_alloc;
849 849 kstat_named_t kmc_slab_free;
850 850 kstat_named_t kmc_buf_constructed;
851 851 kstat_named_t kmc_buf_avail;
852 852 kstat_named_t kmc_buf_inuse;
853 853 kstat_named_t kmc_buf_total;
854 854 kstat_named_t kmc_buf_max;
855 855 kstat_named_t kmc_slab_create;
856 856 kstat_named_t kmc_slab_destroy;
857 857 kstat_named_t kmc_vmem_source;
858 858 kstat_named_t kmc_hash_size;
859 859 kstat_named_t kmc_hash_lookup_depth;
860 860 kstat_named_t kmc_hash_rescale;
861 861 kstat_named_t kmc_full_magazines;
862 862 kstat_named_t kmc_empty_magazines;
863 863 kstat_named_t kmc_magazine_size;
864 864 kstat_named_t kmc_reap; /* number of kmem_cache_reap() calls */
865 865 kstat_named_t kmc_defrag; /* attempts to defrag all partial slabs */
866 866 kstat_named_t kmc_scan; /* attempts to defrag one partial slab */
867 867 kstat_named_t kmc_move_callbacks; /* sum of yes, no, later, dn, dk */
868 868 kstat_named_t kmc_move_yes;
869 869 kstat_named_t kmc_move_no;
870 870 kstat_named_t kmc_move_later;
871 871 kstat_named_t kmc_move_dont_need;
872 872 kstat_named_t kmc_move_dont_know; /* obj unrecognized by client ... */
873 873 kstat_named_t kmc_move_hunt_found; /* ... but found in mag layer */
874 874 kstat_named_t kmc_move_slabs_freed; /* slabs freed by consolidator */
875 875 kstat_named_t kmc_move_reclaimable; /* buffers, if consolidator ran */
876 876 } kmem_cache_kstat = {
877 877 { "buf_size", KSTAT_DATA_UINT64 },
878 878 { "align", KSTAT_DATA_UINT64 },
879 879 { "chunk_size", KSTAT_DATA_UINT64 },
880 880 { "slab_size", KSTAT_DATA_UINT64 },
881 881 { "alloc", KSTAT_DATA_UINT64 },
882 882 { "alloc_fail", KSTAT_DATA_UINT64 },
883 883 { "free", KSTAT_DATA_UINT64 },
884 884 { "depot_alloc", KSTAT_DATA_UINT64 },
885 885 { "depot_free", KSTAT_DATA_UINT64 },
886 886 { "depot_contention", KSTAT_DATA_UINT64 },
887 887 { "slab_alloc", KSTAT_DATA_UINT64 },
888 888 { "slab_free", KSTAT_DATA_UINT64 },
889 889 { "buf_constructed", KSTAT_DATA_UINT64 },
890 890 { "buf_avail", KSTAT_DATA_UINT64 },
891 891 { "buf_inuse", KSTAT_DATA_UINT64 },
892 892 { "buf_total", KSTAT_DATA_UINT64 },
893 893 { "buf_max", KSTAT_DATA_UINT64 },
894 894 { "slab_create", KSTAT_DATA_UINT64 },
895 895 { "slab_destroy", KSTAT_DATA_UINT64 },
896 896 { "vmem_source", KSTAT_DATA_UINT64 },
897 897 { "hash_size", KSTAT_DATA_UINT64 },
898 898 { "hash_lookup_depth", KSTAT_DATA_UINT64 },
899 899 { "hash_rescale", KSTAT_DATA_UINT64 },
900 900 { "full_magazines", KSTAT_DATA_UINT64 },
901 901 { "empty_magazines", KSTAT_DATA_UINT64 },
902 902 { "magazine_size", KSTAT_DATA_UINT64 },
903 903 { "reap", KSTAT_DATA_UINT64 },
904 904 { "defrag", KSTAT_DATA_UINT64 },
905 905 { "scan", KSTAT_DATA_UINT64 },
906 906 { "move_callbacks", KSTAT_DATA_UINT64 },
907 907 { "move_yes", KSTAT_DATA_UINT64 },
908 908 { "move_no", KSTAT_DATA_UINT64 },
909 909 { "move_later", KSTAT_DATA_UINT64 },
910 910 { "move_dont_need", KSTAT_DATA_UINT64 },
911 911 { "move_dont_know", KSTAT_DATA_UINT64 },
912 912 { "move_hunt_found", KSTAT_DATA_UINT64 },
913 913 { "move_slabs_freed", KSTAT_DATA_UINT64 },
914 914 { "move_reclaimable", KSTAT_DATA_UINT64 },
915 915 };
916 916
917 917 static kmutex_t kmem_cache_kstat_lock;
918 918
919 919 /*
920 920 * The default set of caches to back kmem_alloc().
921 921 * These sizes should be reevaluated periodically.
922 922 *
923 923 * We want allocations that are multiples of the coherency granularity
924 924 * (64 bytes) to be satisfied from a cache which is a multiple of 64
925 925 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
926 926 * the next kmem_cache_size greater than or equal to it must be a
927 927 * multiple of 64.
928 928 *
929 929 * We split the table into two sections: size <= 4k and size > 4k. This
930 930 * saves a lot of space and cache footprint in our cache tables.
931 931 */
932 932 static const int kmem_alloc_sizes[] = {
933 933 1 * 8,
934 934 2 * 8,
935 935 3 * 8,
936 936 4 * 8, 5 * 8, 6 * 8, 7 * 8,
937 937 4 * 16, 5 * 16, 6 * 16, 7 * 16,
938 938 4 * 32, 5 * 32, 6 * 32, 7 * 32,
939 939 4 * 64, 5 * 64, 6 * 64, 7 * 64,
940 940 4 * 128, 5 * 128, 6 * 128, 7 * 128,
941 941 P2ALIGN(8192 / 7, 64),
942 942 P2ALIGN(8192 / 6, 64),
943 943 P2ALIGN(8192 / 5, 64),
944 944 P2ALIGN(8192 / 4, 64),
945 945 P2ALIGN(8192 / 3, 64),
946 946 P2ALIGN(8192 / 2, 64),
947 947 };
948 948
949 949 static const int kmem_big_alloc_sizes[] = {
950 950 2 * 4096, 3 * 4096,
951 951 2 * 8192, 3 * 8192,
952 952 4 * 8192, 5 * 8192, 6 * 8192, 7 * 8192,
953 953 8 * 8192, 9 * 8192, 10 * 8192, 11 * 8192,
954 954 12 * 8192, 13 * 8192, 14 * 8192, 15 * 8192,
955 955 16 * 8192
956 956 };
957 957
958 958 #define KMEM_MAXBUF 4096
959 959 #define KMEM_BIG_MAXBUF_32BIT 32768
960 960 #define KMEM_BIG_MAXBUF 131072
961 961
962 962 #define KMEM_BIG_MULTIPLE 4096 /* big_alloc_sizes must be a multiple */
963 963 #define KMEM_BIG_SHIFT 12 /* lg(KMEM_BIG_MULTIPLE) */
964 964
965 965 static kmem_cache_t *kmem_alloc_table[KMEM_MAXBUF >> KMEM_ALIGN_SHIFT];
966 966 static kmem_cache_t *kmem_big_alloc_table[KMEM_BIG_MAXBUF >> KMEM_BIG_SHIFT];
967 967
968 968 #define KMEM_ALLOC_TABLE_MAX (KMEM_MAXBUF >> KMEM_ALIGN_SHIFT)
969 969 static size_t kmem_big_alloc_table_max = 0; /* # of filled elements */
970 970
971 971 static kmem_magtype_t kmem_magtype[] = {
972 972 { 1, 8, 3200, 65536 },
973 973 { 3, 16, 256, 32768 },
974 974 { 7, 32, 64, 16384 },
975 975 { 15, 64, 0, 8192 },
976 976 { 31, 64, 0, 4096 },
977 977 { 47, 64, 0, 2048 },
978 978 { 63, 64, 0, 1024 },
979 979 { 95, 64, 0, 512 },
980 980 { 143, 64, 0, 0 },
981 981 };
982 982
983 983 static uint32_t kmem_reaping;
984 984 static uint32_t kmem_reaping_idspace;
985 985
986 986 /*
987 987 * kmem tunables
988 988 */
989 989 clock_t kmem_reap_interval; /* cache reaping rate [15 * HZ ticks] */
990 990 int kmem_depot_contention = 3; /* max failed tryenters per real interval */
991 991 pgcnt_t kmem_reapahead = 0; /* start reaping N pages before pageout */
992 992 int kmem_panic = 1; /* whether to panic on error */
993 993 int kmem_logging = 1; /* kmem_log_enter() override */
994 994 uint32_t kmem_mtbf = 0; /* mean time between failures [default: off] */
995 995 size_t kmem_transaction_log_size; /* transaction log size [2% of memory] */
996 996 size_t kmem_content_log_size; /* content log size [2% of memory] */
997 997 size_t kmem_failure_log_size; /* failure log [4 pages per CPU] */
998 998 size_t kmem_slab_log_size; /* slab create log [4 pages per CPU] */
999 999 size_t kmem_content_maxsave = 256; /* KMF_CONTENTS max bytes to log */
1000 1000 size_t kmem_lite_minsize = 0; /* minimum buffer size for KMF_LITE */
1001 1001 size_t kmem_lite_maxalign = 1024; /* maximum buffer alignment for KMF_LITE */
1002 1002 int kmem_lite_pcs = 4; /* number of PCs to store in KMF_LITE mode */
1003 1003 size_t kmem_maxverify; /* maximum bytes to inspect in debug routines */
1004 1004 size_t kmem_minfirewall; /* hardware-enforced redzone threshold */
1005 1005
1006 1006 #ifdef _LP64
1007 1007 size_t kmem_max_cached = KMEM_BIG_MAXBUF; /* maximum kmem_alloc cache */
1008 1008 #else
1009 1009 size_t kmem_max_cached = KMEM_BIG_MAXBUF_32BIT; /* maximum kmem_alloc cache */
1010 1010 #endif
1011 1011
1012 1012 #ifdef DEBUG
1013 1013 int kmem_flags = KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE | KMF_CONTENTS;
1014 1014 #else
1015 1015 int kmem_flags = 0;
1016 1016 #endif
1017 1017 int kmem_ready;
1018 1018
1019 1019 static kmem_cache_t *kmem_slab_cache;
1020 1020 static kmem_cache_t *kmem_bufctl_cache;
1021 1021 static kmem_cache_t *kmem_bufctl_audit_cache;
1022 1022
1023 1023 static kmutex_t kmem_cache_lock; /* inter-cache linkage only */
1024 1024 static list_t kmem_caches;
1025 1025
1026 1026 static taskq_t *kmem_taskq;
1027 1027 static kmutex_t kmem_flags_lock;
1028 1028 static vmem_t *kmem_metadata_arena;
1029 1029 static vmem_t *kmem_msb_arena; /* arena for metadata caches */
1030 1030 static vmem_t *kmem_cache_arena;
1031 1031 static vmem_t *kmem_hash_arena;
1032 1032 static vmem_t *kmem_log_arena;
1033 1033 static vmem_t *kmem_oversize_arena;
1034 1034 static vmem_t *kmem_va_arena;
1035 1035 static vmem_t *kmem_default_arena;
1036 1036 static vmem_t *kmem_firewall_va_arena;
1037 1037 static vmem_t *kmem_firewall_arena;
1038 1038
1039 1039 /*
1040 1040 * Define KMEM_STATS to turn on statistic gathering. By default, it is only
1041 1041 * turned on when DEBUG is also defined.
1042 1042 */
1043 1043 #ifdef DEBUG
1044 1044 #define KMEM_STATS
1045 1045 #endif /* DEBUG */
1046 1046
1047 1047 #ifdef KMEM_STATS
1048 1048 #define KMEM_STAT_ADD(stat) ((stat)++)
1049 1049 #define KMEM_STAT_COND_ADD(cond, stat) ((void) (!(cond) || (stat)++))
1050 1050 #else
1051 1051 #define KMEM_STAT_ADD(stat) /* nothing */
1052 1052 #define KMEM_STAT_COND_ADD(cond, stat) /* nothing */
1053 1053 #endif /* KMEM_STATS */
1054 1054
1055 1055 /*
1056 1056 * kmem slab consolidator thresholds (tunables)
1057 1057 */
1058 1058 size_t kmem_frag_minslabs = 101; /* minimum total slabs */
1059 1059 size_t kmem_frag_numer = 1; /* free buffers (numerator) */
1060 1060 size_t kmem_frag_denom = KMEM_VOID_FRACTION; /* buffers (denominator) */
1061 1061 /*
1062 1062 * Maximum number of slabs from which to move buffers during a single
1063 1063 * maintenance interval while the system is not low on memory.
1064 1064 */
1065 1065 size_t kmem_reclaim_max_slabs = 1;
1066 1066 /*
1067 1067 * Number of slabs to scan backwards from the end of the partial slab list
1068 1068 * when searching for buffers to relocate.
1069 1069 */
1070 1070 size_t kmem_reclaim_scan_range = 12;
1071 1071
1072 1072 #ifdef KMEM_STATS
1073 1073 static struct {
1074 1074 uint64_t kms_callbacks;
1075 1075 uint64_t kms_yes;
1076 1076 uint64_t kms_no;
1077 1077 uint64_t kms_later;
1078 1078 uint64_t kms_dont_need;
1079 1079 uint64_t kms_dont_know;
1080 1080 uint64_t kms_hunt_found_mag;
1081 1081 uint64_t kms_hunt_found_slab;
1082 1082 uint64_t kms_hunt_alloc_fail;
1083 1083 uint64_t kms_hunt_lucky;
1084 1084 uint64_t kms_notify;
1085 1085 uint64_t kms_notify_callbacks;
1086 1086 uint64_t kms_disbelief;
1087 1087 uint64_t kms_already_pending;
1088 1088 uint64_t kms_callback_alloc_fail;
1089 1089 uint64_t kms_callback_taskq_fail;
1090 1090 uint64_t kms_endscan_slab_dead;
1091 1091 uint64_t kms_endscan_slab_destroyed;
1092 1092 uint64_t kms_endscan_nomem;
1093 1093 uint64_t kms_endscan_refcnt_changed;
1094 1094 uint64_t kms_endscan_nomove_changed;
1095 1095 uint64_t kms_endscan_freelist;
1096 1096 uint64_t kms_avl_update;
1097 1097 uint64_t kms_avl_noupdate;
1098 1098 uint64_t kms_no_longer_reclaimable;
1099 1099 uint64_t kms_notify_no_longer_reclaimable;
1100 1100 uint64_t kms_notify_slab_dead;
1101 1101 uint64_t kms_notify_slab_destroyed;
1102 1102 uint64_t kms_alloc_fail;
1103 1103 uint64_t kms_constructor_fail;
1104 1104 uint64_t kms_dead_slabs_freed;
1105 1105 uint64_t kms_defrags;
1106 1106 uint64_t kms_scans;
1107 1107 uint64_t kms_scan_depot_ws_reaps;
1108 1108 uint64_t kms_debug_reaps;
1109 1109 uint64_t kms_debug_scans;
1110 1110 } kmem_move_stats;
1111 1111 #endif /* KMEM_STATS */
1112 1112
1113 1113 /* consolidator knobs */
1114 1114 static boolean_t kmem_move_noreap;
1115 1115 static boolean_t kmem_move_blocked;
1116 1116 static boolean_t kmem_move_fulltilt;
1117 1117 static boolean_t kmem_move_any_partial;
1118 1118
1119 1119 #ifdef DEBUG
1120 1120 /*
1121 1121 * kmem consolidator debug tunables:
1122 1122 * Ensure code coverage by occasionally running the consolidator even when the
1123 1123 * caches are not fragmented (they may never be). These intervals are mean time
1124 1124 * in cache maintenance intervals (kmem_cache_update).
1125 1125 */
1126 1126 uint32_t kmem_mtb_move = 60; /* defrag 1 slab (~15min) */
1127 1127 uint32_t kmem_mtb_reap = 1800; /* defrag all slabs (~7.5hrs) */
1128 1128 #endif /* DEBUG */
1129 1129
1130 1130 static kmem_cache_t *kmem_defrag_cache;
1131 1131 static kmem_cache_t *kmem_move_cache;
1132 1132 static taskq_t *kmem_move_taskq;
1133 1133
1134 1134 static void kmem_cache_scan(kmem_cache_t *);
1135 1135 static void kmem_cache_defrag(kmem_cache_t *);
1136 1136 static void kmem_slab_prefill(kmem_cache_t *, kmem_slab_t *);
1137 1137
1138 1138
1139 1139 kmem_log_header_t *kmem_transaction_log;
1140 1140 kmem_log_header_t *kmem_content_log;
1141 1141 kmem_log_header_t *kmem_failure_log;
1142 1142 kmem_log_header_t *kmem_slab_log;
1143 1143
1144 1144 static int kmem_lite_count; /* # of PCs in kmem_buftag_lite_t */
1145 1145
1146 1146 #define KMEM_BUFTAG_LITE_ENTER(bt, count, caller) \
1147 1147 if ((count) > 0) { \
1148 1148 pc_t *_s = ((kmem_buftag_lite_t *)(bt))->bt_history; \
1149 1149 pc_t *_e; \
1150 1150 /* memmove() the old entries down one notch */ \
1151 1151 for (_e = &_s[(count) - 1]; _e > _s; _e--) \
1152 1152 *_e = *(_e - 1); \
1153 1153 *_s = (uintptr_t)(caller); \
1154 1154 }
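A minimal user-level sketch (not kernel code) of the shift-register behavior of KMEM_BUFTAG_LITE_ENTER() above: each new caller PC lands in slot 0 and the older entries slide down one notch, so bt_history always lists the most recent callers first.

	#include <stdio.h>
	#include <stdint.h>

	#define	NPCS	4

	static void
	record_caller(uintptr_t *history, int count, uintptr_t caller)
	{
		uintptr_t *s = history;
		uintptr_t *e;

		/* same loop as the macro: slide old entries down one notch */
		for (e = &s[count - 1]; e > s; e--)
			*e = *(e - 1);
		*s = caller;
	}

	int
	main(void)
	{
		uintptr_t history[NPCS] = { 0 };
		int i;

		record_caller(history, NPCS, 0xA);
		record_caller(history, NPCS, 0xB);
		record_caller(history, NPCS, 0xC);

		/* newest first: 0xc 0xb 0xa 0 */
		for (i = 0; i < NPCS; i++)
			printf("%#lx ", (unsigned long)history[i]);
		printf("\n");
		return (0);
	}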
1155 1155
1156 1156 #define KMERR_MODIFIED 0 /* buffer modified while on freelist */
1157 1157 #define KMERR_REDZONE 1 /* redzone violation (write past end of buf) */
1158 1158 #define KMERR_DUPFREE 2 /* freed a buffer twice */
1159 1159 #define KMERR_BADADDR 3 /* freed a bad (unallocated) address */
1160 1160 #define KMERR_BADBUFTAG 4 /* buftag corrupted */
1161 1161 #define KMERR_BADBUFCTL 5 /* bufctl corrupted */
1162 1162 #define KMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
1163 1163 #define KMERR_BADSIZE 7 /* alloc size != free size */
1164 1164 #define KMERR_BADBASE 8 /* buffer base address wrong */
1165 1165
1166 1166 struct {
1167 1167 hrtime_t kmp_timestamp; /* timestamp of panic */
1168 1168 int kmp_error; /* type of kmem error */
1169 1169 void *kmp_buffer; /* buffer that induced panic */
1170 1170 void *kmp_realbuf; /* real start address for buffer */
1171 1171 kmem_cache_t *kmp_cache; /* buffer's cache according to client */
1172 1172 kmem_cache_t *kmp_realcache; /* actual cache containing buffer */
1173 1173 kmem_slab_t *kmp_slab; /* slab according to kmem_findslab() */
1174 1174 kmem_bufctl_t *kmp_bufctl; /* bufctl */
1175 1175 } kmem_panic_info;
1176 1176
1177 1177
1178 1178 static void
1179 1179 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
1180 1180 {
1181 1181 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1182 1182 uint64_t *buf = buf_arg;
1183 1183
1184 1184 while (buf < bufend)
1185 1185 *buf++ = pattern;
1186 1186 }
1187 1187
1188 1188 static void *
1189 1189 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
1190 1190 {
1191 1191 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1192 1192 uint64_t *buf;
1193 1193
1194 1194 for (buf = buf_arg; buf < bufend; buf++)
1195 1195 if (*buf != pattern)
1196 1196 return (buf);
1197 1197 return (NULL);
1198 1198 }
1199 1199
1200 1200 static void *
1201 1201 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
1202 1202 {
1203 1203 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
1204 1204 uint64_t *buf;
1205 1205
1206 1206 for (buf = buf_arg; buf < bufend; buf++) {
1207 1207 if (*buf != old) {
1208 1208 copy_pattern(old, buf_arg,
1209 1209 (char *)buf - (char *)buf_arg);
1210 1210 return (buf);
1211 1211 }
1212 1212 *buf = new;
1213 1213 }
1214 1214
1215 1215 return (NULL);
1216 1216 }
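The three pattern helpers above are the building blocks of the KMF_DEADBEEF/KMF_REDZONE checks used throughout this file. A minimal user-level sketch of how they combine to catch a modify-after-free (FREE_PATTERN here is only a stand-in for KMEM_FREE_PATTERN; the two helpers are copied verbatim from above):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define	FREE_PATTERN	0xdeadbeefdeadbeefULL	/* stand-in value */

	/* copy_pattern() and verify_pattern(), copied from above */
	static void
	copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
	{
		uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
		uint64_t *buf = buf_arg;

		while (buf < bufend)
			*buf++ = pattern;
	}

	static void *
	verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
	{
		uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
		uint64_t *buf;

		for (buf = buf_arg; buf < bufend; buf++)
			if (*buf != pattern)
				return (buf);
		return (NULL);
	}

	int
	main(void)
	{
		size_t size = 64;
		uint64_t *buf = malloc(size);

		copy_pattern(FREE_PATTERN, buf, size);	/* poison on "free" */
		buf[3] = 0;				/* simulate a stray write */

		void *bad = verify_pattern(FREE_PATTERN, buf, size);
		if (bad != NULL)
			printf("modified at offset %ld\n",
			    (long)((char *)bad - (char *)buf));
		free(buf);
		return (0);
	}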
1217 1217
1218 1218 static void
1219 1219 kmem_cache_applyall(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1220 1220 {
1221 1221 kmem_cache_t *cp;
1222 1222
1223 1223 mutex_enter(&kmem_cache_lock);
1224 1224 for (cp = list_head(&kmem_caches); cp != NULL;
1225 1225 cp = list_next(&kmem_caches, cp))
1226 1226 if (tq != NULL)
1227 1227 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1228 1228 tqflag);
1229 1229 else
1230 1230 func(cp);
1231 1231 mutex_exit(&kmem_cache_lock);
1232 1232 }
1233 1233
1234 1234 static void
1235 1235 kmem_cache_applyall_id(void (*func)(kmem_cache_t *), taskq_t *tq, int tqflag)
1236 1236 {
1237 1237 kmem_cache_t *cp;
1238 1238
1239 1239 mutex_enter(&kmem_cache_lock);
1240 1240 for (cp = list_head(&kmem_caches); cp != NULL;
1241 1241 cp = list_next(&kmem_caches, cp)) {
1242 1242 if (!(cp->cache_cflags & KMC_IDENTIFIER))
1243 1243 continue;
1244 1244 if (tq != NULL)
1245 1245 (void) taskq_dispatch(tq, (task_func_t *)func, cp,
1246 1246 tqflag);
1247 1247 else
1248 1248 func(cp);
1249 1249 }
1250 1250 mutex_exit(&kmem_cache_lock);
1251 1251 }
1252 1252
1253 1253 /*
1254 1254 * Debugging support. Given a buffer address, find its slab.
1255 1255 */
1256 1256 static kmem_slab_t *
1257 1257 kmem_findslab(kmem_cache_t *cp, void *buf)
1258 1258 {
1259 1259 kmem_slab_t *sp;
1260 1260
1261 1261 mutex_enter(&cp->cache_lock);
1262 1262 for (sp = list_head(&cp->cache_complete_slabs); sp != NULL;
1263 1263 sp = list_next(&cp->cache_complete_slabs, sp)) {
1264 1264 if (KMEM_SLAB_MEMBER(sp, buf)) {
1265 1265 mutex_exit(&cp->cache_lock);
1266 1266 return (sp);
1267 1267 }
1268 1268 }
1269 1269 for (sp = avl_first(&cp->cache_partial_slabs); sp != NULL;
1270 1270 sp = AVL_NEXT(&cp->cache_partial_slabs, sp)) {
1271 1271 if (KMEM_SLAB_MEMBER(sp, buf)) {
1272 1272 mutex_exit(&cp->cache_lock);
1273 1273 return (sp);
1274 1274 }
1275 1275 }
1276 1276 mutex_exit(&cp->cache_lock);
1277 1277
1278 1278 return (NULL);
1279 1279 }
1280 1280
1281 1281 static void
1282 1282 kmem_error(int error, kmem_cache_t *cparg, void *bufarg)
1283 1283 {
1284 1284 kmem_buftag_t *btp = NULL;
1285 1285 kmem_bufctl_t *bcp = NULL;
1286 1286 kmem_cache_t *cp = cparg;
1287 1287 kmem_slab_t *sp;
1288 1288 uint64_t *off;
1289 1289 void *buf = bufarg;
1290 1290
1291 1291 kmem_logging = 0; /* stop logging when a bad thing happens */
1292 1292
1293 1293 kmem_panic_info.kmp_timestamp = gethrtime();
1294 1294
1295 1295 sp = kmem_findslab(cp, buf);
1296 1296 if (sp == NULL) {
1297 1297 for (cp = list_tail(&kmem_caches); cp != NULL;
1298 1298 cp = list_prev(&kmem_caches, cp)) {
1299 1299 if ((sp = kmem_findslab(cp, buf)) != NULL)
1300 1300 break;
1301 1301 }
1302 1302 }
1303 1303
1304 1304 if (sp == NULL) {
1305 1305 cp = NULL;
1306 1306 error = KMERR_BADADDR;
1307 1307 } else {
1308 1308 if (cp != cparg)
1309 1309 error = KMERR_BADCACHE;
1310 1310 else
1311 1311 buf = (char *)bufarg - ((uintptr_t)bufarg -
1312 1312 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
1313 1313 if (buf != bufarg)
1314 1314 error = KMERR_BADBASE;
1315 1315 if (cp->cache_flags & KMF_BUFTAG)
1316 1316 btp = KMEM_BUFTAG(cp, buf);
1317 1317 if (cp->cache_flags & KMF_HASH) {
1318 1318 mutex_enter(&cp->cache_lock);
1319 1319 for (bcp = *KMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
1320 1320 if (bcp->bc_addr == buf)
1321 1321 break;
1322 1322 mutex_exit(&cp->cache_lock);
1323 1323 if (bcp == NULL && btp != NULL)
1324 1324 bcp = btp->bt_bufctl;
1325 1325 if (kmem_findslab(cp->cache_bufctl_cache, bcp) ==
1326 1326 NULL || P2PHASE((uintptr_t)bcp, KMEM_ALIGN) ||
1327 1327 bcp->bc_addr != buf) {
1328 1328 error = KMERR_BADBUFCTL;
1329 1329 bcp = NULL;
1330 1330 }
1331 1331 }
1332 1332 }
1333 1333
1334 1334 kmem_panic_info.kmp_error = error;
1335 1335 kmem_panic_info.kmp_buffer = bufarg;
1336 1336 kmem_panic_info.kmp_realbuf = buf;
1337 1337 kmem_panic_info.kmp_cache = cparg;
1338 1338 kmem_panic_info.kmp_realcache = cp;
1339 1339 kmem_panic_info.kmp_slab = sp;
1340 1340 kmem_panic_info.kmp_bufctl = bcp;
1341 1341
1342 1342 printf("kernel memory allocator: ");
1343 1343
1344 1344 switch (error) {
1345 1345
1346 1346 case KMERR_MODIFIED:
1347 1347 printf("buffer modified after being freed\n");
1348 1348 off = verify_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1349 1349 if (off == NULL) /* shouldn't happen */
1350 1350 off = buf;
1351 1351 printf("modification occurred at offset 0x%lx "
1352 1352 "(0x%llx replaced by 0x%llx)\n",
1353 1353 (uintptr_t)off - (uintptr_t)buf,
1354 1354 (longlong_t)KMEM_FREE_PATTERN, (longlong_t)*off);
1355 1355 break;
1356 1356
1357 1357 case KMERR_REDZONE:
1358 1358 printf("redzone violation: write past end of buffer\n");
1359 1359 break;
1360 1360
1361 1361 case KMERR_BADADDR:
1362 1362 printf("invalid free: buffer not in cache\n");
1363 1363 break;
1364 1364
1365 1365 case KMERR_DUPFREE:
1366 1366 printf("duplicate free: buffer freed twice\n");
1367 1367 break;
1368 1368
1369 1369 case KMERR_BADBUFTAG:
1370 1370 printf("boundary tag corrupted\n");
1371 1371 printf("bcp ^ bxstat = %lx, should be %lx\n",
1372 1372 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
1373 1373 KMEM_BUFTAG_FREE);
1374 1374 break;
1375 1375
1376 1376 case KMERR_BADBUFCTL:
1377 1377 printf("bufctl corrupted\n");
1378 1378 break;
1379 1379
1380 1380 case KMERR_BADCACHE:
1381 1381 printf("buffer freed to wrong cache\n");
1382 1382 printf("buffer was allocated from %s,\n", cp->cache_name);
1383 1383 printf("caller attempting free to %s.\n", cparg->cache_name);
1384 1384 break;
1385 1385
1386 1386 case KMERR_BADSIZE:
1387 1387 printf("bad free: free size (%u) != alloc size (%u)\n",
1388 1388 KMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
1389 1389 KMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
1390 1390 break;
1391 1391
1392 1392 case KMERR_BADBASE:
1393 1393 printf("bad free: free address (%p) != alloc address (%p)\n",
1394 1394 bufarg, buf);
1395 1395 break;
1396 1396 }
1397 1397
1398 1398 printf("buffer=%p bufctl=%p cache: %s\n",
1399 1399 bufarg, (void *)bcp, cparg->cache_name);
1400 1400
1401 1401 if (bcp != NULL && (cp->cache_flags & KMF_AUDIT) &&
1402 1402 error != KMERR_BADBUFCTL) {
1403 1403 int d;
1404 1404 timestruc_t ts;
1405 1405 kmem_bufctl_audit_t *bcap = (kmem_bufctl_audit_t *)bcp;
1406 1406
1407 1407 hrt2ts(kmem_panic_info.kmp_timestamp - bcap->bc_timestamp, &ts);
1408 1408 printf("previous transaction on buffer %p:\n", buf);
1409 1409 printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
1410 1410 (void *)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
1411 1411 (void *)sp, cp->cache_name);
1412 1412 for (d = 0; d < MIN(bcap->bc_depth, KMEM_STACK_DEPTH); d++) {
1413 1413 ulong_t off;
1414 1414 char *sym = kobj_getsymname(bcap->bc_stack[d], &off);
1415 1415 printf("%s+%lx\n", sym ? sym : "?", off);
1416 1416 }
1417 1417 }
1418 1418 if (kmem_panic > 0)
1419 1419 panic("kernel heap corruption detected");
1420 1420 if (kmem_panic == 0)
1421 1421 debug_enter(NULL);
1422 1422 kmem_logging = 1; /* resume logging */
1423 1423 }
1424 1424
1425 1425 static kmem_log_header_t *
1426 1426 kmem_log_init(size_t logsize)
1427 1427 {
1428 1428 kmem_log_header_t *lhp;
1429 1429 int nchunks = 4 * max_ncpus;
1430 1430 size_t lhsize = (size_t)&((kmem_log_header_t *)0)->lh_cpu[max_ncpus];
1431 1431 int i;
1432 1432
1433 1433 /*
1434 1434 * Make sure that lhp->lh_cpu[] is nicely aligned
1435 1435 * to prevent false sharing of cache lines.
1436 1436 */
1437 1437 lhsize = P2ROUNDUP(lhsize, KMEM_ALIGN);
1438 1438 lhp = vmem_xalloc(kmem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1439 1439 NULL, NULL, VM_SLEEP);
1440 1440 bzero(lhp, lhsize);
1441 1441
1442 1442 mutex_init(&lhp->lh_lock, NULL, MUTEX_DEFAULT, NULL);
1443 1443 lhp->lh_nchunks = nchunks;
1444 1444 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks + 1, PAGESIZE);
1445 1445 lhp->lh_base = vmem_alloc(kmem_log_arena,
1446 1446 lhp->lh_chunksize * nchunks, VM_SLEEP);
1447 1447 lhp->lh_free = vmem_alloc(kmem_log_arena,
1448 1448 nchunks * sizeof (int), VM_SLEEP);
1449 1449 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1450 1450
1451 1451 for (i = 0; i < max_ncpus; i++) {
1452 1452 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1453 1453 mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
1454 1454 clhp->clh_chunk = i;
1455 1455 }
1456 1456
1457 1457 for (i = max_ncpus; i < nchunks; i++)
1458 1458 lhp->lh_free[i] = i;
1459 1459
1460 1460 lhp->lh_head = max_ncpus;
1461 1461 lhp->lh_tail = 0;
1462 1462
1463 1463 return (lhp);
1464 1464 }
1465 1465
1466 1466 static void *
1467 1467 kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
1468 1468 {
1469 1469 void *logspace;
1470 1470 kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];
1471 1471
1472 1472 if (lhp == NULL || kmem_logging == 0 || panicstr)
1473 1473 return (NULL);
1474 1474
1475 1475 mutex_enter(&clhp->clh_lock);
1476 1476 clhp->clh_hits++;
1477 1477 if (size > clhp->clh_avail) {
1478 1478 mutex_enter(&lhp->lh_lock);
1479 1479 lhp->lh_hits++;
1480 1480 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1481 1481 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1482 1482 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1483 1483 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1484 1484 clhp->clh_current = lhp->lh_base +
1485 1485 clhp->clh_chunk * lhp->lh_chunksize;
1486 1486 clhp->clh_avail = lhp->lh_chunksize;
1487 1487 if (size > lhp->lh_chunksize)
1488 1488 size = lhp->lh_chunksize;
1489 1489 mutex_exit(&lhp->lh_lock);
1490 1490 }
1491 1491 logspace = clhp->clh_current;
1492 1492 clhp->clh_current += size;
1493 1493 clhp->clh_avail -= size;
1494 1494 bcopy(data, logspace, size);
1495 1495 mutex_exit(&clhp->clh_lock);
1496 1496 return (logspace);
1497 1497 }
1498 1498
1499 1499 #define KMEM_AUDIT(lp, cp, bcp) \
1500 1500 { \
1501 1501 kmem_bufctl_audit_t *_bcp = (kmem_bufctl_audit_t *)(bcp); \
1502 1502 _bcp->bc_timestamp = gethrtime(); \
1503 1503 _bcp->bc_thread = curthread; \
1504 1504 _bcp->bc_depth = getpcstack(_bcp->bc_stack, KMEM_STACK_DEPTH); \
1505 1505 _bcp->bc_lastlog = kmem_log_enter((lp), _bcp, sizeof (*_bcp)); \
1506 1506 }
1507 1507
1508 1508 static void
1509 1509 kmem_log_event(kmem_log_header_t *lp, kmem_cache_t *cp,
1510 1510 kmem_slab_t *sp, void *addr)
1511 1511 {
1512 1512 kmem_bufctl_audit_t bca;
1513 1513
1514 1514 bzero(&bca, sizeof (kmem_bufctl_audit_t));
1515 1515 bca.bc_addr = addr;
1516 1516 bca.bc_slab = sp;
1517 1517 bca.bc_cache = cp;
1518 1518 KMEM_AUDIT(lp, cp, &bca);
1519 1519 }
1520 1520
1521 1521 /*
1522 1522 * Create a new slab for cache cp.
1523 1523 */
1524 1524 static kmem_slab_t *
1525 1525 kmem_slab_create(kmem_cache_t *cp, int kmflag)
1526 1526 {
1527 1527 size_t slabsize = cp->cache_slabsize;
1528 1528 size_t chunksize = cp->cache_chunksize;
1529 1529 int cache_flags = cp->cache_flags;
1530 1530 size_t color, chunks;
1531 1531 char *buf, *slab;
1532 1532 kmem_slab_t *sp;
1533 1533 kmem_bufctl_t *bcp;
1534 1534 vmem_t *vmp = cp->cache_arena;
1535 1535
1536 1536 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1537 1537
1538 1538 color = cp->cache_color + cp->cache_align;
1539 1539 if (color > cp->cache_maxcolor)
1540 1540 color = cp->cache_mincolor;
1541 1541 cp->cache_color = color;
1542 1542
1543 1543 slab = vmem_alloc(vmp, slabsize, kmflag & KM_VMFLAGS);
1544 1544
1545 1545 if (slab == NULL)
1546 1546 goto vmem_alloc_failure;
1547 1547
1548 1548 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1549 1549
1550 1550 /*
1551 1551 * Reverify what was already checked in kmem_cache_set_move(), since the
1552 1552 * consolidator depends (for correctness) on slabs being initialized
1553 1553 * with the 0xbaddcafe memory pattern (setting a low order bit usable by
1554 1554 * clients to distinguish uninitialized memory from known objects).
1555 1555 */
1556 1556 ASSERT((cp->cache_move == NULL) || !(cp->cache_cflags & KMC_NOTOUCH));
1557 1557 if (!(cp->cache_cflags & KMC_NOTOUCH))
1558 1558 copy_pattern(KMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1559 1559
1560 1560 if (cache_flags & KMF_HASH) {
1561 1561 if ((sp = kmem_cache_alloc(kmem_slab_cache, kmflag)) == NULL)
1562 1562 goto slab_alloc_failure;
1563 1563 chunks = (slabsize - color) / chunksize;
1564 1564 } else {
1565 1565 sp = KMEM_SLAB(cp, slab);
1566 1566 chunks = (slabsize - sizeof (kmem_slab_t) - color) / chunksize;
1567 1567 }
1568 1568
1569 1569 sp->slab_cache = cp;
1570 1570 sp->slab_head = NULL;
1571 1571 sp->slab_refcnt = 0;
1572 1572 sp->slab_base = buf = slab + color;
1573 1573 sp->slab_chunks = chunks;
1574 1574 sp->slab_stuck_offset = (uint32_t)-1;
1575 1575 sp->slab_later_count = 0;
1576 1576 sp->slab_flags = 0;
1577 1577
1578 1578 ASSERT(chunks > 0);
1579 1579 while (chunks-- != 0) {
1580 1580 if (cache_flags & KMF_HASH) {
1581 1581 bcp = kmem_cache_alloc(cp->cache_bufctl_cache, kmflag);
1582 1582 if (bcp == NULL)
1583 1583 goto bufctl_alloc_failure;
1584 1584 if (cache_flags & KMF_AUDIT) {
1585 1585 kmem_bufctl_audit_t *bcap =
1586 1586 (kmem_bufctl_audit_t *)bcp;
1587 1587 bzero(bcap, sizeof (kmem_bufctl_audit_t));
1588 1588 bcap->bc_cache = cp;
1589 1589 }
1590 1590 bcp->bc_addr = buf;
1591 1591 bcp->bc_slab = sp;
1592 1592 } else {
1593 1593 bcp = KMEM_BUFCTL(cp, buf);
1594 1594 }
1595 1595 if (cache_flags & KMF_BUFTAG) {
1596 1596 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1597 1597 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1598 1598 btp->bt_bufctl = bcp;
1599 1599 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
1600 1600 if (cache_flags & KMF_DEADBEEF) {
1601 1601 copy_pattern(KMEM_FREE_PATTERN, buf,
1602 1602 cp->cache_verify);
1603 1603 }
1604 1604 }
1605 1605 bcp->bc_next = sp->slab_head;
1606 1606 sp->slab_head = bcp;
1607 1607 buf += chunksize;
1608 1608 }
1609 1609
1610 1610 kmem_log_event(kmem_slab_log, cp, sp, slab);
1611 1611
1612 1612 return (sp);
1613 1613
1614 1614 bufctl_alloc_failure:
1615 1615
1616 1616 while ((bcp = sp->slab_head) != NULL) {
1617 1617 sp->slab_head = bcp->bc_next;
1618 1618 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1619 1619 }
1620 1620 kmem_cache_free(kmem_slab_cache, sp);
1621 1621
1622 1622 slab_alloc_failure:
1623 1623
1624 1624 vmem_free(vmp, slab, slabsize);
1625 1625
1626 1626 vmem_alloc_failure:
1627 1627
1628 1628 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1629 - atomic_add_64(&cp->cache_alloc_fail, 1);
1629 + atomic_inc_64(&cp->cache_alloc_fail);
1630 1630
1631 1631 return (NULL);
1632 1632 }
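A user-level sketch (with hypothetical align/mincolor/maxcolor values) of the slab coloring at the top of kmem_slab_create() above: each new slab starts its buffers at a different offset ("color"), stepping by the cache alignment and wrapping back to the minimum color, so buffers from successive slabs tend to land on different cache lines.

	#include <stdio.h>
	#include <stddef.h>

	int
	main(void)
	{
		size_t align = 64, mincolor = 0, maxcolor = 192;  /* assumed */
		size_t color = 0;				  /* cache_color */
		int i;

		for (i = 0; i < 8; i++) {
			/* same update as kmem_slab_create() */
			color += align;
			if (color > maxcolor)
				color = mincolor;
			/* prints 64 128 192 0 64 128 192 0 */
			printf("slab %d color %zu\n", i, color);
		}
		return (0);
	}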
1633 1633
1634 1634 /*
1635 1635 * Destroy a slab.
1636 1636 */
1637 1637 static void
1638 1638 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1639 1639 {
1640 1640 vmem_t *vmp = cp->cache_arena;
1641 1641 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1642 1642
1643 1643 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1644 1644 ASSERT(sp->slab_refcnt == 0);
1645 1645
1646 1646 if (cp->cache_flags & KMF_HASH) {
1647 1647 kmem_bufctl_t *bcp;
1648 1648 while ((bcp = sp->slab_head) != NULL) {
1649 1649 sp->slab_head = bcp->bc_next;
1650 1650 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1651 1651 }
1652 1652 kmem_cache_free(kmem_slab_cache, sp);
1653 1653 }
1654 1654 vmem_free(vmp, slab, cp->cache_slabsize);
1655 1655 }
1656 1656
1657 1657 static void *
1658 1658 kmem_slab_alloc_impl(kmem_cache_t *cp, kmem_slab_t *sp, boolean_t prefill)
1659 1659 {
1660 1660 kmem_bufctl_t *bcp, **hash_bucket;
1661 1661 void *buf;
1662 1662 boolean_t new_slab = (sp->slab_refcnt == 0);
1663 1663
1664 1664 ASSERT(MUTEX_HELD(&cp->cache_lock));
1665 1665 /*
1666 1666 * kmem_slab_alloc() drops cache_lock when it creates a new slab, so we
1667 1667 * can't ASSERT(avl_is_empty(&cp->cache_partial_slabs)) here when the
1668 1668 * slab is newly created.
1669 1669 */
1670 1670 ASSERT(new_slab || (KMEM_SLAB_IS_PARTIAL(sp) &&
1671 1671 (sp == avl_first(&cp->cache_partial_slabs))));
1672 1672 ASSERT(sp->slab_cache == cp);
1673 1673
1674 1674 cp->cache_slab_alloc++;
1675 1675 cp->cache_bufslab--;
1676 1676 sp->slab_refcnt++;
1677 1677
1678 1678 bcp = sp->slab_head;
1679 1679 sp->slab_head = bcp->bc_next;
1680 1680
1681 1681 if (cp->cache_flags & KMF_HASH) {
1682 1682 /*
1683 1683 * Add buffer to allocated-address hash table.
1684 1684 */
1685 1685 buf = bcp->bc_addr;
1686 1686 hash_bucket = KMEM_HASH(cp, buf);
1687 1687 bcp->bc_next = *hash_bucket;
1688 1688 *hash_bucket = bcp;
1689 1689 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1690 1690 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1691 1691 }
1692 1692 } else {
1693 1693 buf = KMEM_BUF(cp, bcp);
1694 1694 }
1695 1695
1696 1696 ASSERT(KMEM_SLAB_MEMBER(sp, buf));
1697 1697
1698 1698 if (sp->slab_head == NULL) {
1699 1699 ASSERT(KMEM_SLAB_IS_ALL_USED(sp));
1700 1700 if (new_slab) {
1701 1701 ASSERT(sp->slab_chunks == 1);
1702 1702 } else {
1703 1703 ASSERT(sp->slab_chunks > 1); /* the slab was partial */
1704 1704 avl_remove(&cp->cache_partial_slabs, sp);
1705 1705 sp->slab_later_count = 0; /* clear history */
1706 1706 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
1707 1707 sp->slab_stuck_offset = (uint32_t)-1;
1708 1708 }
1709 1709 list_insert_head(&cp->cache_complete_slabs, sp);
1710 1710 cp->cache_complete_slab_count++;
1711 1711 return (buf);
1712 1712 }
1713 1713
1714 1714 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
1715 1715 /*
1716 1716 * Peek to see if the magazine layer is enabled before
1717 1717 * we prefill. We're not holding the cpu cache lock,
1718 1718 * so the peek could be wrong, but there's no harm in it.
1719 1719 */
1720 1720 if (new_slab && prefill && (cp->cache_flags & KMF_PREFILL) &&
1721 1721 (KMEM_CPU_CACHE(cp)->cc_magsize != 0)) {
1722 1722 kmem_slab_prefill(cp, sp);
1723 1723 return (buf);
1724 1724 }
1725 1725
1726 1726 if (new_slab) {
1727 1727 avl_add(&cp->cache_partial_slabs, sp);
1728 1728 return (buf);
1729 1729 }
1730 1730
1731 1731 /*
1732 1732 * The slab is now more allocated than it was, so the
1733 1733 * order remains unchanged.
1734 1734 */
1735 1735 ASSERT(!avl_update(&cp->cache_partial_slabs, sp));
1736 1736 return (buf);
1737 1737 }
1738 1738
1739 1739 /*
1740 1740 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1741 1741 */
1742 1742 static void *
1743 1743 kmem_slab_alloc(kmem_cache_t *cp, int kmflag)
1744 1744 {
1745 1745 kmem_slab_t *sp;
1746 1746 void *buf;
1747 1747 boolean_t test_destructor;
1748 1748
1749 1749 mutex_enter(&cp->cache_lock);
1750 1750 test_destructor = (cp->cache_slab_alloc == 0);
1751 1751 sp = avl_first(&cp->cache_partial_slabs);
1752 1752 if (sp == NULL) {
1753 1753 ASSERT(cp->cache_bufslab == 0);
1754 1754
1755 1755 /*
1756 1756 * The freelist is empty. Create a new slab.
1757 1757 */
1758 1758 mutex_exit(&cp->cache_lock);
1759 1759 if ((sp = kmem_slab_create(cp, kmflag)) == NULL) {
1760 1760 return (NULL);
1761 1761 }
1762 1762 mutex_enter(&cp->cache_lock);
1763 1763 cp->cache_slab_create++;
1764 1764 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1765 1765 cp->cache_bufmax = cp->cache_buftotal;
1766 1766 cp->cache_bufslab += sp->slab_chunks;
1767 1767 }
1768 1768
1769 1769 buf = kmem_slab_alloc_impl(cp, sp, B_TRUE);
1770 1770 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1771 1771 (cp->cache_complete_slab_count +
1772 1772 avl_numnodes(&cp->cache_partial_slabs) +
1773 1773 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1774 1774 mutex_exit(&cp->cache_lock);
1775 1775
1776 1776 if (test_destructor && cp->cache_destructor != NULL) {
1777 1777 /*
1778 1778 * On the first kmem_slab_alloc(), assert that it is valid to
1779 1779 * call the destructor on a newly constructed object without any
1780 1780 * client involvement.
1781 1781 */
1782 1782 if ((cp->cache_constructor == NULL) ||
1783 1783 cp->cache_constructor(buf, cp->cache_private,
1784 1784 kmflag) == 0) {
1785 1785 cp->cache_destructor(buf, cp->cache_private);
1786 1786 }
1787 1787 copy_pattern(KMEM_UNINITIALIZED_PATTERN, buf,
1788 1788 cp->cache_bufsize);
1789 1789 if (cp->cache_flags & KMF_DEADBEEF) {
1790 1790 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
1791 1791 }
1792 1792 }
1793 1793
1794 1794 return (buf);
1795 1795 }
1796 1796
1797 1797 static void kmem_slab_move_yes(kmem_cache_t *, kmem_slab_t *, void *);
1798 1798
1799 1799 /*
1800 1800 * Free a raw (unconstructed) buffer to cp's slab layer.
1801 1801 */
1802 1802 static void
1803 1803 kmem_slab_free(kmem_cache_t *cp, void *buf)
1804 1804 {
1805 1805 kmem_slab_t *sp;
1806 1806 kmem_bufctl_t *bcp, **prev_bcpp;
1807 1807
1808 1808 ASSERT(buf != NULL);
1809 1809
1810 1810 mutex_enter(&cp->cache_lock);
1811 1811 cp->cache_slab_free++;
1812 1812
1813 1813 if (cp->cache_flags & KMF_HASH) {
1814 1814 /*
1815 1815 * Look up buffer in allocated-address hash table.
1816 1816 */
1817 1817 prev_bcpp = KMEM_HASH(cp, buf);
1818 1818 while ((bcp = *prev_bcpp) != NULL) {
1819 1819 if (bcp->bc_addr == buf) {
1820 1820 *prev_bcpp = bcp->bc_next;
1821 1821 sp = bcp->bc_slab;
1822 1822 break;
1823 1823 }
1824 1824 cp->cache_lookup_depth++;
1825 1825 prev_bcpp = &bcp->bc_next;
1826 1826 }
1827 1827 } else {
1828 1828 bcp = KMEM_BUFCTL(cp, buf);
1829 1829 sp = KMEM_SLAB(cp, buf);
1830 1830 }
1831 1831
1832 1832 if (bcp == NULL || sp->slab_cache != cp || !KMEM_SLAB_MEMBER(sp, buf)) {
1833 1833 mutex_exit(&cp->cache_lock);
1834 1834 kmem_error(KMERR_BADADDR, cp, buf);
1835 1835 return;
1836 1836 }
1837 1837
1838 1838 if (KMEM_SLAB_OFFSET(sp, buf) == sp->slab_stuck_offset) {
1839 1839 /*
1840 1840 * If this is the buffer that prevented the consolidator from
1841 1841 * clearing the slab, we can reset the slab flags now that the
1842 1842 * buffer is freed. (It makes sense to do this in
1843 1843 * kmem_cache_free(), where the client gives up ownership of the
1844 1844 * buffer, but on the hot path the test is too expensive.)
1845 1845 */
1846 1846 kmem_slab_move_yes(cp, sp, buf);
1847 1847 }
1848 1848
1849 1849 if ((cp->cache_flags & (KMF_AUDIT | KMF_BUFTAG)) == KMF_AUDIT) {
1850 1850 if (cp->cache_flags & KMF_CONTENTS)
1851 1851 ((kmem_bufctl_audit_t *)bcp)->bc_contents =
1852 1852 kmem_log_enter(kmem_content_log, buf,
1853 1853 cp->cache_contents);
1854 1854 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
1855 1855 }
1856 1856
1857 1857 bcp->bc_next = sp->slab_head;
1858 1858 sp->slab_head = bcp;
1859 1859
1860 1860 cp->cache_bufslab++;
1861 1861 ASSERT(sp->slab_refcnt >= 1);
1862 1862
1863 1863 if (--sp->slab_refcnt == 0) {
1864 1864 /*
1865 1865 * There are no outstanding allocations from this slab,
1866 1866 * so we can reclaim the memory.
1867 1867 */
1868 1868 if (sp->slab_chunks == 1) {
1869 1869 list_remove(&cp->cache_complete_slabs, sp);
1870 1870 cp->cache_complete_slab_count--;
1871 1871 } else {
1872 1872 avl_remove(&cp->cache_partial_slabs, sp);
1873 1873 }
1874 1874
1875 1875 cp->cache_buftotal -= sp->slab_chunks;
1876 1876 cp->cache_bufslab -= sp->slab_chunks;
1877 1877 /*
1878 1878 * Defer releasing the slab to the virtual memory subsystem
1879 1879 * while there is a pending move callback, since we guarantee
1880 1880 * that buffers passed to the move callback have only been
1881 1881 * touched by kmem or by the client itself. Since the memory
1882 1882 * patterns baddcafe (uninitialized) and deadbeef (freed) both
1883 1883 * set at least one of the two lowest order bits, the client can
1884 1884 * test those bits in the move callback to determine whether or
1885 1885 * not it knows about the buffer (assuming that the client also
1886 1886 * sets one of those low order bits whenever it frees a buffer).
1887 1887 */
1888 1888 if (cp->cache_defrag == NULL ||
1889 1889 (avl_is_empty(&cp->cache_defrag->kmd_moves_pending) &&
1890 1890 !(sp->slab_flags & KMEM_SLAB_MOVE_PENDING))) {
1891 1891 cp->cache_slab_destroy++;
1892 1892 mutex_exit(&cp->cache_lock);
1893 1893 kmem_slab_destroy(cp, sp);
1894 1894 } else {
1895 1895 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
1896 1896 /*
1897 1897 * Slabs are inserted at both ends of the deadlist to
1898 1898 * distinguish between slabs freed while move callbacks
1899 1899 * are pending (list head) and a slab freed while the
1900 1900 * lock is dropped in kmem_move_buffers() (list tail) so
1901 1901 * that in both cases slab_destroy() is called from the
1902 1902 * right context.
1903 1903 */
1904 1904 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
1905 1905 list_insert_tail(deadlist, sp);
1906 1906 } else {
1907 1907 list_insert_head(deadlist, sp);
1908 1908 }
1909 1909 cp->cache_defrag->kmd_deadcount++;
1910 1910 mutex_exit(&cp->cache_lock);
1911 1911 }
1912 1912 return;
1913 1913 }
1914 1914
1915 1915 if (bcp->bc_next == NULL) {
1916 1916 /* Transition the slab from completely allocated to partial. */
1917 1917 ASSERT(sp->slab_refcnt == (sp->slab_chunks - 1));
1918 1918 ASSERT(sp->slab_chunks > 1);
1919 1919 list_remove(&cp->cache_complete_slabs, sp);
1920 1920 cp->cache_complete_slab_count--;
1921 1921 avl_add(&cp->cache_partial_slabs, sp);
1922 1922 } else {
1923 1923 #ifdef DEBUG
1924 1924 if (avl_update_gt(&cp->cache_partial_slabs, sp)) {
1925 1925 KMEM_STAT_ADD(kmem_move_stats.kms_avl_update);
1926 1926 } else {
1927 1927 KMEM_STAT_ADD(kmem_move_stats.kms_avl_noupdate);
1928 1928 }
1929 1929 #else
1930 1930 (void) avl_update_gt(&cp->cache_partial_slabs, sp);
1931 1931 #endif
1932 1932 }
1933 1933
1934 1934 ASSERT((cp->cache_slab_create - cp->cache_slab_destroy) ==
1935 1935 (cp->cache_complete_slab_count +
1936 1936 avl_numnodes(&cp->cache_partial_slabs) +
1937 1937 (cp->cache_defrag == NULL ? 0 : cp->cache_defrag->kmd_deadcount)));
1938 1938 mutex_exit(&cp->cache_lock);
1939 1939 }
1940 1940
1941 1941 /*
1942 1942 * Return -1 if kmem_error, 1 if constructor fails, 0 if successful.
1943 1943 */
1944 1944 static int
1945 1945 kmem_cache_alloc_debug(kmem_cache_t *cp, void *buf, int kmflag, int construct,
1946 1946 caddr_t caller)
1947 1947 {
1948 1948 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
1949 1949 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
1950 1950 uint32_t mtbf;
1951 1951
1952 1952 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
1953 1953 kmem_error(KMERR_BADBUFTAG, cp, buf);
1954 1954 return (-1);
1955 1955 }
1956 1956
1957 1957 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_ALLOC;
1958 1958
1959 1959 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
1960 1960 kmem_error(KMERR_BADBUFCTL, cp, buf);
1961 1961 return (-1);
1962 1962 }
1963 1963
1964 1964 if (cp->cache_flags & KMF_DEADBEEF) {
1965 1965 if (!construct && (cp->cache_flags & KMF_LITE)) {
1966 1966 if (*(uint64_t *)buf != KMEM_FREE_PATTERN) {
1967 1967 kmem_error(KMERR_MODIFIED, cp, buf);
1968 1968 return (-1);
1969 1969 }
1970 1970 if (cp->cache_constructor != NULL)
1971 1971 *(uint64_t *)buf = btp->bt_redzone;
1972 1972 else
1973 1973 *(uint64_t *)buf = KMEM_UNINITIALIZED_PATTERN;
1974 1974 } else {
1975 1975 construct = 1;
1976 1976 if (verify_and_copy_pattern(KMEM_FREE_PATTERN,
1977 1977 KMEM_UNINITIALIZED_PATTERN, buf,
1978 1978 cp->cache_verify)) {
1979 1979 kmem_error(KMERR_MODIFIED, cp, buf);
1980 1980 return (-1);
1981 1981 }
1982 1982 }
1983 1983 }
1984 1984 btp->bt_redzone = KMEM_REDZONE_PATTERN;
1985 1985
1986 1986 if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1987 1987 gethrtime() % mtbf == 0 &&
1988 1988 (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1989 1989 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1990 1990 if (!construct && cp->cache_destructor != NULL)
1991 1991 cp->cache_destructor(buf, cp->cache_private);
1992 1992 } else {
1993 1993 mtbf = 0;
1994 1994 }
1995 1995
1996 1996 if (mtbf || (construct && cp->cache_constructor != NULL &&
1997 1997 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1998 - atomic_add_64(&cp->cache_alloc_fail, 1);
1998 + atomic_inc_64(&cp->cache_alloc_fail);
1999 1999 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2000 2000 if (cp->cache_flags & KMF_DEADBEEF)
2001 2001 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2002 2002 kmem_slab_free(cp, buf);
2003 2003 return (1);
2004 2004 }
2005 2005
2006 2006 if (cp->cache_flags & KMF_AUDIT) {
2007 2007 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2008 2008 }
2009 2009
2010 2010 if ((cp->cache_flags & KMF_LITE) &&
2011 2011 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2012 2012 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2013 2013 }
2014 2014
2015 2015 return (0);
2016 2016 }
2017 2017
2018 2018 static int
2019 2019 kmem_cache_free_debug(kmem_cache_t *cp, void *buf, caddr_t caller)
2020 2020 {
2021 2021 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2022 2022 kmem_bufctl_audit_t *bcp = (kmem_bufctl_audit_t *)btp->bt_bufctl;
2023 2023 kmem_slab_t *sp;
2024 2024
2025 2025 if (btp->bt_bxstat != ((intptr_t)bcp ^ KMEM_BUFTAG_ALLOC)) {
2026 2026 if (btp->bt_bxstat == ((intptr_t)bcp ^ KMEM_BUFTAG_FREE)) {
2027 2027 kmem_error(KMERR_DUPFREE, cp, buf);
2028 2028 return (-1);
2029 2029 }
2030 2030 sp = kmem_findslab(cp, buf);
2031 2031 if (sp == NULL || sp->slab_cache != cp)
2032 2032 kmem_error(KMERR_BADADDR, cp, buf);
2033 2033 else
2034 2034 kmem_error(KMERR_REDZONE, cp, buf);
2035 2035 return (-1);
2036 2036 }
2037 2037
2038 2038 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2039 2039
2040 2040 if ((cp->cache_flags & KMF_HASH) && bcp->bc_addr != buf) {
2041 2041 kmem_error(KMERR_BADBUFCTL, cp, buf);
2042 2042 return (-1);
2043 2043 }
2044 2044
2045 2045 if (btp->bt_redzone != KMEM_REDZONE_PATTERN) {
2046 2046 kmem_error(KMERR_REDZONE, cp, buf);
2047 2047 return (-1);
2048 2048 }
2049 2049
2050 2050 if (cp->cache_flags & KMF_AUDIT) {
2051 2051 if (cp->cache_flags & KMF_CONTENTS)
2052 2052 bcp->bc_contents = kmem_log_enter(kmem_content_log,
2053 2053 buf, cp->cache_contents);
2054 2054 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2055 2055 }
2056 2056
2057 2057 if ((cp->cache_flags & KMF_LITE) &&
2058 2058 !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2059 2059 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2060 2060 }
2061 2061
2062 2062 if (cp->cache_flags & KMF_DEADBEEF) {
2063 2063 if (cp->cache_flags & KMF_LITE)
2064 2064 btp->bt_redzone = *(uint64_t *)buf;
2065 2065 else if (cp->cache_destructor != NULL)
2066 2066 cp->cache_destructor(buf, cp->cache_private);
2067 2067
2068 2068 copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2069 2069 }
2070 2070
2071 2071 return (0);
2072 2072 }
2073 2073
2074 2074 /*
2075 2075 * Free each object in magazine mp to cp's slab layer, and free mp itself.
2076 2076 */
2077 2077 static void
2078 2078 kmem_magazine_destroy(kmem_cache_t *cp, kmem_magazine_t *mp, int nrounds)
2079 2079 {
2080 2080 int round;
2081 2081
2082 2082 ASSERT(!list_link_active(&cp->cache_link) ||
2083 2083 taskq_member(kmem_taskq, curthread));
2084 2084
2085 2085 for (round = 0; round < nrounds; round++) {
2086 2086 void *buf = mp->mag_round[round];
2087 2087
2088 2088 if (cp->cache_flags & KMF_DEADBEEF) {
2089 2089 if (verify_pattern(KMEM_FREE_PATTERN, buf,
2090 2090 cp->cache_verify) != NULL) {
2091 2091 kmem_error(KMERR_MODIFIED, cp, buf);
2092 2092 continue;
2093 2093 }
2094 2094 if ((cp->cache_flags & KMF_LITE) &&
2095 2095 cp->cache_destructor != NULL) {
2096 2096 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2097 2097 *(uint64_t *)buf = btp->bt_redzone;
2098 2098 cp->cache_destructor(buf, cp->cache_private);
2099 2099 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2100 2100 }
2101 2101 } else if (cp->cache_destructor != NULL) {
2102 2102 cp->cache_destructor(buf, cp->cache_private);
2103 2103 }
2104 2104
2105 2105 kmem_slab_free(cp, buf);
2106 2106 }
2107 2107 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2108 2108 kmem_cache_free(cp->cache_magtype->mt_cache, mp);
2109 2109 }
2110 2110
2111 2111 /*
2112 2112 * Allocate a magazine from the depot.
2113 2113 */
2114 2114 static kmem_magazine_t *
2115 2115 kmem_depot_alloc(kmem_cache_t *cp, kmem_maglist_t *mlp)
2116 2116 {
2117 2117 kmem_magazine_t *mp;
2118 2118
2119 2119 /*
2120 2120 * If we can't get the depot lock without contention,
2121 2121 * update our contention count. We use the depot
2122 2122 * contention rate to determine whether we need to
2123 2123 * increase the magazine size for better scalability.
2124 2124 */
2125 2125 if (!mutex_tryenter(&cp->cache_depot_lock)) {
2126 2126 mutex_enter(&cp->cache_depot_lock);
2127 2127 cp->cache_depot_contention++;
2128 2128 }
2129 2129
2130 2130 if ((mp = mlp->ml_list) != NULL) {
2131 2131 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2132 2132 mlp->ml_list = mp->mag_next;
2133 2133 if (--mlp->ml_total < mlp->ml_min)
2134 2134 mlp->ml_min = mlp->ml_total;
2135 2135 mlp->ml_alloc++;
2136 2136 }
2137 2137
2138 2138 mutex_exit(&cp->cache_depot_lock);
2139 2139
2140 2140 return (mp);
2141 2141 }
2142 2142
2143 2143 /*
2144 2144 * Free a magazine to the depot.
2145 2145 */
2146 2146 static void
2147 2147 kmem_depot_free(kmem_cache_t *cp, kmem_maglist_t *mlp, kmem_magazine_t *mp)
2148 2148 {
2149 2149 mutex_enter(&cp->cache_depot_lock);
2150 2150 ASSERT(KMEM_MAGAZINE_VALID(cp, mp));
2151 2151 mp->mag_next = mlp->ml_list;
2152 2152 mlp->ml_list = mp;
2153 2153 mlp->ml_total++;
2154 2154 mutex_exit(&cp->cache_depot_lock);
2155 2155 }
2156 2156
2157 2157 /*
2158 2158 * Update the working set statistics for cp's depot.
2159 2159 */
2160 2160 static void
2161 2161 kmem_depot_ws_update(kmem_cache_t *cp)
2162 2162 {
2163 2163 mutex_enter(&cp->cache_depot_lock);
2164 2164 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
2165 2165 cp->cache_full.ml_min = cp->cache_full.ml_total;
2166 2166 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
2167 2167 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
2168 2168 mutex_exit(&cp->cache_depot_lock);
2169 2169 }
2170 2170
2171 2171 /*
2172 2172 * Reap all magazines that have fallen out of the depot's working set.
2173 2173 */
2174 2174 static void
2175 2175 kmem_depot_ws_reap(kmem_cache_t *cp)
2176 2176 {
2177 2177 long reap;
2178 2178 kmem_magazine_t *mp;
2179 2179
2180 2180 ASSERT(!list_link_active(&cp->cache_link) ||
2181 2181 taskq_member(kmem_taskq, curthread));
2182 2182
2183 2183 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
2184 2184 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_full)) != NULL)
2185 2185 kmem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
2186 2186
2187 2187 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
2188 2188 while (reap-- && (mp = kmem_depot_alloc(cp, &cp->cache_empty)) != NULL)
2189 2189 kmem_magazine_destroy(cp, mp, 0);
2190 2190 }
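A user-level sketch (hypothetical numbers) of the working-set accounting implemented by kmem_depot_ws_update() and kmem_depot_ws_reap() above: ml_min records the lowest the magazine list dipped during the interval, so anything below that depth was never needed and becomes reapable in the next interval.

	#include <stdio.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	typedef struct maglist {
		long ml_total;		/* current # of magazines on the list */
		long ml_min;		/* lowest ml_total since the last update */
		long ml_reaplimit;	/* max # reapable this interval */
	} maglist_t;

	static void
	ws_update(maglist_t *mlp)
	{
		/* same assignments as kmem_depot_ws_update() */
		mlp->ml_reaplimit = mlp->ml_min;
		mlp->ml_min = mlp->ml_total;
	}

	int
	main(void)
	{
		maglist_t ml = { 10, 10, 0 };

		/* during the interval the depot dips to 4 magazines, then recovers */
		ml.ml_total = 4;  ml.ml_min = MIN(ml.ml_min, ml.ml_total);
		ml.ml_total = 10;

		ws_update(&ml);		/* reaplimit = 4, min = 10 */

		/* the next reap may destroy at most MIN(reaplimit, min) = 4 magazines */
		printf("reapable = %ld\n", MIN(ml.ml_reaplimit, ml.ml_min));
		return (0);
	}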
2191 2191
2192 2192 static void
2193 2193 kmem_cpu_reload(kmem_cpu_cache_t *ccp, kmem_magazine_t *mp, int rounds)
2194 2194 {
2195 2195 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
2196 2196 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
2197 2197 ASSERT(ccp->cc_magsize > 0);
2198 2198
2199 2199 ccp->cc_ploaded = ccp->cc_loaded;
2200 2200 ccp->cc_prounds = ccp->cc_rounds;
2201 2201 ccp->cc_loaded = mp;
2202 2202 ccp->cc_rounds = rounds;
2203 2203 }
2204 2204
2205 2205 /*
2206 2206 * Intercept kmem alloc/free calls during crash dump in order to avoid
2207 2207 * changing kmem state while memory is being saved to the dump device.
2208 2208 * Otherwise, ::kmem_verify will report "corrupt buffers". Note that
2209 2209 * there are no locks because only one CPU calls kmem during a crash
2210 2210 * dump. To enable this feature, first create the associated vmem
2211 2211 * arena with VMC_DUMPSAFE.
2212 2212 */
2213 2213 static void *kmem_dump_start; /* start of pre-reserved heap */
2214 2214 static void *kmem_dump_end; /* end of heap area */
2215 2215 static void *kmem_dump_curr; /* current free heap pointer */
2216 2216 static size_t kmem_dump_size; /* size of heap area */
2217 2217
2218 2218 /* appended to each buf created in the pre-reserved heap */
2219 2219 typedef struct kmem_dumpctl {
2220 2220 void *kdc_next; /* cache dump free list linkage */
2221 2221 } kmem_dumpctl_t;
2222 2222
2223 2223 #define KMEM_DUMPCTL(cp, buf) \
2224 2224 ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2225 2225 sizeof (void *)))
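A worked example (hypothetical buffer sizes, not kernel code) of where KMEM_DUMPCTL() places the kmem_dumpctl_t: just past the client buffer, rounded up to pointer alignment. P2ROUNDUP() is assumed to be the usual <sys/sysmacros.h> power-of-two round-up, (-(-(x) & -(align))).

	#include <stdio.h>
	#include <stdint.h>

	#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

	int
	main(void)
	{
		uintptr_t buf = 0x1000;			/* pretend buffer address */
		size_t bufsizes[] = { 20, 40, 64 };	/* hypothetical cache_bufsize values */
		int i;

		for (i = 0; i < 3; i++) {
			uintptr_t kdc = P2ROUNDUP(buf + bufsizes[i], sizeof (void *));
			/* e.g. bufsize 20 -> dumpctl at buf + 24, 40 -> buf + 40 */
			printf("bufsize %3zu: dumpctl at buf + %3lu\n",
			    bufsizes[i], (unsigned long)(kdc - buf));
		}
		return (0);
	}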
2226 2226
2227 2227 /* Keep some simple stats. */
2228 2228 #define KMEM_DUMP_LOGS (100)
2229 2229
2230 2230 typedef struct kmem_dump_log {
2231 2231 kmem_cache_t *kdl_cache;
2232 2232 uint_t kdl_allocs; /* # of dump allocations */
2233 2233 uint_t kdl_frees; /* # of dump frees */
2234 2234 uint_t kdl_alloc_fails; /* # of allocation failures */
2235 2235 uint_t kdl_free_nondump; /* # of non-dump frees */
2236 2236 uint_t kdl_unsafe; /* cache was used, but unsafe */
2237 2237 } kmem_dump_log_t;
2238 2238
2239 2239 static kmem_dump_log_t *kmem_dump_log;
2240 2240 static int kmem_dump_log_idx;
2241 2241
2242 2242 #define KDI_LOG(cp, stat) { \
2243 2243 kmem_dump_log_t *kdl; \
2244 2244 if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) { \
2245 2245 kdl->stat++; \
2246 2246 } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) { \
2247 2247 kdl = &kmem_dump_log[kmem_dump_log_idx++]; \
2248 2248 kdl->stat++; \
2249 2249 kdl->kdl_cache = (cp); \
2250 2250 (cp)->cache_dumplog = kdl; \
2251 2251 } \
2252 2252 }
2253 2253
2254 2254 /* set non-zero for a full report */
2255 2255 uint_t kmem_dump_verbose = 0;
2256 2256
2257 2257 /* stats for the oversize heap */
2258 2258 uint_t kmem_dump_oversize_allocs = 0;
2259 2259 uint_t kmem_dump_oversize_max = 0;
2260 2260
2261 2261 static void
2262 2262 kmem_dumppr(char **pp, char *e, const char *format, ...)
2263 2263 {
2264 2264 char *p = *pp;
2265 2265
2266 2266 if (p < e) {
2267 2267 int n;
2268 2268 va_list ap;
2269 2269
2270 2270 va_start(ap, format);
2271 2271 n = vsnprintf(p, e - p, format, ap);
2272 2272 va_end(ap);
2273 2273 *pp = p + n;
2274 2274 }
2275 2275 }
2276 2276
2277 2277 /*
2278 2278 * Called when dumpadm(1M) configures dump parameters.
2279 2279 */
2280 2280 void
2281 2281 kmem_dump_init(size_t size)
2282 2282 {
2283 2283 if (kmem_dump_start != NULL)
2284 2284 kmem_free(kmem_dump_start, kmem_dump_size);
2285 2285
2286 2286 if (kmem_dump_log == NULL)
2287 2287 kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
2288 2288 sizeof (kmem_dump_log_t), KM_SLEEP);
2289 2289
2290 2290 kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2291 2291
2292 2292 if (kmem_dump_start != NULL) {
2293 2293 kmem_dump_size = size;
2294 2294 kmem_dump_curr = kmem_dump_start;
2295 2295 kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2296 2296 copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2297 2297 } else {
2298 2298 kmem_dump_size = 0;
2299 2299 kmem_dump_curr = NULL;
2300 2300 kmem_dump_end = NULL;
2301 2301 }
2302 2302 }
2303 2303
2304 2304 /*
2305 2305 * Set a flag for each kmem_cache_t if it is safe to use alternate dump
2306 2306 * memory. Called just before panic crash dump starts. Set the flag
2307 2307 * for the calling CPU.
2308 2308 */
2309 2309 void
2310 2310 kmem_dump_begin(void)
2311 2311 {
2312 2312 ASSERT(panicstr != NULL);
2313 2313 if (kmem_dump_start != NULL) {
2314 2314 kmem_cache_t *cp;
2315 2315
2316 2316 for (cp = list_head(&kmem_caches); cp != NULL;
2317 2317 cp = list_next(&kmem_caches, cp)) {
2318 2318 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2319 2319
2320 2320 if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2321 2321 cp->cache_flags |= KMF_DUMPDIVERT;
2322 2322 ccp->cc_flags |= KMF_DUMPDIVERT;
2323 2323 ccp->cc_dump_rounds = ccp->cc_rounds;
2324 2324 ccp->cc_dump_prounds = ccp->cc_prounds;
2325 2325 ccp->cc_rounds = ccp->cc_prounds = -1;
2326 2326 } else {
2327 2327 cp->cache_flags |= KMF_DUMPUNSAFE;
2328 2328 ccp->cc_flags |= KMF_DUMPUNSAFE;
2329 2329 }
2330 2330 }
2331 2331 }
2332 2332 }
2333 2333
2334 2334 /*
2335 2335 * finished dump intercept
2336 2336 * print any warnings on the console
2337 2337 * return verbose information to dumpsys() in the given buffer
2338 2338 */
2339 2339 size_t
2340 2340 kmem_dump_finish(char *buf, size_t size)
2341 2341 {
2342 2342 int kdi_idx;
2343 2343 int kdi_end = kmem_dump_log_idx;
2344 2344 int percent = 0;
2345 2345 int header = 0;
2346 2346 int warn = 0;
2347 2347 size_t used;
2348 2348 kmem_cache_t *cp;
2349 2349 kmem_dump_log_t *kdl;
2350 2350 char *e = buf + size;
2351 2351 char *p = buf;
2352 2352
2353 2353 if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
2354 2354 return (0);
2355 2355
2356 2356 used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2357 2357 percent = (used * 100) / kmem_dump_size;
2358 2358
2359 2359 kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2360 2360 kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2361 2361 kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2362 2362 kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2363 2363 kmem_dump_oversize_allocs);
2364 2364 kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2365 2365 kmem_dump_oversize_max);
2366 2366
2367 2367 for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
2368 2368 kdl = &kmem_dump_log[kdi_idx];
2369 2369 cp = kdl->kdl_cache;
2370 2370 if (cp == NULL)
2371 2371 break;
2372 2372 if (kdl->kdl_alloc_fails)
2373 2373 ++warn;
2374 2374 if (header == 0) {
2375 2375 kmem_dumppr(&p, e,
2376 2376 "Cache Name,Allocs,Frees,Alloc Fails,"
2377 2377 "Nondump Frees,Unsafe Allocs/Frees\n");
2378 2378 header = 1;
2379 2379 }
2380 2380 kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
2381 2381 cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2382 2382 kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
2383 2383 kdl->kdl_unsafe);
2384 2384 }
2385 2385
2386 2386 /* return buffer size used */
2387 2387 if (p < e)
2388 2388 bzero(p, e - p);
2389 2389 return (p - buf);
2390 2390 }
2391 2391
2392 2392 /*
2393 2393 * Allocate a constructed object from alternate dump memory.
2394 2394 */
2395 2395 void *
2396 2396 kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2397 2397 {
2398 2398 void *buf;
2399 2399 void *curr;
2400 2400 char *bufend;
2401 2401
2402 2402 /* return a constructed object */
2403 2403 if ((buf = cp->cache_dumpfreelist) != NULL) {
2404 2404 cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2405 2405 KDI_LOG(cp, kdl_allocs);
2406 2406 return (buf);
2407 2407 }
2408 2408
2409 2409 /* create a new constructed object */
2410 2410 curr = kmem_dump_curr;
2411 2411 buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2412 2412 bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2413 2413
2414 2414 /* hat layer objects cannot cross a page boundary */
2415 2415 if (cp->cache_align < PAGESIZE) {
2416 2416 char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2417 2417 if (bufend > page) {
2418 2418 bufend += page - (char *)buf;
2419 2419 buf = (void *)page;
2420 2420 }
2421 2421 }
2422 2422
2423 2423 /* fall back to normal alloc if reserved area is used up */
2424 2424 if (bufend > (char *)kmem_dump_end) {
2425 2425 kmem_dump_curr = kmem_dump_end;
2426 2426 KDI_LOG(cp, kdl_alloc_fails);
2427 2427 return (NULL);
2428 2428 }
2429 2429
2430 2430 /*
2431 2431 * Must advance curr pointer before calling a constructor that
2432 2432 * may also allocate memory.
2433 2433 */
2434 2434 kmem_dump_curr = bufend;
2435 2435
2436 2436 /* run constructor */
2437 2437 if (cp->cache_constructor != NULL &&
2438 2438 cp->cache_constructor(buf, cp->cache_private, kmflag)
2439 2439 != 0) {
2440 2440 #ifdef DEBUG
2441 2441 printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2442 2442 cp->cache_name, (void *)cp);
2443 2443 #endif
2444 2444 /* reset curr pointer iff no allocs were done */
2445 2445 if (kmem_dump_curr == bufend)
2446 2446 kmem_dump_curr = curr;
2447 2447
2448 2448 /* fall back to normal alloc if the constructor fails */
2449 2449 KDI_LOG(cp, kdl_alloc_fails);
2450 2450 return (NULL);
2451 2451 }
2452 2452
2453 2453 KDI_LOG(cp, kdl_allocs);
2454 2454 return (buf);
2455 2455 }
2456 2456
2457 2457 /*
2458 2458 * Free a constructed object in alternate dump memory.
2459 2459 */
2460 2460 int
2461 2461 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2462 2462 {
2463 2463 /* save constructed buffers for next time */
2464 2464 if ((char *)buf >= (char *)kmem_dump_start &&
2465 2465 (char *)buf < (char *)kmem_dump_end) {
2466 2466 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2467 2467 cp->cache_dumpfreelist = buf;
2468 2468 KDI_LOG(cp, kdl_frees);
2469 2469 return (0);
2470 2470 }
2471 2471
2472 2472 /* count all non-dump buf frees */
2473 2473 KDI_LOG(cp, kdl_free_nondump);
2474 2474
2475 2475 /* just drop buffers that were allocated before dump started */
2476 2476 if (kmem_dump_curr < kmem_dump_end)
2477 2477 return (0);
2478 2478
2479 2479 /* fall back to normal free if reserved area is used up */
2480 2480 return (1);
2481 2481 }
2482 2482
2483 2483 /*
2484 2484 * Allocate a constructed object from cache cp.
2485 2485 */
2486 2486 void *
2487 2487 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2488 2488 {
2489 2489 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2490 2490 kmem_magazine_t *fmp;
2491 2491 void *buf;
2492 2492
2493 2493 mutex_enter(&ccp->cc_lock);
2494 2494 for (;;) {
2495 2495 /*
2496 2496 * If there's an object available in the current CPU's
2497 2497 * loaded magazine, just take it and return.
2498 2498 */
2499 2499 if (ccp->cc_rounds > 0) {
2500 2500 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2501 2501 ccp->cc_alloc++;
2502 2502 mutex_exit(&ccp->cc_lock);
2503 2503 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2504 2504 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2505 2505 ASSERT(!(ccp->cc_flags &
2506 2506 KMF_DUMPDIVERT));
2507 2507 KDI_LOG(cp, kdl_unsafe);
2508 2508 }
2509 2509 if ((ccp->cc_flags & KMF_BUFTAG) &&
2510 2510 kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2511 2511 caller()) != 0) {
2512 2512 if (kmflag & KM_NOSLEEP)
2513 2513 return (NULL);
2514 2514 mutex_enter(&ccp->cc_lock);
2515 2515 continue;
2516 2516 }
2517 2517 }
2518 2518 return (buf);
2519 2519 }
2520 2520
2521 2521 /*
2522 2522 * The loaded magazine is empty. If the previously loaded
2523 2523 * magazine was full, exchange them and try again.
2524 2524 */
2525 2525 if (ccp->cc_prounds > 0) {
2526 2526 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2527 2527 continue;
2528 2528 }
2529 2529
2530 2530 /*
2531 2531 * Return an alternate buffer at dump time to preserve
2532 2532 * the heap.
2533 2533 */
2534 2534 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2535 2535 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2536 2536 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2537 2537 /* log it so that we can warn about it */
2538 2538 KDI_LOG(cp, kdl_unsafe);
2539 2539 } else {
2540 2540 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2541 2541 NULL) {
2542 2542 mutex_exit(&ccp->cc_lock);
2543 2543 return (buf);
2544 2544 }
2545 2545 break; /* fall back to slab layer */
2546 2546 }
2547 2547 }
2548 2548
2549 2549 /*
2550 2550 * If the magazine layer is disabled, break out now.
2551 2551 */
2552 2552 if (ccp->cc_magsize == 0)
2553 2553 break;
2554 2554
2555 2555 /*
2556 2556 * Try to get a full magazine from the depot.
2557 2557 */
2558 2558 fmp = kmem_depot_alloc(cp, &cp->cache_full);
2559 2559 if (fmp != NULL) {
2560 2560 if (ccp->cc_ploaded != NULL)
2561 2561 kmem_depot_free(cp, &cp->cache_empty,
2562 2562 ccp->cc_ploaded);
2563 2563 kmem_cpu_reload(ccp, fmp, ccp->cc_magsize);
2564 2564 continue;
2565 2565 }
2566 2566
2567 2567 /*
2568 2568 * There are no full magazines in the depot,
2569 2569 * so fall through to the slab layer.
2570 2570 */
2571 2571 break;
2572 2572 }
2573 2573 mutex_exit(&ccp->cc_lock);
2574 2574
2575 2575 /*
2576 2576 * We couldn't allocate a constructed object from the magazine layer,
2577 2577 * so get a raw buffer from the slab layer and apply its constructor.
2578 2578 */
2579 2579 buf = kmem_slab_alloc(cp, kmflag);
2580 2580
2581 2581 if (buf == NULL)
2582 2582 return (NULL);
2583 2583
2584 2584 if (cp->cache_flags & KMF_BUFTAG) {
2585 2585 /*
2586 2586 * Make kmem_cache_alloc_debug() apply the constructor for us.
2587 2587 */
2588 2588 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2589 2589 if (rc != 0) {
2590 2590 if (kmflag & KM_NOSLEEP)
2591 2591 return (NULL);
2592 2592 /*
2593 2593 * kmem_cache_alloc_debug() detected corruption
2594 2594 * but didn't panic (kmem_panic <= 0). We should not be
2595 2595 * here because the constructor failed (indicated by a
2596 2596 * return code of 1). Try again.
2597 2597 */
2598 2598 ASSERT(rc == -1);
2599 2599 return (kmem_cache_alloc(cp, kmflag));
2600 2600 }
2601 2601 return (buf);
2602 2602 }
2603 2603
2604 2604 if (cp->cache_constructor != NULL &&
2605 2605 cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2606 - atomic_add_64(&cp->cache_alloc_fail, 1);
2606 + atomic_inc_64(&cp->cache_alloc_fail);
2607 2607 kmem_slab_free(cp, buf);
2608 2608 return (NULL);
2609 2609 }
2610 2610
2611 2611 return (buf);
2612 2612 }
2613 2613
2614 2614 /*
2615 2615 * The freed argument tells whether or not kmem_cache_free_debug() has already
2616 2616 * been called so that we can avoid the duplicate free error. For example, a
2617 2617 * buffer on a magazine has already been freed by the client but is still
2618 2618 * constructed.
2619 2619 */
2620 2620 static void
2621 2621 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2622 2622 {
2623 2623 if (!freed && (cp->cache_flags & KMF_BUFTAG))
2624 2624 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2625 2625 return;
2626 2626
2627 2627 /*
2628 2628 * Note that if KMF_DEADBEEF is in effect and KMF_LITE is not,
2629 2629 * kmem_cache_free_debug() will have already applied the destructor.
2630 2630 */
2631 2631 if ((cp->cache_flags & (KMF_DEADBEEF | KMF_LITE)) != KMF_DEADBEEF &&
2632 2632 cp->cache_destructor != NULL) {
2633 2633 if (cp->cache_flags & KMF_DEADBEEF) { /* KMF_LITE implied */
2634 2634 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2635 2635 *(uint64_t *)buf = btp->bt_redzone;
2636 2636 cp->cache_destructor(buf, cp->cache_private);
2637 2637 *(uint64_t *)buf = KMEM_FREE_PATTERN;
2638 2638 } else {
2639 2639 cp->cache_destructor(buf, cp->cache_private);
2640 2640 }
2641 2641 }
2642 2642
2643 2643 kmem_slab_free(cp, buf);
2644 2644 }
2645 2645
2646 2646 /*
2647 2647 * Used when there's no room to free a buffer to the per-CPU cache.
2648 2648 * Drops and re-acquires &ccp->cc_lock, and returns non-zero if the
2649 2649 * caller should try freeing to the per-CPU cache again.
2650 2650 * Note that we don't directly install the magazine in the cpu cache,
2651 2651 * since its state may have changed wildly while the lock was dropped.
2652 2652 */
2653 2653 static int
2654 2654 kmem_cpucache_magazine_alloc(kmem_cpu_cache_t *ccp, kmem_cache_t *cp)
2655 2655 {
2656 2656 kmem_magazine_t *emp;
2657 2657 kmem_magtype_t *mtp;
2658 2658
2659 2659 ASSERT(MUTEX_HELD(&ccp->cc_lock));
2660 2660 ASSERT(((uint_t)ccp->cc_rounds == ccp->cc_magsize ||
2661 2661 ((uint_t)ccp->cc_rounds == -1)) &&
2662 2662 ((uint_t)ccp->cc_prounds == ccp->cc_magsize ||
2663 2663 ((uint_t)ccp->cc_prounds == -1)));
2664 2664
2665 2665 emp = kmem_depot_alloc(cp, &cp->cache_empty);
2666 2666 if (emp != NULL) {
2667 2667 if (ccp->cc_ploaded != NULL)
2668 2668 kmem_depot_free(cp, &cp->cache_full,
2669 2669 ccp->cc_ploaded);
2670 2670 kmem_cpu_reload(ccp, emp, 0);
2671 2671 return (1);
2672 2672 }
2673 2673 /*
2674 2674 * There are no empty magazines in the depot,
2675 2675 * so try to allocate a new one. We must drop all locks
2676 2676 * across kmem_cache_alloc() because lower layers may
2677 2677 * attempt to allocate from this cache.
2678 2678 */
2679 2679 mtp = cp->cache_magtype;
2680 2680 mutex_exit(&ccp->cc_lock);
2681 2681 emp = kmem_cache_alloc(mtp->mt_cache, KM_NOSLEEP);
2682 2682 mutex_enter(&ccp->cc_lock);
2683 2683
2684 2684 if (emp != NULL) {
2685 2685 /*
2686 2686 * We successfully allocated an empty magazine.
2687 2687 * However, we had to drop ccp->cc_lock to do it,
2688 2688 * so the cache's magazine size may have changed.
2689 2689 * If so, free the magazine and try again.
2690 2690 */
2691 2691 if (ccp->cc_magsize != mtp->mt_magsize) {
2692 2692 mutex_exit(&ccp->cc_lock);
2693 2693 kmem_cache_free(mtp->mt_cache, emp);
2694 2694 mutex_enter(&ccp->cc_lock);
2695 2695 return (1);
2696 2696 }
2697 2697
2698 2698 /*
2699 2699 * We got a magazine of the right size. Add it to
2700 2700 * the depot and try the whole dance again.
2701 2701 */
2702 2702 kmem_depot_free(cp, &cp->cache_empty, emp);
2703 2703 return (1);
2704 2704 }
2705 2705
2706 2706 /*
2707 2707 * We couldn't allocate an empty magazine,
2708 2708 * so fall through to the slab layer.
2709 2709 */
2710 2710 return (0);
2711 2711 }
2712 2712
2713 2713 /*
2714 2714 * Free a constructed object to cache cp.
2715 2715 */
2716 2716 void
2717 2717 kmem_cache_free(kmem_cache_t *cp, void *buf)
2718 2718 {
2719 2719 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2720 2720
2721 2721 /*
2722 2722 * The client must not free either of the buffers passed to the move
2723 2723 * callback function.
2724 2724 */
2725 2725 ASSERT(cp->cache_defrag == NULL ||
2726 2726 cp->cache_defrag->kmd_thread != curthread ||
2727 2727 (buf != cp->cache_defrag->kmd_from_buf &&
2728 2728 buf != cp->cache_defrag->kmd_to_buf));
2729 2729
2730 2730 if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2731 2731 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2732 2732 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2733 2733 /* log it so that we can warn about it */
2734 2734 KDI_LOG(cp, kdl_unsafe);
2735 2735 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2736 2736 return;
2737 2737 }
2738 2738 if (ccp->cc_flags & KMF_BUFTAG) {
2739 2739 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2740 2740 return;
2741 2741 }
2742 2742 }
2743 2743
2744 2744 mutex_enter(&ccp->cc_lock);
2745 2745 /*
2746 2746 * Any changes to this logic should be reflected in kmem_slab_prefill()
2747 2747 */
2748 2748 for (;;) {
2749 2749 /*
2750 2750 * If there's a slot available in the current CPU's
2751 2751 * loaded magazine, just put the object there and return.
2752 2752 */
2753 2753 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2754 2754 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
2755 2755 ccp->cc_free++;
2756 2756 mutex_exit(&ccp->cc_lock);
2757 2757 return;
2758 2758 }
2759 2759
2760 2760 /*
2761 2761 * The loaded magazine is full. If the previously loaded
2762 2762 * magazine was empty, exchange them and try again.
2763 2763 */
2764 2764 if (ccp->cc_prounds == 0) {
2765 2765 kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2766 2766 continue;
2767 2767 }
2768 2768
2769 2769 /*
2770 2770 * If the magazine layer is disabled, break out now.
2771 2771 */
2772 2772 if (ccp->cc_magsize == 0)
2773 2773 break;
2774 2774
2775 2775 if (!kmem_cpucache_magazine_alloc(ccp, cp)) {
2776 2776 /*
2777 2777 * We couldn't free our constructed object to the
2778 2778 * magazine layer, so apply its destructor and free it
2779 2779 * to the slab layer.
2780 2780 */
2781 2781 break;
2782 2782 }
2783 2783 }
2784 2784 mutex_exit(&ccp->cc_lock);
2785 2785 kmem_slab_free_constructed(cp, buf, B_TRUE);
2786 2786 }
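
Taken together, kmem_cache_alloc() and kmem_cache_free() above give clients an object cache whose common case never leaves the CPU magazine layer. A minimal client sketch, assuming a hypothetical foo_t type and foo_cache; only the kmem_cache_* calls are the interfaces defined in this file:

	typedef struct foo { int f_busy; } foo_t;
	static kmem_cache_t *foo_cache;

	void
	foo_init(void)
	{
		/* no constructor, destructor, or reclaim; default arena and flags */
		foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
		    NULL, NULL, NULL, NULL, NULL, 0);
	}

	foo_t *
	foo_hold(void)
	{
		/* fast path: a round popped from the loaded magazine */
		return (kmem_cache_alloc(foo_cache, KM_SLEEP));
	}

	void
	foo_rele(foo_t *fp)
	{
		/* fast path: pushed back into the loaded magazine */
		kmem_cache_free(foo_cache, fp);
	}
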
2787 2787
2788 2788 static void
2789 2789 kmem_slab_prefill(kmem_cache_t *cp, kmem_slab_t *sp)
2790 2790 {
2791 2791 kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2792 2792 int cache_flags = cp->cache_flags;
2793 2793
2794 2794 kmem_bufctl_t *next, *head;
2795 2795 size_t nbufs;
2796 2796
2797 2797 /*
2798 2798 * Completely allocate the newly created slab and put the pre-allocated
2799 2799 * buffers in magazines. Any of the buffers that cannot be put in
2800 2800 * magazines must be returned to the slab.
2801 2801 */
2802 2802 ASSERT(MUTEX_HELD(&cp->cache_lock));
2803 2803 ASSERT((cache_flags & (KMF_PREFILL|KMF_BUFTAG)) == KMF_PREFILL);
2804 2804 ASSERT(cp->cache_constructor == NULL);
2805 2805 ASSERT(sp->slab_cache == cp);
2806 2806 ASSERT(sp->slab_refcnt == 1);
2807 2807 ASSERT(sp->slab_head != NULL && sp->slab_chunks > sp->slab_refcnt);
2808 2808 ASSERT(avl_find(&cp->cache_partial_slabs, sp, NULL) == NULL);
2809 2809
2810 2810 head = sp->slab_head;
2811 2811 nbufs = (sp->slab_chunks - sp->slab_refcnt);
2812 2812 sp->slab_head = NULL;
2813 2813 sp->slab_refcnt += nbufs;
2814 2814 cp->cache_bufslab -= nbufs;
2815 2815 cp->cache_slab_alloc += nbufs;
2816 2816 list_insert_head(&cp->cache_complete_slabs, sp);
2817 2817 cp->cache_complete_slab_count++;
2818 2818 mutex_exit(&cp->cache_lock);
2819 2819 mutex_enter(&ccp->cc_lock);
2820 2820
2821 2821 while (head != NULL) {
2822 2822 void *buf = KMEM_BUF(cp, head);
2823 2823 /*
2824 2824 * If there's a slot available in the current CPU's
2825 2825 * loaded magazine, just put the object there and
2826 2826 * continue.
2827 2827 */
2828 2828 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2829 2829 ccp->cc_loaded->mag_round[ccp->cc_rounds++] =
2830 2830 buf;
2831 2831 ccp->cc_free++;
2832 2832 nbufs--;
2833 2833 head = head->bc_next;
2834 2834 continue;
2835 2835 }
2836 2836
2837 2837 /*
2838 2838 * The loaded magazine is full. If the previously
2839 2839 * loaded magazine was empty, exchange them and try
2840 2840 * again.
2841 2841 */
2842 2842 if (ccp->cc_prounds == 0) {
2843 2843 kmem_cpu_reload(ccp, ccp->cc_ploaded,
2844 2844 ccp->cc_prounds);
2845 2845 continue;
2846 2846 }
2847 2847
2848 2848 /*
2849 2849 * If the magazine layer is disabled, break out now.
2850 2850 */
2851 2851
2852 2852 if (ccp->cc_magsize == 0) {
2853 2853 break;
2854 2854 }
2855 2855
2856 2856 if (!kmem_cpucache_magazine_alloc(ccp, cp))
2857 2857 break;
2858 2858 }
2859 2859 mutex_exit(&ccp->cc_lock);
2860 2860 if (nbufs != 0) {
2861 2861 ASSERT(head != NULL);
2862 2862
2863 2863 /*
2864 2864 * If there was a failure, return remaining objects to
2865 2865 * the slab
2866 2866 */
2867 2867 while (head != NULL) {
2868 2868 ASSERT(nbufs != 0);
2869 2869 next = head->bc_next;
2870 2870 head->bc_next = NULL;
2871 2871 kmem_slab_free(cp, KMEM_BUF(cp, head));
2872 2872 head = next;
2873 2873 nbufs--;
2874 2874 }
2875 2875 }
2876 2876 ASSERT(head == NULL);
2877 2877 ASSERT(nbufs == 0);
2878 2878 mutex_enter(&cp->cache_lock);
2879 2879 }
2880 2880
2881 2881 void *
2882 2882 kmem_zalloc(size_t size, int kmflag)
2883 2883 {
2884 2884 size_t index;
2885 2885 void *buf;
2886 2886
2887 2887 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2888 2888 kmem_cache_t *cp = kmem_alloc_table[index];
2889 2889 buf = kmem_cache_alloc(cp, kmflag);
2890 2890 if (buf != NULL) {
2891 2891 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2892 2892 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2893 2893 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2894 2894 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2895 2895
2896 2896 if (cp->cache_flags & KMF_LITE) {
2897 2897 KMEM_BUFTAG_LITE_ENTER(btp,
2898 2898 kmem_lite_count, caller());
2899 2899 }
2900 2900 }
2901 2901 bzero(buf, size);
2902 2902 }
2903 2903 } else {
2904 2904 buf = kmem_alloc(size, kmflag);
2905 2905 if (buf != NULL)
2906 2906 bzero(buf, size);
2907 2907 }
2908 2908 return (buf);
2909 2909 }
2910 2910
2911 2911 void *
2912 2912 kmem_alloc(size_t size, int kmflag)
2913 2913 {
2914 2914 size_t index;
2915 2915 kmem_cache_t *cp;
2916 2916 void *buf;
2917 2917
2918 2918 if ((index = ((size - 1) >> KMEM_ALIGN_SHIFT)) < KMEM_ALLOC_TABLE_MAX) {
2919 2919 cp = kmem_alloc_table[index];
2920 2920 /* fall through to kmem_cache_alloc() */
2921 2921
2922 2922 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2923 2923 kmem_big_alloc_table_max) {
2924 2924 cp = kmem_big_alloc_table[index];
2925 2925 /* fall through to kmem_cache_alloc() */
2926 2926
2927 2927 } else {
2928 2928 if (size == 0)
2929 2929 return (NULL);
2930 2930
2931 2931 buf = vmem_alloc(kmem_oversize_arena, size,
2932 2932 kmflag & KM_VMFLAGS);
2933 2933 if (buf == NULL)
2934 2934 kmem_log_event(kmem_failure_log, NULL, NULL,
2935 2935 (void *)size);
2936 2936 else if (KMEM_DUMP(kmem_slab_cache)) {
2937 2937 /* stats for dump intercept */
2938 2938 kmem_dump_oversize_allocs++;
2939 2939 if (size > kmem_dump_oversize_max)
2940 2940 kmem_dump_oversize_max = size;
2941 2941 }
2942 2942 return (buf);
2943 2943 }
2944 2944
2945 2945 buf = kmem_cache_alloc(cp, kmflag);
2946 2946 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp) && buf != NULL) {
2947 2947 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2948 2948 ((uint8_t *)buf)[size] = KMEM_REDZONE_BYTE;
2949 2949 ((uint32_t *)btp)[1] = KMEM_SIZE_ENCODE(size);
2950 2950
2951 2951 if (cp->cache_flags & KMF_LITE) {
2952 2952 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller());
2953 2953 }
2954 2954 }
2955 2955 return (buf);
2956 2956 }
2957 2957
2958 2958 void
2959 2959 kmem_free(void *buf, size_t size)
2960 2960 {
2961 2961 size_t index;
2962 2962 kmem_cache_t *cp;
2963 2963
2964 2964 if ((index = (size - 1) >> KMEM_ALIGN_SHIFT) < KMEM_ALLOC_TABLE_MAX) {
2965 2965 cp = kmem_alloc_table[index];
2966 2966 /* fall through to kmem_cache_free() */
2967 2967
2968 2968 } else if ((index = ((size - 1) >> KMEM_BIG_SHIFT)) <
2969 2969 kmem_big_alloc_table_max) {
2970 2970 cp = kmem_big_alloc_table[index];
2971 2971 /* fall through to kmem_cache_free() */
2972 2972
2973 2973 } else {
2974 2974 EQUIV(buf == NULL, size == 0);
2975 2975 if (buf == NULL && size == 0)
2976 2976 return;
2977 2977 vmem_free(kmem_oversize_arena, buf, size);
2978 2978 return;
2979 2979 }
2980 2980
2981 2981 if ((cp->cache_flags & KMF_BUFTAG) && !KMEM_DUMP(cp)) {
2982 2982 kmem_buftag_t *btp = KMEM_BUFTAG(cp, buf);
2983 2983 uint32_t *ip = (uint32_t *)btp;
2984 2984 if (ip[1] != KMEM_SIZE_ENCODE(size)) {
2985 2985 if (*(uint64_t *)buf == KMEM_FREE_PATTERN) {
2986 2986 kmem_error(KMERR_DUPFREE, cp, buf);
2987 2987 return;
2988 2988 }
2989 2989 if (KMEM_SIZE_VALID(ip[1])) {
2990 2990 ip[0] = KMEM_SIZE_ENCODE(size);
2991 2991 kmem_error(KMERR_BADSIZE, cp, buf);
2992 2992 } else {
2993 2993 kmem_error(KMERR_REDZONE, cp, buf);
2994 2994 }
2995 2995 return;
2996 2996 }
2997 2997 if (((uint8_t *)buf)[size] != KMEM_REDZONE_BYTE) {
2998 2998 kmem_error(KMERR_REDZONE, cp, buf);
2999 2999 return;
3000 3000 }
3001 3001 btp->bt_redzone = KMEM_REDZONE_PATTERN;
3002 3002 if (cp->cache_flags & KMF_LITE) {
3003 3003 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count,
3004 3004 caller());
3005 3005 }
3006 3006 }
3007 3007 kmem_cache_free(cp, buf);
3008 3008 }
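
The buftag checks above mean that, with KMF_BUFTAG set, kmem_free() must be passed exactly the size given to the matching kmem_alloc(); a mismatch is reported as KMERR_BADSIZE or KMERR_REDZONE. A small sketch of the expected pairing (log_name() and its use of strlen()/bcopy() are illustrative only):

	static void
	log_name(const char *src)
	{
		size_t len = strlen(src) + 1;
		char *buf = kmem_alloc(len, KM_SLEEP);

		bcopy(src, buf, len);
		cmn_err(CE_CONT, "%s\n", buf);
		kmem_free(buf, len);	/* same 'len' as the matching kmem_alloc() */
	}
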
3009 3009
3010 3010 void *
3011 3011 kmem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
3012 3012 {
3013 3013 size_t realsize = size + vmp->vm_quantum;
3014 3014 void *addr;
3015 3015
3016 3016 /*
3017 3017 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
3018 3018 * vm_quantum will cause integer wraparound. Check for this, and
3019 3019 * blow off the firewall page in this case. Note that such a
3020 3020 * giant allocation (the entire kernel address space) can never
3021 3021 * be satisfied, so it will either fail immediately (VM_NOSLEEP)
3022 3022 * or sleep forever (VM_SLEEP). Thus, there is no need for a
3023 3023 * corresponding check in kmem_firewall_va_free().
3024 3024 */
3025 3025 if (realsize < size)
3026 3026 realsize = size;
3027 3027
3028 3028 /*
3029 3029 * While boot still owns resource management, make sure that this
3030 3030 * redzone virtual address allocation is properly accounted for in
3031 3031 	 * OBP's "virtual-memory" "available" lists because we're

3032 3032 * effectively claiming them for a red zone. If we don't do this,
3033 3033 * the available lists become too fragmented and too large for the
3034 3034 * current boot/kernel memory list interface.
3035 3035 */
3036 3036 addr = vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT);
3037 3037
3038 3038 if (addr != NULL && kvseg.s_base == NULL && realsize != size)
3039 3039 (void) boot_virt_alloc((char *)addr + size, vmp->vm_quantum);
3040 3040
3041 3041 return (addr);
3042 3042 }
3043 3043
3044 3044 void
3045 3045 kmem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
3046 3046 {
3047 3047 ASSERT((kvseg.s_base == NULL ?
3048 3048 va_to_pfn((char *)addr + size) :
3049 3049 hat_getpfnum(kas.a_hat, (caddr_t)addr + size)) == PFN_INVALID);
3050 3050
3051 3051 vmem_free(vmp, addr, size + vmp->vm_quantum);
3052 3052 }
3053 3053
3054 3054 /*
3055 3055 * Try to allocate at least `size' bytes of memory without sleeping or
3056 3056 * panicking. Return actual allocated size in `asize'. If allocation failed,
3057 3057 * try final allocation with sleep or panic allowed.
3058 3058 */
3059 3059 void *
3060 3060 kmem_alloc_tryhard(size_t size, size_t *asize, int kmflag)
3061 3061 {
3062 3062 void *p;
3063 3063
3064 3064 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3065 3065 do {
3066 3066 p = kmem_alloc(*asize, (kmflag | KM_NOSLEEP) & ~KM_PANIC);
3067 3067 if (p != NULL)
3068 3068 return (p);
3069 3069 *asize += KMEM_ALIGN;
3070 3070 } while (*asize <= PAGESIZE);
3071 3071
3072 3072 *asize = P2ROUNDUP(size, KMEM_ALIGN);
3073 3073 return (kmem_alloc(*asize, kmflag));
3074 3074 }
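
A hedged sketch of how a caller typically consumes kmem_alloc_tryhard(): the size returned in *asize may be larger than the request and is what must later be handed to kmem_free(). The dbuf_t descriptor below is hypothetical:

	typedef struct dbuf {
		void	*db_data;
		size_t	db_size;
	} dbuf_t;

	static void
	dbuf_fill(dbuf_t *db, size_t request)
	{
		/* db_size records the actual size for the eventual free */
		db->db_data = kmem_alloc_tryhard(request, &db->db_size, KM_SLEEP);
	}

	static void
	dbuf_empty(dbuf_t *db)
	{
		kmem_free(db->db_data, db->db_size);
		db->db_data = NULL;
		db->db_size = 0;
	}
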
3075 3075
3076 3076 /*
3077 3077 * Reclaim all unused memory from a cache.
3078 3078 */
3079 3079 static void
3080 3080 kmem_cache_reap(kmem_cache_t *cp)
3081 3081 {
3082 3082 ASSERT(taskq_member(kmem_taskq, curthread));
3083 3083 cp->cache_reap++;
3084 3084
3085 3085 /*
3086 3086 * Ask the cache's owner to free some memory if possible.
3087 3087 * The idea is to handle things like the inode cache, which
3088 3088 * typically sits on a bunch of memory that it doesn't truly
3089 3089 * *need*. Reclaim policy is entirely up to the owner; this
3090 3090 * callback is just an advisory plea for help.
3091 3091 */
3092 3092 if (cp->cache_reclaim != NULL) {
3093 3093 long delta;
3094 3094
3095 3095 /*
3096 3096 * Reclaimed memory should be reapable (not included in the
3097 3097 * depot's working set).
3098 3098 */
3099 3099 delta = cp->cache_full.ml_total;
3100 3100 cp->cache_reclaim(cp->cache_private);
3101 3101 delta = cp->cache_full.ml_total - delta;
3102 3102 if (delta > 0) {
3103 3103 mutex_enter(&cp->cache_depot_lock);
3104 3104 cp->cache_full.ml_reaplimit += delta;
3105 3105 cp->cache_full.ml_min += delta;
3106 3106 mutex_exit(&cp->cache_depot_lock);
3107 3107 }
3108 3108 }
3109 3109
3110 3110 kmem_depot_ws_reap(cp);
3111 3111
3112 3112 if (cp->cache_defrag != NULL && !kmem_move_noreap) {
3113 3113 kmem_cache_defrag(cp);
3114 3114 }
3115 3115 }
3116 3116
3117 3117 static void
3118 3118 kmem_reap_timeout(void *flag_arg)
3119 3119 {
3120 3120 uint32_t *flag = (uint32_t *)flag_arg;
3121 3121
3122 3122 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3123 3123 *flag = 0;
3124 3124 }
3125 3125
3126 3126 static void
3127 3127 kmem_reap_done(void *flag)
3128 3128 {
3129 3129 if (!callout_init_done) {
3130 3130 /* can't schedule a timeout at this point */
3131 3131 kmem_reap_timeout(flag);
3132 3132 } else {
3133 3133 (void) timeout(kmem_reap_timeout, flag, kmem_reap_interval);
3134 3134 }
3135 3135 }
3136 3136
3137 3137 static void
3138 3138 kmem_reap_start(void *flag)
3139 3139 {
3140 3140 ASSERT(flag == &kmem_reaping || flag == &kmem_reaping_idspace);
3141 3141
3142 3142 if (flag == &kmem_reaping) {
3143 3143 kmem_cache_applyall(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3144 3144 /*
3145 3145 * if we have segkp under heap, reap segkp cache.
3146 3146 */
3147 3147 if (segkp_fromheap)
3148 3148 segkp_cache_free();
3149 3149 }
3150 3150 else
3151 3151 kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);
3152 3152
3153 3153 /*
3154 3154 * We use taskq_dispatch() to schedule a timeout to clear
3155 3155 * the flag so that kmem_reap() becomes self-throttling:
3156 3156 * we won't reap again until the current reap completes *and*
3157 3157 * at least kmem_reap_interval ticks have elapsed.
3158 3158 */
3159 3159 if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
3160 3160 kmem_reap_done(flag);
3161 3161 }
3162 3162
3163 3163 static void
3164 3164 kmem_reap_common(void *flag_arg)
3165 3165 {
3166 3166 uint32_t *flag = (uint32_t *)flag_arg;
3167 3167
3168 3168 if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
3169 3169 atomic_cas_32(flag, 0, 1) != 0)
3170 3170 return;
3171 3171
3172 3172 /*
3173 3173 * It may not be kosher to do memory allocation when a reap is called
3174 3174 	 * (for example, if vmem_populate() is in the call chain).
3175 3175 * So we start the reap going with a TQ_NOALLOC dispatch. If the
3176 3176 * dispatch fails, we reset the flag, and the next reap will try again.
3177 3177 */
3178 3178 if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
3179 3179 *flag = 0;
3180 3180 }
3181 3181
3182 3182 /*
3183 3183 * Reclaim all unused memory from all caches. Called from the VM system
3184 3184 * when memory gets tight.
3185 3185 */
3186 3186 void
3187 3187 kmem_reap(void)
3188 3188 {
3189 3189 kmem_reap_common(&kmem_reaping);
3190 3190 }
3191 3191
3192 3192 /*
3193 3193 * Reclaim all unused memory from identifier arenas, called when a vmem
3194 3194  * arena not backed by memory is exhausted. Since reaping memory-backed caches
3195 3195 * cannot help with identifier exhaustion, we avoid both a large amount of
3196 3196 * work and unwanted side-effects from reclaim callbacks.
3197 3197 */
3198 3198 void
3199 3199 kmem_reap_idspace(void)
3200 3200 {
3201 3201 kmem_reap_common(&kmem_reaping_idspace);
3202 3202 }
3203 3203
3204 3204 /*
3205 3205 * Purge all magazines from a cache and set its magazine limit to zero.
3206 3206 * All calls are serialized by the kmem_taskq lock, except for the final
3207 3207 * call from kmem_cache_destroy().
3208 3208 */
3209 3209 static void
3210 3210 kmem_cache_magazine_purge(kmem_cache_t *cp)
3211 3211 {
3212 3212 kmem_cpu_cache_t *ccp;
3213 3213 kmem_magazine_t *mp, *pmp;
3214 3214 int rounds, prounds, cpu_seqid;
3215 3215
3216 3216 ASSERT(!list_link_active(&cp->cache_link) ||
3217 3217 taskq_member(kmem_taskq, curthread));
3218 3218 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
3219 3219
3220 3220 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3221 3221 ccp = &cp->cache_cpu[cpu_seqid];
3222 3222
3223 3223 mutex_enter(&ccp->cc_lock);
3224 3224 mp = ccp->cc_loaded;
3225 3225 pmp = ccp->cc_ploaded;
3226 3226 rounds = ccp->cc_rounds;
3227 3227 prounds = ccp->cc_prounds;
3228 3228 ccp->cc_loaded = NULL;
3229 3229 ccp->cc_ploaded = NULL;
3230 3230 ccp->cc_rounds = -1;
3231 3231 ccp->cc_prounds = -1;
3232 3232 ccp->cc_magsize = 0;
3233 3233 mutex_exit(&ccp->cc_lock);
3234 3234
3235 3235 if (mp)
3236 3236 kmem_magazine_destroy(cp, mp, rounds);
3237 3237 if (pmp)
3238 3238 kmem_magazine_destroy(cp, pmp, prounds);
3239 3239 }
3240 3240
3241 3241 /*
3242 3242 * Updating the working set statistics twice in a row has the
3243 3243 * effect of setting the working set size to zero, so everything
3244 3244 * is eligible for reaping.
3245 3245 */
3246 3246 kmem_depot_ws_update(cp);
3247 3247 kmem_depot_ws_update(cp);
3248 3248
3249 3249 kmem_depot_ws_reap(cp);
3250 3250 }
3251 3251
3252 3252 /*
3253 3253 * Enable per-cpu magazines on a cache.
3254 3254 */
3255 3255 static void
3256 3256 kmem_cache_magazine_enable(kmem_cache_t *cp)
3257 3257 {
3258 3258 int cpu_seqid;
3259 3259
3260 3260 if (cp->cache_flags & KMF_NOMAGAZINE)
3261 3261 return;
3262 3262
3263 3263 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3264 3264 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3265 3265 mutex_enter(&ccp->cc_lock);
3266 3266 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
3267 3267 mutex_exit(&ccp->cc_lock);
3268 3268 }
3269 3269
3270 3270 }
3271 3271
3272 3272 /*
3273 3273 * Reap (almost) everything right now. See kmem_cache_magazine_purge()
3274 3274 * for explanation of the back-to-back kmem_depot_ws_update() calls.
3275 3275 */
3276 3276 void
3277 3277 kmem_cache_reap_now(kmem_cache_t *cp)
3278 3278 {
3279 3279 ASSERT(list_link_active(&cp->cache_link));
3280 3280
3281 3281 kmem_depot_ws_update(cp);
3282 3282 kmem_depot_ws_update(cp);
3283 3283
3284 3284 (void) taskq_dispatch(kmem_taskq,
3285 3285 (task_func_t *)kmem_depot_ws_reap, cp, TQ_SLEEP);
3286 3286 taskq_wait(kmem_taskq);
3287 3287 }
3288 3288
3289 3289 /*
3290 3290 * Recompute a cache's magazine size. The trade-off is that larger magazines
3291 3291 * provide a higher transfer rate with the depot, while smaller magazines
3292 3292 * reduce memory consumption. Magazine resizing is an expensive operation;
3293 3293 * it should not be done frequently.
3294 3294 *
3295 3295 * Changes to the magazine size are serialized by the kmem_taskq lock.
3296 3296 *
3297 3297 * Note: at present this only grows the magazine size. It might be useful
3298 3298 * to allow shrinkage too.
3299 3299 */
3300 3300 static void
3301 3301 kmem_cache_magazine_resize(kmem_cache_t *cp)
3302 3302 {
3303 3303 kmem_magtype_t *mtp = cp->cache_magtype;
3304 3304
3305 3305 ASSERT(taskq_member(kmem_taskq, curthread));
3306 3306
3307 3307 if (cp->cache_chunksize < mtp->mt_maxbuf) {
3308 3308 kmem_cache_magazine_purge(cp);
3309 3309 mutex_enter(&cp->cache_depot_lock);
3310 3310 cp->cache_magtype = ++mtp;
3311 3311 cp->cache_depot_contention_prev =
3312 3312 cp->cache_depot_contention + INT_MAX;
3313 3313 mutex_exit(&cp->cache_depot_lock);
3314 3314 kmem_cache_magazine_enable(cp);
3315 3315 }
3316 3316 }
3317 3317
3318 3318 /*
3319 3319 * Rescale a cache's hash table, so that the table size is roughly the
3320 3320 * cache size. We want the average lookup time to be extremely small.
3321 3321 */
3322 3322 static void
3323 3323 kmem_hash_rescale(kmem_cache_t *cp)
3324 3324 {
3325 3325 kmem_bufctl_t **old_table, **new_table, *bcp;
3326 3326 size_t old_size, new_size, h;
3327 3327
3328 3328 ASSERT(taskq_member(kmem_taskq, curthread));
3329 3329
3330 3330 new_size = MAX(KMEM_HASH_INITIAL,
3331 3331 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
3332 3332 old_size = cp->cache_hash_mask + 1;
3333 3333
3334 3334 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
3335 3335 return;
3336 3336
3337 3337 new_table = vmem_alloc(kmem_hash_arena, new_size * sizeof (void *),
3338 3338 VM_NOSLEEP);
3339 3339 if (new_table == NULL)
3340 3340 return;
3341 3341 bzero(new_table, new_size * sizeof (void *));
3342 3342
3343 3343 mutex_enter(&cp->cache_lock);
3344 3344
3345 3345 old_size = cp->cache_hash_mask + 1;
3346 3346 old_table = cp->cache_hash_table;
3347 3347
3348 3348 cp->cache_hash_mask = new_size - 1;
3349 3349 cp->cache_hash_table = new_table;
3350 3350 cp->cache_rescale++;
3351 3351
3352 3352 for (h = 0; h < old_size; h++) {
3353 3353 bcp = old_table[h];
3354 3354 while (bcp != NULL) {
3355 3355 void *addr = bcp->bc_addr;
3356 3356 kmem_bufctl_t *next_bcp = bcp->bc_next;
3357 3357 kmem_bufctl_t **hash_bucket = KMEM_HASH(cp, addr);
3358 3358 bcp->bc_next = *hash_bucket;
3359 3359 *hash_bucket = bcp;
3360 3360 bcp = next_bcp;
3361 3361 }
3362 3362 }
3363 3363
3364 3364 mutex_exit(&cp->cache_lock);
3365 3365
3366 3366 vmem_free(kmem_hash_arena, old_table, old_size * sizeof (void *));
3367 3367 }
3368 3368
3369 3369 /*
3370 3370 * Perform periodic maintenance on a cache: hash rescaling, depot working-set
3371 3371 * update, magazine resizing, and slab consolidation.
3372 3372 */
3373 3373 static void
3374 3374 kmem_cache_update(kmem_cache_t *cp)
3375 3375 {
3376 3376 int need_hash_rescale = 0;
3377 3377 int need_magazine_resize = 0;
3378 3378
3379 3379 ASSERT(MUTEX_HELD(&kmem_cache_lock));
3380 3380
3381 3381 /*
3382 3382 * If the cache has become much larger or smaller than its hash table,
3383 3383 * fire off a request to rescale the hash table.
3384 3384 */
3385 3385 mutex_enter(&cp->cache_lock);
3386 3386
3387 3387 if ((cp->cache_flags & KMF_HASH) &&
3388 3388 (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
3389 3389 (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
3390 3390 cp->cache_hash_mask > KMEM_HASH_INITIAL)))
3391 3391 need_hash_rescale = 1;
3392 3392
3393 3393 mutex_exit(&cp->cache_lock);
3394 3394
3395 3395 /*
3396 3396 * Update the depot working set statistics.
3397 3397 */
3398 3398 kmem_depot_ws_update(cp);
3399 3399
3400 3400 /*
3401 3401 * If there's a lot of contention in the depot,
3402 3402 * increase the magazine size.
3403 3403 */
3404 3404 mutex_enter(&cp->cache_depot_lock);
3405 3405
3406 3406 if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
3407 3407 (int)(cp->cache_depot_contention -
3408 3408 cp->cache_depot_contention_prev) > kmem_depot_contention)
3409 3409 need_magazine_resize = 1;
3410 3410
3411 3411 cp->cache_depot_contention_prev = cp->cache_depot_contention;
3412 3412
3413 3413 mutex_exit(&cp->cache_depot_lock);
3414 3414
3415 3415 if (need_hash_rescale)
3416 3416 (void) taskq_dispatch(kmem_taskq,
3417 3417 (task_func_t *)kmem_hash_rescale, cp, TQ_NOSLEEP);
3418 3418
3419 3419 if (need_magazine_resize)
3420 3420 (void) taskq_dispatch(kmem_taskq,
3421 3421 (task_func_t *)kmem_cache_magazine_resize, cp, TQ_NOSLEEP);
3422 3422
3423 3423 if (cp->cache_defrag != NULL)
3424 3424 (void) taskq_dispatch(kmem_taskq,
3425 3425 (task_func_t *)kmem_cache_scan, cp, TQ_NOSLEEP);
3426 3426 }
3427 3427
3428 3428 static void kmem_update(void *);
3429 3429
3430 3430 static void
3431 3431 kmem_update_timeout(void *dummy)
3432 3432 {
3433 3433 (void) timeout(kmem_update, dummy, kmem_reap_interval);
3434 3434 }
3435 3435
3436 3436 static void
3437 3437 kmem_update(void *dummy)
3438 3438 {
3439 3439 kmem_cache_applyall(kmem_cache_update, NULL, TQ_NOSLEEP);
3440 3440
3441 3441 /*
3442 3442 * We use taskq_dispatch() to reschedule the timeout so that
3443 3443 * kmem_update() becomes self-throttling: it won't schedule
3444 3444 * new tasks until all previous tasks have completed.
3445 3445 */
3446 3446 if (!taskq_dispatch(kmem_taskq, kmem_update_timeout, dummy, TQ_NOSLEEP))
3447 3447 kmem_update_timeout(NULL);
3448 3448 }
3449 3449
3450 3450 static int
3451 3451 kmem_cache_kstat_update(kstat_t *ksp, int rw)
3452 3452 {
3453 3453 struct kmem_cache_kstat *kmcp = &kmem_cache_kstat;
3454 3454 kmem_cache_t *cp = ksp->ks_private;
3455 3455 uint64_t cpu_buf_avail;
3456 3456 uint64_t buf_avail = 0;
3457 3457 int cpu_seqid;
3458 3458 long reap;
3459 3459
3460 3460 ASSERT(MUTEX_HELD(&kmem_cache_kstat_lock));
3461 3461
3462 3462 if (rw == KSTAT_WRITE)
3463 3463 return (EACCES);
3464 3464
3465 3465 mutex_enter(&cp->cache_lock);
3466 3466
3467 3467 kmcp->kmc_alloc_fail.value.ui64 = cp->cache_alloc_fail;
3468 3468 kmcp->kmc_alloc.value.ui64 = cp->cache_slab_alloc;
3469 3469 kmcp->kmc_free.value.ui64 = cp->cache_slab_free;
3470 3470 kmcp->kmc_slab_alloc.value.ui64 = cp->cache_slab_alloc;
3471 3471 kmcp->kmc_slab_free.value.ui64 = cp->cache_slab_free;
3472 3472
3473 3473 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3474 3474 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3475 3475
3476 3476 mutex_enter(&ccp->cc_lock);
3477 3477
3478 3478 cpu_buf_avail = 0;
3479 3479 if (ccp->cc_rounds > 0)
3480 3480 cpu_buf_avail += ccp->cc_rounds;
3481 3481 if (ccp->cc_prounds > 0)
3482 3482 cpu_buf_avail += ccp->cc_prounds;
3483 3483
3484 3484 kmcp->kmc_alloc.value.ui64 += ccp->cc_alloc;
3485 3485 kmcp->kmc_free.value.ui64 += ccp->cc_free;
3486 3486 buf_avail += cpu_buf_avail;
3487 3487
3488 3488 mutex_exit(&ccp->cc_lock);
3489 3489 }
3490 3490
3491 3491 mutex_enter(&cp->cache_depot_lock);
3492 3492
3493 3493 kmcp->kmc_depot_alloc.value.ui64 = cp->cache_full.ml_alloc;
3494 3494 kmcp->kmc_depot_free.value.ui64 = cp->cache_empty.ml_alloc;
3495 3495 kmcp->kmc_depot_contention.value.ui64 = cp->cache_depot_contention;
3496 3496 kmcp->kmc_full_magazines.value.ui64 = cp->cache_full.ml_total;
3497 3497 kmcp->kmc_empty_magazines.value.ui64 = cp->cache_empty.ml_total;
3498 3498 kmcp->kmc_magazine_size.value.ui64 =
3499 3499 (cp->cache_flags & KMF_NOMAGAZINE) ?
3500 3500 0 : cp->cache_magtype->mt_magsize;
3501 3501
3502 3502 kmcp->kmc_alloc.value.ui64 += cp->cache_full.ml_alloc;
3503 3503 kmcp->kmc_free.value.ui64 += cp->cache_empty.ml_alloc;
3504 3504 buf_avail += cp->cache_full.ml_total * cp->cache_magtype->mt_magsize;
3505 3505
3506 3506 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
3507 3507 reap = MIN(reap, cp->cache_full.ml_total);
3508 3508
3509 3509 mutex_exit(&cp->cache_depot_lock);
3510 3510
3511 3511 kmcp->kmc_buf_size.value.ui64 = cp->cache_bufsize;
3512 3512 kmcp->kmc_align.value.ui64 = cp->cache_align;
3513 3513 kmcp->kmc_chunk_size.value.ui64 = cp->cache_chunksize;
3514 3514 kmcp->kmc_slab_size.value.ui64 = cp->cache_slabsize;
3515 3515 kmcp->kmc_buf_constructed.value.ui64 = buf_avail;
3516 3516 buf_avail += cp->cache_bufslab;
3517 3517 kmcp->kmc_buf_avail.value.ui64 = buf_avail;
3518 3518 kmcp->kmc_buf_inuse.value.ui64 = cp->cache_buftotal - buf_avail;
3519 3519 kmcp->kmc_buf_total.value.ui64 = cp->cache_buftotal;
3520 3520 kmcp->kmc_buf_max.value.ui64 = cp->cache_bufmax;
3521 3521 kmcp->kmc_slab_create.value.ui64 = cp->cache_slab_create;
3522 3522 kmcp->kmc_slab_destroy.value.ui64 = cp->cache_slab_destroy;
3523 3523 kmcp->kmc_hash_size.value.ui64 = (cp->cache_flags & KMF_HASH) ?
3524 3524 cp->cache_hash_mask + 1 : 0;
3525 3525 kmcp->kmc_hash_lookup_depth.value.ui64 = cp->cache_lookup_depth;
3526 3526 kmcp->kmc_hash_rescale.value.ui64 = cp->cache_rescale;
3527 3527 kmcp->kmc_vmem_source.value.ui64 = cp->cache_arena->vm_id;
3528 3528 kmcp->kmc_reap.value.ui64 = cp->cache_reap;
3529 3529
3530 3530 if (cp->cache_defrag == NULL) {
3531 3531 kmcp->kmc_move_callbacks.value.ui64 = 0;
3532 3532 kmcp->kmc_move_yes.value.ui64 = 0;
3533 3533 kmcp->kmc_move_no.value.ui64 = 0;
3534 3534 kmcp->kmc_move_later.value.ui64 = 0;
3535 3535 kmcp->kmc_move_dont_need.value.ui64 = 0;
3536 3536 kmcp->kmc_move_dont_know.value.ui64 = 0;
3537 3537 kmcp->kmc_move_hunt_found.value.ui64 = 0;
3538 3538 kmcp->kmc_move_slabs_freed.value.ui64 = 0;
3539 3539 kmcp->kmc_defrag.value.ui64 = 0;
3540 3540 kmcp->kmc_scan.value.ui64 = 0;
3541 3541 kmcp->kmc_move_reclaimable.value.ui64 = 0;
3542 3542 } else {
3543 3543 int64_t reclaimable;
3544 3544
3545 3545 kmem_defrag_t *kd = cp->cache_defrag;
3546 3546 kmcp->kmc_move_callbacks.value.ui64 = kd->kmd_callbacks;
3547 3547 kmcp->kmc_move_yes.value.ui64 = kd->kmd_yes;
3548 3548 kmcp->kmc_move_no.value.ui64 = kd->kmd_no;
3549 3549 kmcp->kmc_move_later.value.ui64 = kd->kmd_later;
3550 3550 kmcp->kmc_move_dont_need.value.ui64 = kd->kmd_dont_need;
3551 3551 kmcp->kmc_move_dont_know.value.ui64 = kd->kmd_dont_know;
3552 3552 kmcp->kmc_move_hunt_found.value.ui64 = kd->kmd_hunt_found;
3553 3553 kmcp->kmc_move_slabs_freed.value.ui64 = kd->kmd_slabs_freed;
3554 3554 kmcp->kmc_defrag.value.ui64 = kd->kmd_defrags;
3555 3555 kmcp->kmc_scan.value.ui64 = kd->kmd_scans;
3556 3556
3557 3557 reclaimable = cp->cache_bufslab - (cp->cache_maxchunks - 1);
3558 3558 reclaimable = MAX(reclaimable, 0);
3559 3559 reclaimable += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
3560 3560 kmcp->kmc_move_reclaimable.value.ui64 = reclaimable;
3561 3561 }
3562 3562
3563 3563 mutex_exit(&cp->cache_lock);
3564 3564 return (0);
3565 3565 }
3566 3566
3567 3567 /*
3568 3568 * Return a named statistic about a particular cache.
3569 3569 * This shouldn't be called very often, so it's currently designed for
3570 3570 * simplicity (leverages existing kstat support) rather than efficiency.
3571 3571 */
3572 3572 uint64_t
3573 3573 kmem_cache_stat(kmem_cache_t *cp, char *name)
3574 3574 {
3575 3575 int i;
3576 3576 kstat_t *ksp = cp->cache_kstat;
3577 3577 kstat_named_t *knp = (kstat_named_t *)&kmem_cache_kstat;
3578 3578 uint64_t value = 0;
3579 3579
3580 3580 if (ksp != NULL) {
3581 3581 mutex_enter(&kmem_cache_kstat_lock);
3582 3582 (void) kmem_cache_kstat_update(ksp, KSTAT_READ);
3583 3583 for (i = 0; i < ksp->ks_ndata; i++) {
3584 3584 if (strcmp(knp[i].name, name) == 0) {
3585 3585 value = knp[i].value.ui64;
3586 3586 break;
3587 3587 }
3588 3588 }
3589 3589 mutex_exit(&kmem_cache_kstat_lock);
3590 3590 }
3591 3591 return (value);
3592 3592 }
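
For illustration, a sketch of querying a single statistic by name; the statistic name is assumed to follow the cache kstat naming (e.g. "alloc_fail" for kmc_alloc_fail):

	static void
	cache_warn_alloc_fail(kmem_cache_t *cp)
	{
		uint64_t fails = kmem_cache_stat(cp, "alloc_fail");

		if (fails != 0)
			cmn_err(CE_NOTE, "%s: %llu failed allocations",
			    cp->cache_name, (u_longlong_t)fails);
	}
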
3593 3593
3594 3594 /*
3595 3595 * Return an estimate of currently available kernel heap memory.
3596 3596  * On 32-bit systems, physical memory may exceed virtual memory, so
3597 3597  * we just truncate the result at 1GB.
3598 3598 */
3599 3599 size_t
3600 3600 kmem_avail(void)
3601 3601 {
3602 3602 spgcnt_t rmem = availrmem - tune.t_minarmem;
3603 3603 spgcnt_t fmem = freemem - minfree;
3604 3604
3605 3605 return ((size_t)ptob(MIN(MAX(MIN(rmem, fmem), 0),
3606 3606 1 << (30 - PAGESHIFT))));
3607 3607 }
3608 3608
3609 3609 /*
3610 3610 * Return the maximum amount of memory that is (in theory) allocatable
3611 3611 * from the heap. This may be used as an estimate only since there
3612 3612 * is no guarentee this space will still be available when an allocation
3613 3613  * is no guarantee this space will still be available when an allocation
3614 3614  * request is made, nor that the space can be allocated in one big request
3615 3615 */
3616 3616 size_t
3617 3617 kmem_maxavail(void)
3618 3618 {
3619 3619 spgcnt_t pmem = availrmem - tune.t_minarmem;
3620 3620 spgcnt_t vmem = btop(vmem_size(heap_arena, VMEM_FREE));
3621 3621
3622 3622 return ((size_t)ptob(MAX(MIN(pmem, vmem), 0)));
3623 3623 }
3624 3624
3625 3625 /*
3626 3626 * Indicate whether memory-intensive kmem debugging is enabled.
3627 3627 */
3628 3628 int
3629 3629 kmem_debugging(void)
3630 3630 {
3631 3631 return (kmem_flags & (KMF_AUDIT | KMF_REDZONE));
3632 3632 }
3633 3633
3634 3634 /* binning function, sorts finely at the two extremes */
3635 3635 #define KMEM_PARTIAL_SLAB_WEIGHT(sp, binshift) \
3636 3636 ((((sp)->slab_refcnt <= (binshift)) || \
3637 3637 (((sp)->slab_chunks - (sp)->slab_refcnt) <= (binshift))) \
3638 3638 ? -(sp)->slab_refcnt \
3639 3639 : -((binshift) + ((sp)->slab_refcnt >> (binshift))))
3640 3640
3641 3641 /*
3642 3642 * Minimizing the number of partial slabs on the freelist minimizes
3643 3643 * fragmentation (the ratio of unused buffers held by the slab layer). There are
3644 3644 * two ways to get a slab off of the freelist: 1) free all the buffers on the
3645 3645 * slab, and 2) allocate all the buffers on the slab. It follows that we want
3646 3646 * the most-used slabs at the front of the list where they have the best chance
3647 3647 * of being completely allocated, and the least-used slabs at a safe distance
3648 3648 * from the front to improve the odds that the few remaining buffers will all be
3649 3649 * freed before another allocation can tie up the slab. For that reason a slab
3650 3650  * with a higher slab_refcnt sorts less than a slab with a lower
3651 3651 * slab_refcnt.
3652 3652 *
3653 3653 * However, if a slab has at least one buffer that is deemed unfreeable, we
3654 3654 * would rather have that slab at the front of the list regardless of
3655 3655 * slab_refcnt, since even one unfreeable buffer makes the entire slab
3656 3656 * unfreeable. If the client returns KMEM_CBRC_NO in response to a cache_move()
3657 3657 * callback, the slab is marked unfreeable for as long as it remains on the
3658 3658 * freelist.
3659 3659 */
3660 3660 static int
3661 3661 kmem_partial_slab_cmp(const void *p0, const void *p1)
3662 3662 {
3663 3663 const kmem_cache_t *cp;
3664 3664 const kmem_slab_t *s0 = p0;
3665 3665 const kmem_slab_t *s1 = p1;
3666 3666 int w0, w1;
3667 3667 size_t binshift;
3668 3668
3669 3669 ASSERT(KMEM_SLAB_IS_PARTIAL(s0));
3670 3670 ASSERT(KMEM_SLAB_IS_PARTIAL(s1));
3671 3671 ASSERT(s0->slab_cache == s1->slab_cache);
3672 3672 cp = s1->slab_cache;
3673 3673 ASSERT(MUTEX_HELD(&cp->cache_lock));
3674 3674 binshift = cp->cache_partial_binshift;
3675 3675
3676 3676 /* weight of first slab */
3677 3677 w0 = KMEM_PARTIAL_SLAB_WEIGHT(s0, binshift);
3678 3678 if (s0->slab_flags & KMEM_SLAB_NOMOVE) {
3679 3679 w0 -= cp->cache_maxchunks;
3680 3680 }
3681 3681
3682 3682 /* weight of second slab */
3683 3683 w1 = KMEM_PARTIAL_SLAB_WEIGHT(s1, binshift);
3684 3684 if (s1->slab_flags & KMEM_SLAB_NOMOVE) {
3685 3685 w1 -= cp->cache_maxchunks;
3686 3686 }
3687 3687
3688 3688 if (w0 < w1)
3689 3689 return (-1);
3690 3690 if (w0 > w1)
3691 3691 return (1);
3692 3692
3693 3693 /* compare pointer values */
3694 3694 if ((uintptr_t)s0 < (uintptr_t)s1)
3695 3695 return (-1);
3696 3696 if ((uintptr_t)s0 > (uintptr_t)s1)
3697 3697 return (1);
3698 3698
3699 3699 return (0);
3700 3700 }
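
A worked instance of the weight macro and comparator above, with values chosen only for illustration: given binshift = 3 and 32 chunks per slab, a slab with 30 buffers allocated falls in the fine-grained bin (only 2 free) and weighs -30, while a half-used slab with 16 allocated weighs -(3 + (16 >> 3)) = -5, so the nearly-full slab sorts toward the front of the freelist:

	static void
	partial_slab_weight_example(void)
	{
		const size_t binshift = 3;
		kmem_slab_t nearly_full, half_used;

		nearly_full.slab_chunks = 32;
		nearly_full.slab_refcnt = 30;	/* 2 free: fine-grained bin */
		half_used.slab_chunks = 32;
		half_used.slab_refcnt = 16;	/* coarse bin */

		ASSERT(KMEM_PARTIAL_SLAB_WEIGHT(&nearly_full, binshift) == -30);
		ASSERT(KMEM_PARTIAL_SLAB_WEIGHT(&half_used, binshift) == -5);
	}
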
3701 3701
3702 3702 /*
3703 3703 * It must be valid to call the destructor (if any) on a newly created object.
3704 3704 * That is, the constructor (if any) must leave the object in a valid state for
3705 3705 * the destructor.
3706 3706 */
3707 3707 kmem_cache_t *
3708 3708 kmem_cache_create(
3709 3709 char *name, /* descriptive name for this cache */
3710 3710 size_t bufsize, /* size of the objects it manages */
3711 3711 size_t align, /* required object alignment */
3712 3712 int (*constructor)(void *, void *, int), /* object constructor */
3713 3713 void (*destructor)(void *, void *), /* object destructor */
3714 3714 void (*reclaim)(void *), /* memory reclaim callback */
3715 3715 void *private, /* pass-thru arg for constr/destr/reclaim */
3716 3716 vmem_t *vmp, /* vmem source for slab allocation */
3717 3717 int cflags) /* cache creation flags */
3718 3718 {
3719 3719 int cpu_seqid;
3720 3720 size_t chunksize;
3721 3721 kmem_cache_t *cp;
3722 3722 kmem_magtype_t *mtp;
3723 3723 size_t csize = KMEM_CACHE_SIZE(max_ncpus);
3724 3724
3725 3725 #ifdef DEBUG
3726 3726 /*
3727 3727 * Cache names should conform to the rules for valid C identifiers
3728 3728 */
3729 3729 if (!strident_valid(name)) {
3730 3730 cmn_err(CE_CONT,
3731 3731 "kmem_cache_create: '%s' is an invalid cache name\n"
3732 3732 "cache names must conform to the rules for "
3733 3733 "C identifiers\n", name);
3734 3734 }
3735 3735 #endif /* DEBUG */
3736 3736
3737 3737 if (vmp == NULL)
3738 3738 vmp = kmem_default_arena;
3739 3739
3740 3740 /*
3741 3741 * If this kmem cache has an identifier vmem arena as its source, mark
3742 3742 * it such to allow kmem_reap_idspace().
3743 3743 */
3744 3744 ASSERT(!(cflags & KMC_IDENTIFIER)); /* consumer should not set this */
3745 3745 if (vmp->vm_cflags & VMC_IDENTIFIER)
3746 3746 cflags |= KMC_IDENTIFIER;
3747 3747
3748 3748 /*
3749 3749 * Get a kmem_cache structure. We arrange that cp->cache_cpu[]
3750 3750 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
3751 3751 * false sharing of per-CPU data.
3752 3752 */
3753 3753 cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
3754 3754 P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
3755 3755 bzero(cp, csize);
3756 3756 list_link_init(&cp->cache_link);
3757 3757
3758 3758 if (align == 0)
3759 3759 align = KMEM_ALIGN;
3760 3760
3761 3761 /*
3762 3762 * If we're not at least KMEM_ALIGN aligned, we can't use free
3763 3763 * memory to hold bufctl information (because we can't safely
3764 3764 * perform word loads and stores on it).
3765 3765 */
3766 3766 if (align < KMEM_ALIGN)
3767 3767 cflags |= KMC_NOTOUCH;
3768 3768
3769 3769 if ((align & (align - 1)) != 0 || align > vmp->vm_quantum)
3770 3770 panic("kmem_cache_create: bad alignment %lu", align);
3771 3771
3772 3772 mutex_enter(&kmem_flags_lock);
3773 3773 if (kmem_flags & KMF_RANDOMIZE)
3774 3774 kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
3775 3775 KMF_RANDOMIZE;
3776 3776 cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
3777 3777 mutex_exit(&kmem_flags_lock);
3778 3778
3779 3779 /*
3780 3780 * Make sure all the various flags are reasonable.
3781 3781 */
3782 3782 ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));
3783 3783
3784 3784 if (cp->cache_flags & KMF_LITE) {
3785 3785 if (bufsize >= kmem_lite_minsize &&
3786 3786 align <= kmem_lite_maxalign &&
3787 3787 P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
3788 3788 cp->cache_flags |= KMF_BUFTAG;
3789 3789 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3790 3790 } else {
3791 3791 cp->cache_flags &= ~KMF_DEBUG;
3792 3792 }
3793 3793 }
3794 3794
3795 3795 if (cp->cache_flags & KMF_DEADBEEF)
3796 3796 cp->cache_flags |= KMF_REDZONE;
3797 3797
3798 3798 if ((cflags & KMC_QCACHE) && (cp->cache_flags & KMF_AUDIT))
3799 3799 cp->cache_flags |= KMF_NOMAGAZINE;
3800 3800
3801 3801 if (cflags & KMC_NODEBUG)
3802 3802 cp->cache_flags &= ~KMF_DEBUG;
3803 3803
3804 3804 if (cflags & KMC_NOTOUCH)
3805 3805 cp->cache_flags &= ~KMF_TOUCH;
3806 3806
3807 3807 if (cflags & KMC_PREFILL)
3808 3808 cp->cache_flags |= KMF_PREFILL;
3809 3809
3810 3810 if (cflags & KMC_NOHASH)
3811 3811 cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);
3812 3812
3813 3813 if (cflags & KMC_NOMAGAZINE)
3814 3814 cp->cache_flags |= KMF_NOMAGAZINE;
3815 3815
3816 3816 if ((cp->cache_flags & KMF_AUDIT) && !(cflags & KMC_NOTOUCH))
3817 3817 cp->cache_flags |= KMF_REDZONE;
3818 3818
3819 3819 if (!(cp->cache_flags & KMF_AUDIT))
3820 3820 cp->cache_flags &= ~KMF_CONTENTS;
3821 3821
3822 3822 if ((cp->cache_flags & KMF_BUFTAG) && bufsize >= kmem_minfirewall &&
3823 3823 !(cp->cache_flags & KMF_LITE) && !(cflags & KMC_NOHASH))
3824 3824 cp->cache_flags |= KMF_FIREWALL;
3825 3825
3826 3826 if (vmp != kmem_default_arena || kmem_firewall_arena == NULL)
3827 3827 cp->cache_flags &= ~KMF_FIREWALL;
3828 3828
3829 3829 if (cp->cache_flags & KMF_FIREWALL) {
3830 3830 cp->cache_flags &= ~KMF_BUFTAG;
3831 3831 cp->cache_flags |= KMF_NOMAGAZINE;
3832 3832 ASSERT(vmp == kmem_default_arena);
3833 3833 vmp = kmem_firewall_arena;
3834 3834 }
3835 3835
3836 3836 /*
3837 3837 * Set cache properties.
3838 3838 */
3839 3839 (void) strncpy(cp->cache_name, name, KMEM_CACHE_NAMELEN);
3840 3840 strident_canon(cp->cache_name, KMEM_CACHE_NAMELEN + 1);
3841 3841 cp->cache_bufsize = bufsize;
3842 3842 cp->cache_align = align;
3843 3843 cp->cache_constructor = constructor;
3844 3844 cp->cache_destructor = destructor;
3845 3845 cp->cache_reclaim = reclaim;
3846 3846 cp->cache_private = private;
3847 3847 cp->cache_arena = vmp;
3848 3848 cp->cache_cflags = cflags;
3849 3849
3850 3850 /*
3851 3851 * Determine the chunk size.
3852 3852 */
3853 3853 chunksize = bufsize;
3854 3854
3855 3855 if (align >= KMEM_ALIGN) {
3856 3856 chunksize = P2ROUNDUP(chunksize, KMEM_ALIGN);
3857 3857 cp->cache_bufctl = chunksize - KMEM_ALIGN;
3858 3858 }
3859 3859
3860 3860 if (cp->cache_flags & KMF_BUFTAG) {
3861 3861 cp->cache_bufctl = chunksize;
3862 3862 cp->cache_buftag = chunksize;
3863 3863 if (cp->cache_flags & KMF_LITE)
3864 3864 chunksize += KMEM_BUFTAG_LITE_SIZE(kmem_lite_count);
3865 3865 else
3866 3866 chunksize += sizeof (kmem_buftag_t);
3867 3867 }
3868 3868
3869 3869 if (cp->cache_flags & KMF_DEADBEEF) {
3870 3870 cp->cache_verify = MIN(cp->cache_buftag, kmem_maxverify);
3871 3871 if (cp->cache_flags & KMF_LITE)
3872 3872 cp->cache_verify = sizeof (uint64_t);
3873 3873 }
3874 3874
3875 3875 cp->cache_contents = MIN(cp->cache_bufctl, kmem_content_maxsave);
3876 3876
3877 3877 cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);
3878 3878
3879 3879 /*
3880 3880 * Now that we know the chunk size, determine the optimal slab size.
3881 3881 */
3882 3882 if (vmp == kmem_firewall_arena) {
3883 3883 cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
3884 3884 cp->cache_mincolor = cp->cache_slabsize - chunksize;
3885 3885 cp->cache_maxcolor = cp->cache_mincolor;
3886 3886 cp->cache_flags |= KMF_HASH;
3887 3887 ASSERT(!(cp->cache_flags & KMF_BUFTAG));
3888 3888 } else if ((cflags & KMC_NOHASH) || (!(cflags & KMC_NOTOUCH) &&
3889 3889 !(cp->cache_flags & KMF_AUDIT) &&
3890 3890 chunksize < vmp->vm_quantum / KMEM_VOID_FRACTION)) {
3891 3891 cp->cache_slabsize = vmp->vm_quantum;
3892 3892 cp->cache_mincolor = 0;
3893 3893 cp->cache_maxcolor =
3894 3894 (cp->cache_slabsize - sizeof (kmem_slab_t)) % chunksize;
3895 3895 ASSERT(chunksize + sizeof (kmem_slab_t) <= cp->cache_slabsize);
3896 3896 ASSERT(!(cp->cache_flags & KMF_AUDIT));
3897 3897 } else {
3898 3898 size_t chunks, bestfit, waste, slabsize;
3899 3899 size_t minwaste = LONG_MAX;
3900 3900
3901 3901 for (chunks = 1; chunks <= KMEM_VOID_FRACTION; chunks++) {
3902 3902 slabsize = P2ROUNDUP(chunksize * chunks,
3903 3903 vmp->vm_quantum);
3904 3904 chunks = slabsize / chunksize;
3905 3905 waste = (slabsize % chunksize) / chunks;
3906 3906 if (waste < minwaste) {
3907 3907 minwaste = waste;
3908 3908 bestfit = slabsize;
3909 3909 }
3910 3910 }
3911 3911 if (cflags & KMC_QCACHE)
3912 3912 bestfit = VMEM_QCACHE_SLABSIZE(vmp->vm_qcache_max);
3913 3913 cp->cache_slabsize = bestfit;
3914 3914 cp->cache_mincolor = 0;
3915 3915 cp->cache_maxcolor = bestfit % chunksize;
3916 3916 cp->cache_flags |= KMF_HASH;
3917 3917 }
3918 3918
3919 3919 cp->cache_maxchunks = (cp->cache_slabsize / cp->cache_chunksize);
3920 3920 cp->cache_partial_binshift = highbit(cp->cache_maxchunks / 16) + 1;
3921 3921
3922 3922 /*
3923 3923 * Disallowing prefill when either the DEBUG or HASH flag is set or when
3924 3924 * there is a constructor avoids some tricky issues with debug setup
3925 3925 * that may be revisited later. We cannot allow prefill in a
3926 3926 * metadata cache because of potential recursion.
3927 3927 */
3928 3928 if (vmp == kmem_msb_arena ||
3929 3929 cp->cache_flags & (KMF_HASH | KMF_BUFTAG) ||
3930 3930 cp->cache_constructor != NULL)
3931 3931 cp->cache_flags &= ~KMF_PREFILL;
3932 3932
3933 3933 if (cp->cache_flags & KMF_HASH) {
3934 3934 ASSERT(!(cflags & KMC_NOHASH));
3935 3935 cp->cache_bufctl_cache = (cp->cache_flags & KMF_AUDIT) ?
3936 3936 kmem_bufctl_audit_cache : kmem_bufctl_cache;
3937 3937 }
3938 3938
3939 3939 if (cp->cache_maxcolor >= vmp->vm_quantum)
3940 3940 cp->cache_maxcolor = vmp->vm_quantum - 1;
3941 3941
3942 3942 cp->cache_color = cp->cache_mincolor;
3943 3943
3944 3944 /*
3945 3945 * Initialize the rest of the slab layer.
3946 3946 */
3947 3947 mutex_init(&cp->cache_lock, NULL, MUTEX_DEFAULT, NULL);
3948 3948
3949 3949 avl_create(&cp->cache_partial_slabs, kmem_partial_slab_cmp,
3950 3950 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3951 3951 /* LINTED: E_TRUE_LOGICAL_EXPR */
3952 3952 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
3953 3953 /* reuse partial slab AVL linkage for complete slab list linkage */
3954 3954 list_create(&cp->cache_complete_slabs,
3955 3955 sizeof (kmem_slab_t), offsetof(kmem_slab_t, slab_link));
3956 3956
3957 3957 if (cp->cache_flags & KMF_HASH) {
3958 3958 cp->cache_hash_table = vmem_alloc(kmem_hash_arena,
3959 3959 KMEM_HASH_INITIAL * sizeof (void *), VM_SLEEP);
3960 3960 bzero(cp->cache_hash_table,
3961 3961 KMEM_HASH_INITIAL * sizeof (void *));
3962 3962 cp->cache_hash_mask = KMEM_HASH_INITIAL - 1;
3963 3963 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
3964 3964 }
3965 3965
3966 3966 /*
3967 3967 * Initialize the depot.
3968 3968 */
3969 3969 mutex_init(&cp->cache_depot_lock, NULL, MUTEX_DEFAULT, NULL);
3970 3970
3971 3971 for (mtp = kmem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
3972 3972 continue;
3973 3973
3974 3974 cp->cache_magtype = mtp;
3975 3975
3976 3976 /*
3977 3977 * Initialize the CPU layer.
3978 3978 */
3979 3979 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
3980 3980 kmem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
3981 3981 mutex_init(&ccp->cc_lock, NULL, MUTEX_DEFAULT, NULL);
3982 3982 ccp->cc_flags = cp->cache_flags;
3983 3983 ccp->cc_rounds = -1;
3984 3984 ccp->cc_prounds = -1;
3985 3985 }
3986 3986
3987 3987 /*
3988 3988 * Create the cache's kstats.
3989 3989 */
3990 3990 if ((cp->cache_kstat = kstat_create("unix", 0, cp->cache_name,
3991 3991 "kmem_cache", KSTAT_TYPE_NAMED,
3992 3992 sizeof (kmem_cache_kstat) / sizeof (kstat_named_t),
3993 3993 KSTAT_FLAG_VIRTUAL)) != NULL) {
3994 3994 cp->cache_kstat->ks_data = &kmem_cache_kstat;
3995 3995 cp->cache_kstat->ks_update = kmem_cache_kstat_update;
3996 3996 cp->cache_kstat->ks_private = cp;
3997 3997 cp->cache_kstat->ks_lock = &kmem_cache_kstat_lock;
3998 3998 kstat_install(cp->cache_kstat);
3999 3999 }
4000 4000
4001 4001 /*
4002 4002 * Add the cache to the global list. This makes it visible
4003 4003 * to kmem_update(), so the cache must be ready for business.
4004 4004 */
4005 4005 mutex_enter(&kmem_cache_lock);
4006 4006 list_insert_tail(&kmem_caches, cp);
4007 4007 mutex_exit(&kmem_cache_lock);
4008 4008
4009 4009 if (kmem_ready)
4010 4010 kmem_cache_magazine_enable(cp);
4011 4011
4012 4012 return (cp);
4013 4013 }
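
As the comment above kmem_cache_create() requires, a constructor must leave the object fit for the destructor even if the object is never handed to a client. A minimal sketch, assuming a hypothetical bar_t that embeds a mutex:

	typedef struct bar {
		kmutex_t	b_lock;
		uint64_t	b_data;
	} bar_t;

	/*ARGSUSED*/
	static int
	bar_construct(void *buf, void *private, int kmflag)
	{
		bar_t *bp = buf;

		mutex_init(&bp->b_lock, NULL, MUTEX_DEFAULT, NULL);
		bp->b_data = 0;
		return (0);
	}

	/*ARGSUSED*/
	static void
	bar_destruct(void *buf, void *private)
	{
		bar_t *bp = buf;

		/* valid on any constructed object, used or not */
		mutex_destroy(&bp->b_lock);
	}

	static kmem_cache_t *bar_cache;

	void
	bar_init(void)
	{
		bar_cache = kmem_cache_create("bar_cache", sizeof (bar_t), 0,
		    bar_construct, bar_destruct, NULL, NULL, NULL, 0);
	}
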
4014 4014
4015 4015 static int
4016 4016 kmem_move_cmp(const void *buf, const void *p)
4017 4017 {
4018 4018 const kmem_move_t *kmm = p;
4019 4019 uintptr_t v1 = (uintptr_t)buf;
4020 4020 uintptr_t v2 = (uintptr_t)kmm->kmm_from_buf;
4021 4021 return (v1 < v2 ? -1 : (v1 > v2 ? 1 : 0));
4022 4022 }
4023 4023
4024 4024 static void
4025 4025 kmem_reset_reclaim_threshold(kmem_defrag_t *kmd)
4026 4026 {
4027 4027 kmd->kmd_reclaim_numer = 1;
4028 4028 }
4029 4029
4030 4030 /*
4031 4031 * Initially, when choosing candidate slabs for buffers to move, we want to be
4032 4032 * very selective and take only slabs that are less than
4033 4033 * (1 / KMEM_VOID_FRACTION) allocated. If we have difficulty finding candidate
4034 4034 * slabs, then we raise the allocation ceiling incrementally. The reclaim
4035 4035 * threshold is reset to (1 / KMEM_VOID_FRACTION) as soon as the cache is no
4036 4036 * longer fragmented.
4037 4037 */
4038 4038 static void
4039 4039 kmem_adjust_reclaim_threshold(kmem_defrag_t *kmd, int direction)
4040 4040 {
4041 4041 if (direction > 0) {
4042 4042 /* make it easier to find a candidate slab */
4043 4043 if (kmd->kmd_reclaim_numer < (KMEM_VOID_FRACTION - 1)) {
4044 4044 kmd->kmd_reclaim_numer++;
4045 4045 }
4046 4046 } else {
4047 4047 /* be more selective */
4048 4048 if (kmd->kmd_reclaim_numer > 1) {
4049 4049 kmd->kmd_reclaim_numer--;
4050 4050 }
4051 4051 }
4052 4052 }
4053 4053
4054 4054 void
4055 4055 kmem_cache_set_move(kmem_cache_t *cp,
4056 4056 kmem_cbrc_t (*move)(void *, void *, size_t, void *))
4057 4057 {
4058 4058 kmem_defrag_t *defrag;
4059 4059
4060 4060 ASSERT(move != NULL);
4061 4061 /*
4062 4062 * The consolidator does not support NOTOUCH caches because kmem cannot
4063 4063 * initialize their slabs with the 0xbaddcafe memory pattern, which sets
4064 4064 * a low order bit usable by clients to distinguish uninitialized memory
4065 4065 * from known objects (see kmem_slab_create).
4066 4066 */
4067 4067 ASSERT(!(cp->cache_cflags & KMC_NOTOUCH));
4068 4068 ASSERT(!(cp->cache_cflags & KMC_IDENTIFIER));
4069 4069
4070 4070 /*
4071 4071 * We should not be holding anyone's cache lock when calling
4072 4072 * kmem_cache_alloc(), so allocate in all cases before acquiring the
4073 4073 * lock.
4074 4074 */
4075 4075 defrag = kmem_cache_alloc(kmem_defrag_cache, KM_SLEEP);
4076 4076
4077 4077 mutex_enter(&cp->cache_lock);
4078 4078
4079 4079 if (KMEM_IS_MOVABLE(cp)) {
4080 4080 if (cp->cache_move == NULL) {
4081 4081 ASSERT(cp->cache_slab_alloc == 0);
4082 4082
4083 4083 cp->cache_defrag = defrag;
4084 4084 defrag = NULL; /* nothing to free */
4085 4085 bzero(cp->cache_defrag, sizeof (kmem_defrag_t));
4086 4086 avl_create(&cp->cache_defrag->kmd_moves_pending,
4087 4087 kmem_move_cmp, sizeof (kmem_move_t),
4088 4088 offsetof(kmem_move_t, kmm_entry));
4089 4089 /* LINTED: E_TRUE_LOGICAL_EXPR */
4090 4090 ASSERT(sizeof (list_node_t) <= sizeof (avl_node_t));
4091 4091 /* reuse the slab's AVL linkage for deadlist linkage */
4092 4092 list_create(&cp->cache_defrag->kmd_deadlist,
4093 4093 sizeof (kmem_slab_t),
4094 4094 offsetof(kmem_slab_t, slab_link));
4095 4095 kmem_reset_reclaim_threshold(cp->cache_defrag);
4096 4096 }
4097 4097 cp->cache_move = move;
4098 4098 }
4099 4099
4100 4100 mutex_exit(&cp->cache_lock);
4101 4101
4102 4102 if (defrag != NULL) {
4103 4103 kmem_cache_free(kmem_defrag_cache, defrag); /* unused */
4104 4104 }
4105 4105 }
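
A hedged sketch of a move callback that a client might register with kmem_cache_set_move(); baz_t, baz_lock, and the refcount convention are illustrative, while the KMEM_CBRC_* return values follow the meanings described for the consolidator (NO pins the slab as NOMOVE, LATER asks for a retry, YES means the contents were relocated):

	typedef struct baz {
		uint64_t	z_key;
		uint_t		z_refcnt;	/* nonzero: a client holds it */
	} baz_t;

	static kmutex_t baz_lock;		/* protects all baz_t objects */
	static kmem_cache_t *baz_cache;

	/*ARGSUSED*/
	static kmem_cbrc_t
	baz_move(void *old, void *new, size_t bufsize, void *private)
	{
		baz_t *from = old;
		baz_t *to = new;

		if (!mutex_tryenter(&baz_lock))
			return (KMEM_CBRC_LATER);	/* busy, retry this buffer */

		if (from->z_refcnt != 0) {
			mutex_exit(&baz_lock);
			return (KMEM_CBRC_NO);		/* held: slab becomes NOMOVE */
		}

		*to = *from;				/* relocate the contents */
		mutex_exit(&baz_lock);
		return (KMEM_CBRC_YES);
	}

	void
	baz_enable_move(void)
	{
		kmem_cache_set_move(baz_cache, baz_move);
	}
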
4106 4106
4107 4107 void
4108 4108 kmem_cache_destroy(kmem_cache_t *cp)
4109 4109 {
4110 4110 int cpu_seqid;
4111 4111
4112 4112 /*
4113 4113 * Remove the cache from the global cache list so that no one else
4114 4114 * can schedule tasks on its behalf, wait for any pending tasks to
4115 4115 * complete, purge the cache, and then destroy it.
4116 4116 */
4117 4117 mutex_enter(&kmem_cache_lock);
4118 4118 list_remove(&kmem_caches, cp);
4119 4119 mutex_exit(&kmem_cache_lock);
4120 4120
4121 4121 if (kmem_taskq != NULL)
4122 4122 taskq_wait(kmem_taskq);
4123 4123 if (kmem_move_taskq != NULL)
4124 4124 taskq_wait(kmem_move_taskq);
4125 4125
4126 4126 kmem_cache_magazine_purge(cp);
4127 4127
4128 4128 mutex_enter(&cp->cache_lock);
4129 4129 if (cp->cache_buftotal != 0)
4130 4130 cmn_err(CE_WARN, "kmem_cache_destroy: '%s' (%p) not empty",
4131 4131 cp->cache_name, (void *)cp);
4132 4132 if (cp->cache_defrag != NULL) {
4133 4133 avl_destroy(&cp->cache_defrag->kmd_moves_pending);
4134 4134 list_destroy(&cp->cache_defrag->kmd_deadlist);
4135 4135 kmem_cache_free(kmem_defrag_cache, cp->cache_defrag);
4136 4136 cp->cache_defrag = NULL;
4137 4137 }
4138 4138 /*
4139 4139 * The cache is now dead. There should be no further activity. We
4140 4140 * enforce this by setting land mines in the constructor, destructor,
4141 4141 * reclaim, and move routines that induce a kernel text fault if
4142 4142 * invoked.
4143 4143 */
4144 4144 cp->cache_constructor = (int (*)(void *, void *, int))1;
4145 4145 cp->cache_destructor = (void (*)(void *, void *))2;
4146 4146 cp->cache_reclaim = (void (*)(void *))3;
4147 4147 cp->cache_move = (kmem_cbrc_t (*)(void *, void *, size_t, void *))4;
4148 4148 mutex_exit(&cp->cache_lock);
4149 4149
4150 4150 kstat_delete(cp->cache_kstat);
4151 4151
4152 4152 if (cp->cache_hash_table != NULL)
4153 4153 vmem_free(kmem_hash_arena, cp->cache_hash_table,
4154 4154 (cp->cache_hash_mask + 1) * sizeof (void *));
4155 4155
4156 4156 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++)
4157 4157 mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);
4158 4158
4159 4159 mutex_destroy(&cp->cache_depot_lock);
4160 4160 mutex_destroy(&cp->cache_lock);
4161 4161
4162 4162 vmem_free(kmem_cache_arena, cp, KMEM_CACHE_SIZE(max_ncpus));
4163 4163 }
4164 4164
4165 4165 /*ARGSUSED*/
4166 4166 static int
4167 4167 kmem_cpu_setup(cpu_setup_t what, int id, void *arg)
4168 4168 {
4169 4169 ASSERT(MUTEX_HELD(&cpu_lock));
4170 4170 if (what == CPU_UNCONFIG) {
4171 4171 kmem_cache_applyall(kmem_cache_magazine_purge,
4172 4172 kmem_taskq, TQ_SLEEP);
4173 4173 kmem_cache_applyall(kmem_cache_magazine_enable,
4174 4174 kmem_taskq, TQ_SLEEP);
4175 4175 }
4176 4176 return (0);
4177 4177 }
4178 4178
4179 4179 static void
4180 4180 kmem_alloc_caches_create(const int *array, size_t count,
4181 4181 kmem_cache_t **alloc_table, size_t maxbuf, uint_t shift)
4182 4182 {
4183 4183 char name[KMEM_CACHE_NAMELEN + 1];
4184 4184 size_t table_unit = (1 << shift); /* range of one alloc_table entry */
4185 4185 size_t size = table_unit;
4186 4186 int i;
4187 4187
4188 4188 for (i = 0; i < count; i++) {
4189 4189 size_t cache_size = array[i];
4190 4190 size_t align = KMEM_ALIGN;
4191 4191 kmem_cache_t *cp;
4192 4192
4193 4193 /* if the table has an entry for maxbuf, we're done */
4194 4194 if (size > maxbuf)
4195 4195 break;
4196 4196
4197 4197 /* cache size must be a multiple of the table unit */
4198 4198 ASSERT(P2PHASE(cache_size, table_unit) == 0);
4199 4199
4200 4200 /*
4201 4201 * If they allocate a multiple of the coherency granularity,
4202 4202 * they get a coherency-granularity-aligned address.
4203 4203 */
4204 4204 if (IS_P2ALIGNED(cache_size, 64))
4205 4205 align = 64;
4206 4206 if (IS_P2ALIGNED(cache_size, PAGESIZE))
4207 4207 align = PAGESIZE;
4208 4208 (void) snprintf(name, sizeof (name),
4209 4209 "kmem_alloc_%lu", cache_size);
4210 4210 cp = kmem_cache_create(name, cache_size, align,
4211 4211 NULL, NULL, NULL, NULL, NULL, KMC_KMEM_ALLOC);
4212 4212
4213 4213 while (size <= cache_size) {
4214 4214 alloc_table[(size - 1) >> shift] = cp;
4215 4215 size += table_unit;
4216 4216 }
4217 4217 }
4218 4218
4219 4219 ASSERT(size > maxbuf); /* i.e. maxbuf <= max(cache_size) */
4220 4220 }
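
The fill loop above is the inverse of the allocation-time lookup: every table slot from one cache size up to the next points at the cache that satisfies requests in that range. Below is a minimal sketch of that lookup, assuming the 8-byte table unit (a shift of 3) used for the small table; it mirrors the loop's index arithmetic rather than copying kmem_alloc() itself.

/*
 * Illustrative lookup matching the fill above: a 40-byte request with an
 * 8-byte table unit indexes slot (40 - 1) >> 3 == 4, which the loop filled
 * with the smallest cache whose buffer size is at least 40.
 */
static kmem_cache_t *
alloc_table_lookup(kmem_cache_t **alloc_table, size_t size, uint_t shift)
{
        return (alloc_table[(size - 1) >> shift]);
}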
4221 4221
4222 4222 static void
4223 4223 kmem_cache_init(int pass, int use_large_pages)
4224 4224 {
4225 4225 int i;
4226 4226 size_t maxbuf;
4227 4227 kmem_magtype_t *mtp;
4228 4228
4229 4229 for (i = 0; i < sizeof (kmem_magtype) / sizeof (*mtp); i++) {
4230 4230 char name[KMEM_CACHE_NAMELEN + 1];
4231 4231
4232 4232 mtp = &kmem_magtype[i];
4233 4233 (void) sprintf(name, "kmem_magazine_%d", mtp->mt_magsize);
4234 4234 mtp->mt_cache = kmem_cache_create(name,
4235 4235 (mtp->mt_magsize + 1) * sizeof (void *),
4236 4236 mtp->mt_align, NULL, NULL, NULL, NULL,
4237 4237 kmem_msb_arena, KMC_NOHASH);
4238 4238 }
4239 4239
4240 4240 kmem_slab_cache = kmem_cache_create("kmem_slab_cache",
4241 4241 sizeof (kmem_slab_t), 0, NULL, NULL, NULL, NULL,
4242 4242 kmem_msb_arena, KMC_NOHASH);
4243 4243
4244 4244 kmem_bufctl_cache = kmem_cache_create("kmem_bufctl_cache",
4245 4245 sizeof (kmem_bufctl_t), 0, NULL, NULL, NULL, NULL,
4246 4246 kmem_msb_arena, KMC_NOHASH);
4247 4247
4248 4248 kmem_bufctl_audit_cache = kmem_cache_create("kmem_bufctl_audit_cache",
4249 4249 sizeof (kmem_bufctl_audit_t), 0, NULL, NULL, NULL, NULL,
4250 4250 kmem_msb_arena, KMC_NOHASH);
4251 4251
4252 4252 if (pass == 2) {
4253 4253 kmem_va_arena = vmem_create("kmem_va",
4254 4254 NULL, 0, PAGESIZE,
4255 4255 vmem_alloc, vmem_free, heap_arena,
4256 4256 8 * PAGESIZE, VM_SLEEP);
4257 4257
4258 4258 if (use_large_pages) {
4259 4259 kmem_default_arena = vmem_xcreate("kmem_default",
4260 4260 NULL, 0, PAGESIZE,
4261 4261 segkmem_alloc_lp, segkmem_free_lp, kmem_va_arena,
4262 4262 0, VMC_DUMPSAFE | VM_SLEEP);
4263 4263 } else {
4264 4264 kmem_default_arena = vmem_create("kmem_default",
4265 4265 NULL, 0, PAGESIZE,
4266 4266 segkmem_alloc, segkmem_free, kmem_va_arena,
4267 4267 0, VMC_DUMPSAFE | VM_SLEEP);
4268 4268 }
4269 4269
4270 4270 /* Figure out what our maximum cache size is */
4271 4271 maxbuf = kmem_max_cached;
4272 4272 if (maxbuf <= KMEM_MAXBUF) {
4273 4273 maxbuf = 0;
4274 4274 kmem_max_cached = KMEM_MAXBUF;
4275 4275 } else {
4276 4276 size_t size = 0;
4277 4277 size_t max =
4278 4278 sizeof (kmem_big_alloc_sizes) / sizeof (int);
4279 4279 /*
4280 4280 * Round maxbuf up to an existing cache size. If maxbuf
4281 4281 * is larger than the largest cache, we truncate it to
4282 4282 * the largest cache's size.
4283 4283 */
4284 4284 for (i = 0; i < max; i++) {
4285 4285 size = kmem_big_alloc_sizes[i];
4286 4286 if (maxbuf <= size)
4287 4287 break;
4288 4288 }
4289 4289 kmem_max_cached = maxbuf = size;
4290 4290 }
4291 4291
4292 4292 /*
4293 4293 * The big alloc table may not be completely overwritten, so
4294 4294 * we clear out any stale cache pointers from the first pass.
4295 4295 */
4296 4296 bzero(kmem_big_alloc_table, sizeof (kmem_big_alloc_table));
4297 4297 } else {
4298 4298 /*
4299 4299 * During the first pass, the kmem_alloc_* caches
4300 4300 * are treated as metadata.
4301 4301 */
4302 4302 kmem_default_arena = kmem_msb_arena;
4303 4303 maxbuf = KMEM_BIG_MAXBUF_32BIT;
4304 4304 }
4305 4305
4306 4306 /*
4307 4307 * Set up the default caches to back kmem_alloc()
4308 4308 */
4309 4309 kmem_alloc_caches_create(
4310 4310 kmem_alloc_sizes, sizeof (kmem_alloc_sizes) / sizeof (int),
4311 4311 kmem_alloc_table, KMEM_MAXBUF, KMEM_ALIGN_SHIFT);
4312 4312
4313 4313 kmem_alloc_caches_create(
4314 4314 kmem_big_alloc_sizes, sizeof (kmem_big_alloc_sizes) / sizeof (int),
4315 4315 kmem_big_alloc_table, maxbuf, KMEM_BIG_SHIFT);
4316 4316
4317 4317 kmem_big_alloc_table_max = maxbuf >> KMEM_BIG_SHIFT;
4318 4318 }
4319 4319
4320 4320 void
4321 4321 kmem_init(void)
4322 4322 {
4323 4323 kmem_cache_t *cp;
4324 4324 int old_kmem_flags = kmem_flags;
4325 4325 int use_large_pages = 0;
4326 4326 size_t maxverify, minfirewall;
4327 4327
4328 4328 kstat_init();
4329 4329
4330 4330 /*
4331 4331 * Small-memory systems (< 24 MB) can't handle kmem_flags overhead.
4332 4332 */
4333 4333 if (physmem < btop(24 << 20) && !(old_kmem_flags & KMF_STICKY))
4334 4334 kmem_flags = 0;
4335 4335
4336 4336 /*
4337 4337 * Don't do firewalled allocations if the heap is less than 1TB
4338 4338 * (i.e. on a 32-bit kernel).
4339 4339 * The resulting VM_NEXTFIT allocations would create too much
4340 4340 * fragmentation in a small heap.
4341 4341 */
4342 4342 #if defined(_LP64)
4343 4343 maxverify = minfirewall = PAGESIZE / 2;
4344 4344 #else
4345 4345 maxverify = minfirewall = ULONG_MAX;
4346 4346 #endif
4347 4347
4348 4348 /* LINTED */
4349 4349 ASSERT(sizeof (kmem_cpu_cache_t) == KMEM_CPU_CACHE_SIZE);
4350 4350
4351 4351 list_create(&kmem_caches, sizeof (kmem_cache_t),
4352 4352 offsetof(kmem_cache_t, cache_link));
4353 4353
4354 4354 kmem_metadata_arena = vmem_create("kmem_metadata", NULL, 0, PAGESIZE,
4355 4355 vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE,
4356 4356 VM_SLEEP | VMC_NO_QCACHE);
4357 4357
4358 4358 kmem_msb_arena = vmem_create("kmem_msb", NULL, 0,
4359 4359 PAGESIZE, segkmem_alloc, segkmem_free, kmem_metadata_arena, 0,
4360 4360 VMC_DUMPSAFE | VM_SLEEP);
4361 4361
4362 4362 kmem_cache_arena = vmem_create("kmem_cache", NULL, 0, KMEM_ALIGN,
4363 4363 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4364 4364
4365 4365 kmem_hash_arena = vmem_create("kmem_hash", NULL, 0, KMEM_ALIGN,
4366 4366 segkmem_alloc, segkmem_free, kmem_metadata_arena, 0, VM_SLEEP);
4367 4367
4368 4368 kmem_log_arena = vmem_create("kmem_log", NULL, 0, KMEM_ALIGN,
4369 4369 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4370 4370
4371 4371 kmem_firewall_va_arena = vmem_create("kmem_firewall_va",
4372 4372 NULL, 0, PAGESIZE,
4373 4373 kmem_firewall_va_alloc, kmem_firewall_va_free, heap_arena,
4374 4374 0, VM_SLEEP);
4375 4375
4376 4376 kmem_firewall_arena = vmem_create("kmem_firewall", NULL, 0, PAGESIZE,
4377 4377 segkmem_alloc, segkmem_free, kmem_firewall_va_arena, 0,
4378 4378 VMC_DUMPSAFE | VM_SLEEP);
4379 4379
4380 4380 /* temporary oversize arena for mod_read_system_file */
4381 4381 kmem_oversize_arena = vmem_create("kmem_oversize", NULL, 0, PAGESIZE,
4382 4382 segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
4383 4383
4384 4384 kmem_reap_interval = 15 * hz;
4385 4385
4386 4386 /*
4387 4387 * Read /etc/system. This is a chicken-and-egg problem because
4388 4388 * kmem_flags may be set in /etc/system, but mod_read_system_file()
4389 4389 * needs to use the allocator. The simplest solution is to create
4390 4390 * all the standard kmem caches, read /etc/system, destroy all the
4391 4391 * caches we just created, and then create them all again in light
4392 4392 * of the (possibly) new kmem_flags and other kmem tunables.
4393 4393 */
4394 4394 kmem_cache_init(1, 0);
4395 4395
4396 4396 mod_read_system_file(boothowto & RB_ASKNAME);
4397 4397
4398 4398 while ((cp = list_tail(&kmem_caches)) != NULL)
4399 4399 kmem_cache_destroy(cp);
4400 4400
4401 4401 vmem_destroy(kmem_oversize_arena);
4402 4402
4403 4403 if (old_kmem_flags & KMF_STICKY)
4404 4404 kmem_flags = old_kmem_flags;
4405 4405
4406 4406 if (!(kmem_flags & KMF_AUDIT))
4407 4407 vmem_seg_size = offsetof(vmem_seg_t, vs_thread);
4408 4408
4409 4409 if (kmem_maxverify == 0)
4410 4410 kmem_maxverify = maxverify;
4411 4411
4412 4412 if (kmem_minfirewall == 0)
4413 4413 kmem_minfirewall = minfirewall;
4414 4414
4415 4415 /*
4416 4416 * give segkmem a chance to figure out if we are using large pages
4417 4417 * for the kernel heap
4418 4418 */
4419 4419 use_large_pages = segkmem_lpsetup();
4420 4420
4421 4421 /*
4422 4422 * To protect against corruption, we keep the actual number of callers
4423 4423 * KMF_LITE records separate from the tunable. We arbitrarily clamp
4424 4424 * to 16, since the overhead for small buffers quickly gets out of
4425 4425 * hand.
4426 4426 *
4427 4427 * The real limit would depend on the needs of the largest KMC_NOHASH
4428 4428 * cache.
4429 4429 */
4430 4430 kmem_lite_count = MIN(MAX(0, kmem_lite_pcs), 16);
4431 4431 kmem_lite_pcs = kmem_lite_count;
4432 4432
4433 4433 /*
4434 4434 * Normally, we firewall oversized allocations when possible, but
4435 4435 * if we are using large pages for kernel memory, and we don't have
4436 4436 * any non-LITE debugging flags set, we want to allocate oversized
4437 4437 * buffers from large pages, and so skip the firewalling.
4438 4438 */
4439 4439 if (use_large_pages &&
4440 4440 ((kmem_flags & KMF_LITE) || !(kmem_flags & KMF_DEBUG))) {
4441 4441 kmem_oversize_arena = vmem_xcreate("kmem_oversize", NULL, 0,
4442 4442 PAGESIZE, segkmem_alloc_lp, segkmem_free_lp, heap_arena,
4443 4443 0, VMC_DUMPSAFE | VM_SLEEP);
4444 4444 } else {
4445 4445 kmem_oversize_arena = vmem_create("kmem_oversize",
4446 4446 NULL, 0, PAGESIZE,
4447 4447 segkmem_alloc, segkmem_free, kmem_minfirewall < ULONG_MAX?
4448 4448 kmem_firewall_va_arena : heap_arena, 0, VMC_DUMPSAFE |
4449 4449 VM_SLEEP);
4450 4450 }
4451 4451
4452 4452 kmem_cache_init(2, use_large_pages);
4453 4453
4454 4454 if (kmem_flags & (KMF_AUDIT | KMF_RANDOMIZE)) {
4455 4455 if (kmem_transaction_log_size == 0)
4456 4456 kmem_transaction_log_size = kmem_maxavail() / 50;
4457 4457 kmem_transaction_log = kmem_log_init(kmem_transaction_log_size);
4458 4458 }
4459 4459
4460 4460 if (kmem_flags & (KMF_CONTENTS | KMF_RANDOMIZE)) {
4461 4461 if (kmem_content_log_size == 0)
4462 4462 kmem_content_log_size = kmem_maxavail() / 50;
4463 4463 kmem_content_log = kmem_log_init(kmem_content_log_size);
4464 4464 }
4465 4465
4466 4466 kmem_failure_log = kmem_log_init(kmem_failure_log_size);
4467 4467
4468 4468 kmem_slab_log = kmem_log_init(kmem_slab_log_size);
4469 4469
4470 4470 /*
4471 4471 * Initialize STREAMS message caches so allocb() is available.
4472 4472 * This allows us to initialize the logging framework (cmn_err(9F),
4473 4473 * strlog(9F), etc) so we can start recording messages.
4474 4474 */
4475 4475 streams_msg_init();
4476 4476
4477 4477 /*
4478 4478 * Initialize the ZSD framework in Zones so modules loaded henceforth
4479 4479 * can register their callbacks.
4480 4480 */
4481 4481 zone_zsd_init();
4482 4482
4483 4483 log_init();
4484 4484 taskq_init();
4485 4485
4486 4486 /*
4487 4487 * Warn about invalid or dangerous values of kmem_flags.
4488 4488 * Always warn about unsupported values.
4489 4489 */
4490 4490 if (((kmem_flags & ~(KMF_AUDIT | KMF_DEADBEEF | KMF_REDZONE |
4491 4491 KMF_CONTENTS | KMF_LITE)) != 0) ||
4492 4492 ((kmem_flags & KMF_LITE) && kmem_flags != KMF_LITE))
4493 4493 cmn_err(CE_WARN, "kmem_flags set to unsupported value 0x%x. "
4494 4494 "See the Solaris Tunable Parameters Reference Manual.",
4495 4495 kmem_flags);
4496 4496
4497 4497 #ifdef DEBUG
4498 4498 if ((kmem_flags & KMF_DEBUG) == 0)
4499 4499 cmn_err(CE_NOTE, "kmem debugging disabled.");
4500 4500 #else
4501 4501 /*
4502 4502 * For non-debug kernels, the only "normal" flags are 0, KMF_LITE,
4503 4503 * KMF_REDZONE, and KMF_CONTENTS (the last because it is only enabled
4504 4504 * if KMF_AUDIT is set). We should warn the user about the performance
4505 4505 * penalty of KMF_AUDIT or KMF_DEADBEEF if they are set and KMF_LITE
4506 4506 * isn't set (since that disables AUDIT).
4507 4507 */
4508 4508 if (!(kmem_flags & KMF_LITE) &&
4509 4509 (kmem_flags & (KMF_AUDIT | KMF_DEADBEEF)) != 0)
4510 4510 cmn_err(CE_WARN, "High-overhead kmem debugging features "
4511 4511 "enabled (kmem_flags = 0x%x). Performance degradation "
4512 4512 "and large memory overhead possible. See the Solaris "
4513 4513 "Tunable Parameters Reference Manual.", kmem_flags);
4514 4514 #endif /* not DEBUG */
4515 4515
4516 4516 kmem_cache_applyall(kmem_cache_magazine_enable, NULL, TQ_SLEEP);
4517 4517
4518 4518 kmem_ready = 1;
4519 4519
4520 4520 /*
4521 4521 * Initialize the platform-specific aligned/DMA memory allocator.
4522 4522 */
4523 4523 ka_init();
4524 4524
4525 4525 /*
4526 4526 * Initialize 32-bit ID cache.
4527 4527 */
4528 4528 id32_init();
4529 4529
4530 4530 /*
4531 4531 * Initialize the networking stack so modules loaded can
4532 4532 * register their callbacks.
4533 4533 */
4534 4534 netstack_init();
4535 4535 }
4536 4536
4537 4537 static void
4538 4538 kmem_move_init(void)
4539 4539 {
4540 4540 kmem_defrag_cache = kmem_cache_create("kmem_defrag_cache",
4541 4541 sizeof (kmem_defrag_t), 0, NULL, NULL, NULL, NULL,
4542 4542 kmem_msb_arena, KMC_NOHASH);
4543 4543 kmem_move_cache = kmem_cache_create("kmem_move_cache",
4544 4544 sizeof (kmem_move_t), 0, NULL, NULL, NULL, NULL,
4545 4545 kmem_msb_arena, KMC_NOHASH);
4546 4546
4547 4547 /*
4548 4548 * kmem guarantees that move callbacks are sequential and that even
4549 4549 * across multiple caches no two moves ever execute simultaneously.
4550 4550 * Move callbacks are processed on a separate taskq so that client code
4551 4551 * does not interfere with internal maintenance tasks.
4552 4552 */
4553 4553 kmem_move_taskq = taskq_create_instance("kmem_move_taskq", 0, 1,
4554 4554 minclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE);
4555 4555 }
4556 4556
4557 4557 void
4558 4558 kmem_thread_init(void)
4559 4559 {
4560 4560 kmem_move_init();
4561 4561 kmem_taskq = taskq_create_instance("kmem_taskq", 0, 1, minclsyspri,
4562 4562 300, INT_MAX, TASKQ_PREPOPULATE);
4563 4563 }
4564 4564
4565 4565 void
4566 4566 kmem_mp_init(void)
4567 4567 {
4568 4568 mutex_enter(&cpu_lock);
4569 4569 register_cpu_setup_func(kmem_cpu_setup, NULL);
4570 4570 mutex_exit(&cpu_lock);
4571 4571
4572 4572 kmem_update_timeout(NULL);
4573 4573
4574 4574 taskq_mp_init();
4575 4575 }
4576 4576
4577 4577 /*
4578 4578 * Return the slab of the allocated buffer, or NULL if the buffer is not
4579 4579 * allocated. This function may be called with a known slab address to determine
4580 4580 * whether or not the buffer is allocated, or with a NULL slab address to obtain
4581 4581 * an allocated buffer's slab.
4582 4582 */
4583 4583 static kmem_slab_t *
4584 4584 kmem_slab_allocated(kmem_cache_t *cp, kmem_slab_t *sp, void *buf)
4585 4585 {
4586 4586 kmem_bufctl_t *bcp, *bufbcp;
4587 4587
4588 4588 ASSERT(MUTEX_HELD(&cp->cache_lock));
4589 4589 ASSERT(sp == NULL || KMEM_SLAB_MEMBER(sp, buf));
4590 4590
4591 4591 if (cp->cache_flags & KMF_HASH) {
4592 4592 for (bcp = *KMEM_HASH(cp, buf);
4593 4593 (bcp != NULL) && (bcp->bc_addr != buf);
4594 4594 bcp = bcp->bc_next) {
4595 4595 continue;
4596 4596 }
4597 4597 ASSERT(sp != NULL && bcp != NULL ? sp == bcp->bc_slab : 1);
4598 4598 return (bcp == NULL ? NULL : bcp->bc_slab);
4599 4599 }
4600 4600
4601 4601 if (sp == NULL) {
4602 4602 sp = KMEM_SLAB(cp, buf);
4603 4603 }
4604 4604 bufbcp = KMEM_BUFCTL(cp, buf);
4605 4605 for (bcp = sp->slab_head;
4606 4606 (bcp != NULL) && (bcp != bufbcp);
4607 4607 bcp = bcp->bc_next) {
4608 4608 continue;
4609 4609 }
4610 4610 return (bcp == NULL ? sp : NULL);
4611 4611 }
4612 4612
4613 4613 static boolean_t
4614 4614 kmem_slab_is_reclaimable(kmem_cache_t *cp, kmem_slab_t *sp, int flags)
4615 4615 {
4616 4616 long refcnt = sp->slab_refcnt;
4617 4617
4618 4618 ASSERT(cp->cache_defrag != NULL);
4619 4619
4620 4620 /*
4621 4621 * For code coverage we want to be able to move an object within the
4622 4622 * same slab (the only partial slab) even if allocating the destination
4623 4623 * buffer resulted in a completely allocated slab.
4624 4624 */
4625 4625 if (flags & KMM_DEBUG) {
4626 4626 return ((flags & KMM_DESPERATE) ||
4627 4627 ((sp->slab_flags & KMEM_SLAB_NOMOVE) == 0));
4628 4628 }
4629 4629
4630 4630 /* If we're desperate, we don't care if the client said NO. */
4631 4631 if (flags & KMM_DESPERATE) {
4632 4632 return (refcnt < sp->slab_chunks); /* any partial */
4633 4633 }
4634 4634
4635 4635 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4636 4636 return (B_FALSE);
4637 4637 }
4638 4638
4639 4639 if ((refcnt == 1) || kmem_move_any_partial) {
4640 4640 return (refcnt < sp->slab_chunks);
4641 4641 }
4642 4642
4643 4643 /*
4644 4644 * The reclaim threshold is adjusted at each kmem_cache_scan() so that
4645 4645 * slabs with a progressively higher percentage of used buffers can be
4646 4646 * reclaimed until the cache as a whole is no longer fragmented.
4647 4647 *
4648 4648 * sp->slab_refcnt kmd_reclaim_numer
4649 4649 * --------------- < ------------------
4650 4650 * sp->slab_chunks KMEM_VOID_FRACTION
4651 4651 */
4652 4652 return ((refcnt * KMEM_VOID_FRACTION) <
4653 4653 (sp->slab_chunks * cp->cache_defrag->kmd_reclaim_numer));
4654 4654 }
4655 4655
4656 4656 static void *
4657 4657 kmem_hunt_mag(kmem_cache_t *cp, kmem_magazine_t *m, int n, void *buf,
4658 4658 void *tbuf)
4659 4659 {
4660 4660 int i; /* magazine round index */
4661 4661
4662 4662 for (i = 0; i < n; i++) {
4663 4663 if (buf == m->mag_round[i]) {
4664 4664 if (cp->cache_flags & KMF_BUFTAG) {
4665 4665 (void) kmem_cache_free_debug(cp, tbuf,
4666 4666 caller());
4667 4667 }
4668 4668 m->mag_round[i] = tbuf;
4669 4669 return (buf);
4670 4670 }
4671 4671 }
4672 4672
4673 4673 return (NULL);
4674 4674 }
4675 4675
4676 4676 /*
4677 4677 * Hunt the magazine layer for the given buffer. If found, the buffer is
4678 4678 * removed from the magazine layer and returned, otherwise NULL is returned.
4679 4679 * The state of the returned buffer is freed and constructed.
4680 4680 */
4681 4681 static void *
4682 4682 kmem_hunt_mags(kmem_cache_t *cp, void *buf)
4683 4683 {
4684 4684 kmem_cpu_cache_t *ccp;
4685 4685 kmem_magazine_t *m;
4686 4686 int cpu_seqid;
4687 4687 int n; /* magazine rounds */
4688 4688 void *tbuf; /* temporary swap buffer */
4689 4689
4690 4690 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4691 4691
4692 4692 /*
4693 4693 * Allocate a buffer to swap with the one we hope to pull out of a
4694 4694 * magazine when found.
4695 4695 */
4696 4696 tbuf = kmem_cache_alloc(cp, KM_NOSLEEP);
4697 4697 if (tbuf == NULL) {
4698 4698 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_alloc_fail);
4699 4699 return (NULL);
4700 4700 }
4701 4701 if (tbuf == buf) {
4702 4702 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_lucky);
4703 4703 if (cp->cache_flags & KMF_BUFTAG) {
4704 4704 (void) kmem_cache_free_debug(cp, buf, caller());
4705 4705 }
4706 4706 return (buf);
4707 4707 }
4708 4708
4709 4709 /* Hunt the depot. */
4710 4710 mutex_enter(&cp->cache_depot_lock);
4711 4711 n = cp->cache_magtype->mt_magsize;
4712 4712 for (m = cp->cache_full.ml_list; m != NULL; m = m->mag_next) {
4713 4713 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4714 4714 mutex_exit(&cp->cache_depot_lock);
4715 4715 return (buf);
4716 4716 }
4717 4717 }
4718 4718 mutex_exit(&cp->cache_depot_lock);
4719 4719
4720 4720 /* Hunt the per-CPU magazines. */
4721 4721 for (cpu_seqid = 0; cpu_seqid < max_ncpus; cpu_seqid++) {
4722 4722 ccp = &cp->cache_cpu[cpu_seqid];
4723 4723
4724 4724 mutex_enter(&ccp->cc_lock);
4725 4725 m = ccp->cc_loaded;
4726 4726 n = ccp->cc_rounds;
4727 4727 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4728 4728 mutex_exit(&ccp->cc_lock);
4729 4729 return (buf);
4730 4730 }
4731 4731 m = ccp->cc_ploaded;
4732 4732 n = ccp->cc_prounds;
4733 4733 if (kmem_hunt_mag(cp, m, n, buf, tbuf) != NULL) {
4734 4734 mutex_exit(&ccp->cc_lock);
4735 4735 return (buf);
4736 4736 }
4737 4737 mutex_exit(&ccp->cc_lock);
4738 4738 }
4739 4739
4740 4740 kmem_cache_free(cp, tbuf);
4741 4741 return (NULL);
4742 4742 }
4743 4743
4744 4744 /*
4745 4745 * May be called from the kmem_move_taskq, from kmem_cache_move_notify_task(),
4746 4746 * or when the buffer is freed.
4747 4747 */
4748 4748 static void
4749 4749 kmem_slab_move_yes(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4750 4750 {
4751 4751 ASSERT(MUTEX_HELD(&cp->cache_lock));
4752 4752 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4753 4753
4754 4754 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4755 4755 return;
4756 4756 }
4757 4757
4758 4758 if (sp->slab_flags & KMEM_SLAB_NOMOVE) {
4759 4759 if (KMEM_SLAB_OFFSET(sp, from_buf) == sp->slab_stuck_offset) {
4760 4760 avl_remove(&cp->cache_partial_slabs, sp);
4761 4761 sp->slab_flags &= ~KMEM_SLAB_NOMOVE;
4762 4762 sp->slab_stuck_offset = (uint32_t)-1;
4763 4763 avl_add(&cp->cache_partial_slabs, sp);
4764 4764 }
4765 4765 } else {
4766 4766 sp->slab_later_count = 0;
4767 4767 sp->slab_stuck_offset = (uint32_t)-1;
4768 4768 }
4769 4769 }
4770 4770
4771 4771 static void
4772 4772 kmem_slab_move_no(kmem_cache_t *cp, kmem_slab_t *sp, void *from_buf)
4773 4773 {
4774 4774 ASSERT(taskq_member(kmem_move_taskq, curthread));
4775 4775 ASSERT(MUTEX_HELD(&cp->cache_lock));
4776 4776 ASSERT(KMEM_SLAB_MEMBER(sp, from_buf));
4777 4777
4778 4778 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4779 4779 return;
4780 4780 }
4781 4781
4782 4782 avl_remove(&cp->cache_partial_slabs, sp);
4783 4783 sp->slab_later_count = 0;
4784 4784 sp->slab_flags |= KMEM_SLAB_NOMOVE;
4785 4785 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp, from_buf);
4786 4786 avl_add(&cp->cache_partial_slabs, sp);
4787 4787 }
4788 4788
4789 4789 static void kmem_move_end(kmem_cache_t *, kmem_move_t *);
4790 4790
4791 4791 /*
4792 4792 * The move callback takes two buffer addresses, the buffer to be moved, and a
4793 4793 * newly allocated and constructed buffer selected by kmem as the destination.
4794 4794 * It also takes the size of the buffer and an optional user argument specified
4795 4795 * at cache creation time. kmem guarantees that the buffer to be moved has not
4796 4796 * been unmapped by the virtual memory subsystem. Beyond that, it cannot
4797 4797 * guarantee the present whereabouts of the buffer to be moved, so it is up to
4798 4798 * the client to safely determine whether or not it is still using the buffer.
4799 4799 * The client must not free either of the buffers passed to the move callback,
4800 4800 * since kmem wants to free them directly to the slab layer. The client response
4801 4801 * tells kmem which of the two buffers to free:
4802 4802 *
4803 4803 * YES kmem frees the old buffer (the move was successful)
4804 4804 * NO kmem frees the new buffer, marks the slab of the old buffer
4805 4805 * non-reclaimable to avoid bothering the client again
4806 4806 * LATER kmem frees the new buffer, increments slab_later_count
4807 4807 * DONT_KNOW kmem frees the new buffer, searches mags for the old buffer
4808 4808 * DONT_NEED kmem frees both the old buffer and the new buffer
4809 4809 *
4810 4810 * The pending callback argument now being processed contains both of the
4811 4811 * buffers (old and new) passed to the move callback function, the slab of the
4812 4812 * old buffer, and flags related to the move request, such as whether or not the
4813 4813 * system was desperate for memory.
4814 4814 *
4815 4815 * Slabs are not freed while there is a pending callback, but instead are kept
4816 4816 * on a deadlist, which is drained after the last callback completes. This means
4817 4817 * that slabs are safe to access until kmem_move_end(), no matter how many of
4818 4818 * their buffers have been freed. Once slab_refcnt reaches zero, it stays at
4819 4819 * zero for as long as the slab remains on the deadlist and until the slab is
4820 4820 * freed.
4821 4821 */
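
To make the contract above concrete, here is a sketch of a client callback (not part of kmem.c): it recognizes its own live objects, declines politely when an object cannot be moved right now, and leaves the doubtful cases to kmem. The foo type, the FOO_MAGIC convention, and the locking are hypothetical; only the KMEM_CBRC_* responses and the callback signature come from this file, and the destination buffer is assumed to have been constructed by the cache's constructor.

typedef struct foo {
        uint64_t foo_magic;     /* written at allocation, cleared at free */
        boolean_t foo_nomove;   /* object is pinned and can never move */
        kmutex_t foo_lock;
        /* ... payload ... */
} foo_t;

#define FOO_MAGIC       0xf00f00f00f00f00dULL

/*ARGSUSED*/
static kmem_cbrc_t
foo_move(void *old, void *new, size_t size, void *arg)
{
        foo_t *ofp = old;
        foo_t *nfp = new;

        if (ofp->foo_magic != FOO_MAGIC)
                return (KMEM_CBRC_DONT_KNOW);   /* kmem hunts the magazines */
        if (ofp->foo_nomove)
                return (KMEM_CBRC_NO);          /* slab marked non-reclaimable */
        if (!mutex_tryenter(&ofp->foo_lock))
                return (KMEM_CBRC_LATER);       /* busy; ask again later */

        /* copy the payload; a real client would also fix up external refs */
        nfp->foo_magic = ofp->foo_magic;
        nfp->foo_nomove = ofp->foo_nomove;
        mutex_exit(&ofp->foo_lock);
        return (KMEM_CBRC_YES);                 /* kmem frees the old buffer */
}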
4822 4822 static void
4823 4823 kmem_move_buffer(kmem_move_t *callback)
4824 4824 {
4825 4825 kmem_cbrc_t response;
4826 4826 kmem_slab_t *sp = callback->kmm_from_slab;
4827 4827 kmem_cache_t *cp = sp->slab_cache;
4828 4828 boolean_t free_on_slab;
4829 4829
4830 4830 ASSERT(taskq_member(kmem_move_taskq, curthread));
4831 4831 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4832 4832 ASSERT(KMEM_SLAB_MEMBER(sp, callback->kmm_from_buf));
4833 4833
4834 4834 /*
4835 4835 * The number of allocated buffers on the slab may have changed since we
4836 4836 * last checked the slab's reclaimability (when the pending move was
4837 4837 * enqueued), or the client may have responded NO when asked to move
4838 4838 * another buffer on the same slab.
4839 4839 */
4840 4840 if (!kmem_slab_is_reclaimable(cp, sp, callback->kmm_flags)) {
4841 4841 KMEM_STAT_ADD(kmem_move_stats.kms_no_longer_reclaimable);
4842 4842 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4843 4843 kmem_move_stats.kms_notify_no_longer_reclaimable);
4844 4844 kmem_slab_free(cp, callback->kmm_to_buf);
4845 4845 kmem_move_end(cp, callback);
4846 4846 return;
4847 4847 }
4848 4848
4849 4849 /*
4850 4850 * Hunting magazines is expensive, so we'll wait to do that until the
4851 4851 * client responds KMEM_CBRC_DONT_KNOW. However, checking the slab layer
4852 4852 * is cheap, so we might as well do that here in case we can avoid
4853 4853 * bothering the client.
4854 4854 */
4855 4855 mutex_enter(&cp->cache_lock);
4856 4856 free_on_slab = (kmem_slab_allocated(cp, sp,
4857 4857 callback->kmm_from_buf) == NULL);
4858 4858 mutex_exit(&cp->cache_lock);
4859 4859
4860 4860 if (free_on_slab) {
4861 4861 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4862 4862 kmem_slab_free(cp, callback->kmm_to_buf);
4863 4863 kmem_move_end(cp, callback);
4864 4864 return;
4865 4865 }
4866 4866
4867 4867 if (cp->cache_flags & KMF_BUFTAG) {
4868 4868 /*
4869 4869 * Make kmem_cache_alloc_debug() apply the constructor for us.
4870 4870 */
4871 4871 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4872 4872 KM_NOSLEEP, 1, caller()) != 0) {
4873 4873 KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4874 4874 kmem_move_end(cp, callback);
4875 4875 return;
4876 4876 }
4877 4877 } else if (cp->cache_constructor != NULL &&
4878 4878 cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4879 4879 KM_NOSLEEP) != 0) {
4880 - atomic_add_64(&cp->cache_alloc_fail, 1);
4880 + atomic_inc_64(&cp->cache_alloc_fail);
4881 4881 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4882 4882 kmem_slab_free(cp, callback->kmm_to_buf);
4883 4883 kmem_move_end(cp, callback);
4884 4884 return;
4885 4885 }
4886 4886
4887 4887 KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4888 4888 KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4889 4889 kmem_move_stats.kms_notify_callbacks);
4890 4890 cp->cache_defrag->kmd_callbacks++;
4891 4891 cp->cache_defrag->kmd_thread = curthread;
4892 4892 cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4893 4893 cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4894 4894 DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4895 4895 callback);
4896 4896
4897 4897 response = cp->cache_move(callback->kmm_from_buf,
4898 4898 callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4899 4899
4900 4900 DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,
4901 4901 callback, kmem_cbrc_t, response);
4902 4902 cp->cache_defrag->kmd_thread = NULL;
4903 4903 cp->cache_defrag->kmd_from_buf = NULL;
4904 4904 cp->cache_defrag->kmd_to_buf = NULL;
4905 4905
4906 4906 if (response == KMEM_CBRC_YES) {
4907 4907 KMEM_STAT_ADD(kmem_move_stats.kms_yes);
4908 4908 cp->cache_defrag->kmd_yes++;
4909 4909 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4910 4910 /* slab safe to access until kmem_move_end() */
4911 4911 if (sp->slab_refcnt == 0)
4912 4912 cp->cache_defrag->kmd_slabs_freed++;
4913 4913 mutex_enter(&cp->cache_lock);
4914 4914 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4915 4915 mutex_exit(&cp->cache_lock);
4916 4916 kmem_move_end(cp, callback);
4917 4917 return;
4918 4918 }
4919 4919
4920 4920 switch (response) {
4921 4921 case KMEM_CBRC_NO:
4922 4922 KMEM_STAT_ADD(kmem_move_stats.kms_no);
4923 4923 cp->cache_defrag->kmd_no++;
4924 4924 mutex_enter(&cp->cache_lock);
4925 4925 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4926 4926 mutex_exit(&cp->cache_lock);
4927 4927 break;
4928 4928 case KMEM_CBRC_LATER:
4929 4929 KMEM_STAT_ADD(kmem_move_stats.kms_later);
4930 4930 cp->cache_defrag->kmd_later++;
4931 4931 mutex_enter(&cp->cache_lock);
4932 4932 if (!KMEM_SLAB_IS_PARTIAL(sp)) {
4933 4933 mutex_exit(&cp->cache_lock);
4934 4934 break;
4935 4935 }
4936 4936
4937 4937 if (++sp->slab_later_count >= KMEM_DISBELIEF) {
4938 4938 KMEM_STAT_ADD(kmem_move_stats.kms_disbelief);
4939 4939 kmem_slab_move_no(cp, sp, callback->kmm_from_buf);
4940 4940 } else if (!(sp->slab_flags & KMEM_SLAB_NOMOVE)) {
4941 4941 sp->slab_stuck_offset = KMEM_SLAB_OFFSET(sp,
4942 4942 callback->kmm_from_buf);
4943 4943 }
4944 4944 mutex_exit(&cp->cache_lock);
4945 4945 break;
4946 4946 case KMEM_CBRC_DONT_NEED:
4947 4947 KMEM_STAT_ADD(kmem_move_stats.kms_dont_need);
4948 4948 cp->cache_defrag->kmd_dont_need++;
4949 4949 kmem_slab_free_constructed(cp, callback->kmm_from_buf, B_FALSE);
4950 4950 if (sp->slab_refcnt == 0)
4951 4951 cp->cache_defrag->kmd_slabs_freed++;
4952 4952 mutex_enter(&cp->cache_lock);
4953 4953 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4954 4954 mutex_exit(&cp->cache_lock);
4955 4955 break;
4956 4956 case KMEM_CBRC_DONT_KNOW:
4957 4957 KMEM_STAT_ADD(kmem_move_stats.kms_dont_know);
4958 4958 cp->cache_defrag->kmd_dont_know++;
4959 4959 if (kmem_hunt_mags(cp, callback->kmm_from_buf) != NULL) {
4960 4960 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_mag);
4961 4961 cp->cache_defrag->kmd_hunt_found++;
4962 4962 kmem_slab_free_constructed(cp, callback->kmm_from_buf,
4963 4963 B_TRUE);
4964 4964 if (sp->slab_refcnt == 0)
4965 4965 cp->cache_defrag->kmd_slabs_freed++;
4966 4966 mutex_enter(&cp->cache_lock);
4967 4967 kmem_slab_move_yes(cp, sp, callback->kmm_from_buf);
4968 4968 mutex_exit(&cp->cache_lock);
4969 4969 }
4970 4970 break;
4971 4971 default:
4972 4972 panic("'%s' (%p) unexpected move callback response %d\n",
4973 4973 cp->cache_name, (void *)cp, response);
4974 4974 }
4975 4975
4976 4976 kmem_slab_free_constructed(cp, callback->kmm_to_buf, B_FALSE);
4977 4977 kmem_move_end(cp, callback);
4978 4978 }
4979 4979
4980 4980 /* Return B_FALSE if there is insufficient memory for the move request. */
4981 4981 static boolean_t
4982 4982 kmem_move_begin(kmem_cache_t *cp, kmem_slab_t *sp, void *buf, int flags)
4983 4983 {
4984 4984 void *to_buf;
4985 4985 avl_index_t index;
4986 4986 kmem_move_t *callback, *pending;
4987 4987 ulong_t n;
4988 4988
4989 4989 ASSERT(taskq_member(kmem_taskq, curthread));
4990 4990 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
4991 4991 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
4992 4992
4993 4993 callback = kmem_cache_alloc(kmem_move_cache, KM_NOSLEEP);
4994 4994 if (callback == NULL) {
4995 4995 KMEM_STAT_ADD(kmem_move_stats.kms_callback_alloc_fail);
4996 4996 return (B_FALSE);
4997 4997 }
4998 4998
4999 4999 callback->kmm_from_slab = sp;
5000 5000 callback->kmm_from_buf = buf;
5001 5001 callback->kmm_flags = flags;
5002 5002
5003 5003 mutex_enter(&cp->cache_lock);
5004 5004
5005 5005 n = avl_numnodes(&cp->cache_partial_slabs);
5006 5006 if ((n == 0) || ((n == 1) && !(flags & KMM_DEBUG))) {
5007 5007 mutex_exit(&cp->cache_lock);
5008 5008 kmem_cache_free(kmem_move_cache, callback);
5009 5009 return (B_TRUE); /* there is no need for the move request */
5010 5010 }
5011 5011
5012 5012 pending = avl_find(&cp->cache_defrag->kmd_moves_pending, buf, &index);
5013 5013 if (pending != NULL) {
5014 5014 /*
5015 5015 * If the move is already pending and we're desperate now,
5016 5016 * update the move flags.
5017 5017 */
5018 5018 if (flags & KMM_DESPERATE) {
5019 5019 pending->kmm_flags |= KMM_DESPERATE;
5020 5020 }
5021 5021 mutex_exit(&cp->cache_lock);
5022 5022 KMEM_STAT_ADD(kmem_move_stats.kms_already_pending);
5023 5023 kmem_cache_free(kmem_move_cache, callback);
5024 5024 return (B_TRUE);
5025 5025 }
5026 5026
5027 5027 to_buf = kmem_slab_alloc_impl(cp, avl_first(&cp->cache_partial_slabs),
5028 5028 B_FALSE);
5029 5029 callback->kmm_to_buf = to_buf;
5030 5030 avl_insert(&cp->cache_defrag->kmd_moves_pending, callback, index);
5031 5031
5032 5032 mutex_exit(&cp->cache_lock);
5033 5033
5034 5034 if (!taskq_dispatch(kmem_move_taskq, (task_func_t *)kmem_move_buffer,
5035 5035 callback, TQ_NOSLEEP)) {
5036 5036 KMEM_STAT_ADD(kmem_move_stats.kms_callback_taskq_fail);
5037 5037 mutex_enter(&cp->cache_lock);
5038 5038 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5039 5039 mutex_exit(&cp->cache_lock);
5040 5040 kmem_slab_free(cp, to_buf);
5041 5041 kmem_cache_free(kmem_move_cache, callback);
5042 5042 return (B_FALSE);
5043 5043 }
5044 5044
5045 5045 return (B_TRUE);
5046 5046 }
5047 5047
5048 5048 static void
5049 5049 kmem_move_end(kmem_cache_t *cp, kmem_move_t *callback)
5050 5050 {
5051 5051 avl_index_t index;
5052 5052
5053 5053 ASSERT(cp->cache_defrag != NULL);
5054 5054 ASSERT(taskq_member(kmem_move_taskq, curthread));
5055 5055 ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
5056 5056
5057 5057 mutex_enter(&cp->cache_lock);
5058 5058 VERIFY(avl_find(&cp->cache_defrag->kmd_moves_pending,
5059 5059 callback->kmm_from_buf, &index) != NULL);
5060 5060 avl_remove(&cp->cache_defrag->kmd_moves_pending, callback);
5061 5061 if (avl_is_empty(&cp->cache_defrag->kmd_moves_pending)) {
5062 5062 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5063 5063 kmem_slab_t *sp;
5064 5064
5065 5065 /*
5066 5066 * The last pending move completed. Release all slabs from the
5067 5067 * front of the dead list except for any slab at the tail that
5068 5068 * needs to be released from the context of kmem_move_buffers().
5069 5069 * kmem deferred unmapping the buffers on these slabs in order
5070 5070 * to guarantee that buffers passed to the move callback have
5071 5071 * been touched only by kmem or by the client itself.
5072 5072 */
5073 5073 while ((sp = list_remove_head(deadlist)) != NULL) {
5074 5074 if (sp->slab_flags & KMEM_SLAB_MOVE_PENDING) {
5075 5075 list_insert_tail(deadlist, sp);
5076 5076 break;
5077 5077 }
5078 5078 cp->cache_defrag->kmd_deadcount--;
5079 5079 cp->cache_slab_destroy++;
5080 5080 mutex_exit(&cp->cache_lock);
5081 5081 kmem_slab_destroy(cp, sp);
5082 5082 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5083 5083 mutex_enter(&cp->cache_lock);
5084 5084 }
5085 5085 }
5086 5086 mutex_exit(&cp->cache_lock);
5087 5087 kmem_cache_free(kmem_move_cache, callback);
5088 5088 }
5089 5089
5090 5090 /*
5091 5091 * Move buffers from least used slabs first by scanning backwards from the end
5092 5092 * of the partial slab list. Scan at most max_scan candidate slabs and move
5093 5093 * buffers from at most max_slabs slabs (0 for all partial slabs in both cases).
5094 5094 * If desperate to reclaim memory, move buffers from any partial slab, otherwise
5095 5095 * skip slabs with a ratio of allocated buffers at or above the current
5096 5096 * threshold. Return the number of unskipped slabs (at most max_slabs, -1 if the
5097 5097 * scan is aborted) so that the caller can adjust the reclaimability threshold
5098 5098 * depending on how many reclaimable slabs it finds.
5099 5099 *
5100 5100 * kmem_move_buffers() drops and reacquires cache_lock every time it issues a
5101 5101 * move request, since it is not valid for kmem_move_begin() to call
5102 5102 * kmem_cache_alloc() or taskq_dispatch() with cache_lock held.
5103 5103 */
5104 5104 static int
5105 5105 kmem_move_buffers(kmem_cache_t *cp, size_t max_scan, size_t max_slabs,
5106 5106 int flags)
5107 5107 {
5108 5108 kmem_slab_t *sp;
5109 5109 void *buf;
5110 5110 int i, j; /* slab index, buffer index */
5111 5111 int s; /* reclaimable slabs */
5112 5112 int b; /* allocated (movable) buffers on reclaimable slab */
5113 5113 boolean_t success;
5114 5114 int refcnt;
5115 5115 int nomove;
5116 5116
5117 5117 ASSERT(taskq_member(kmem_taskq, curthread));
5118 5118 ASSERT(MUTEX_HELD(&cp->cache_lock));
5119 5119 ASSERT(kmem_move_cache != NULL);
5120 5120 ASSERT(cp->cache_move != NULL && cp->cache_defrag != NULL);
5121 5121 ASSERT((flags & KMM_DEBUG) ? !avl_is_empty(&cp->cache_partial_slabs) :
5122 5122 avl_numnodes(&cp->cache_partial_slabs) > 1);
5123 5123
5124 5124 if (kmem_move_blocked) {
5125 5125 return (0);
5126 5126 }
5127 5127
5128 5128 if (kmem_move_fulltilt) {
5129 5129 flags |= KMM_DESPERATE;
5130 5130 }
5131 5131
5132 5132 if (max_scan == 0 || (flags & KMM_DESPERATE)) {
5133 5133 /*
5134 5134 * Scan as many slabs as needed to find the desired number of
5135 5135 * candidate slabs.
5136 5136 */
5137 5137 max_scan = (size_t)-1;
5138 5138 }
5139 5139
5140 5140 if (max_slabs == 0 || (flags & KMM_DESPERATE)) {
5141 5141 /* Find as many candidate slabs as possible. */
5142 5142 max_slabs = (size_t)-1;
5143 5143 }
5144 5144
5145 5145 sp = avl_last(&cp->cache_partial_slabs);
5146 5146 ASSERT(KMEM_SLAB_IS_PARTIAL(sp));
5147 5147 for (i = 0, s = 0; (i < max_scan) && (s < max_slabs) && (sp != NULL) &&
5148 5148 ((sp != avl_first(&cp->cache_partial_slabs)) ||
5149 5149 (flags & KMM_DEBUG));
5150 5150 sp = AVL_PREV(&cp->cache_partial_slabs, sp), i++) {
5151 5151
5152 5152 if (!kmem_slab_is_reclaimable(cp, sp, flags)) {
5153 5153 continue;
5154 5154 }
5155 5155 s++;
5156 5156
5157 5157 /* Look for allocated buffers to move. */
5158 5158 for (j = 0, b = 0, buf = sp->slab_base;
5159 5159 (j < sp->slab_chunks) && (b < sp->slab_refcnt);
5160 5160 buf = (((char *)buf) + cp->cache_chunksize), j++) {
5161 5161
5162 5162 if (kmem_slab_allocated(cp, sp, buf) == NULL) {
5163 5163 continue;
5164 5164 }
5165 5165
5166 5166 b++;
5167 5167
5168 5168 /*
5169 5169 * Prevent the slab from being destroyed while we drop
5170 5170 * cache_lock and while the pending move is not yet
5171 5171 * registered. Flag the pending move while
5172 5172 * kmd_moves_pending may still be empty, since we can't
5173 5173 * yet rely on a non-zero pending move count to prevent
5174 5174 * the slab from being destroyed.
5175 5175 */
5176 5176 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5177 5177 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5178 5178 /*
5179 5179 * Recheck refcnt and nomove after reacquiring the lock,
5180 5180 * since these control the order of partial slabs, and
5181 5181 * we want to know if we can pick up the scan where we
5182 5182 * left off.
5183 5183 */
5184 5184 refcnt = sp->slab_refcnt;
5185 5185 nomove = (sp->slab_flags & KMEM_SLAB_NOMOVE);
5186 5186 mutex_exit(&cp->cache_lock);
5187 5187
5188 5188 success = kmem_move_begin(cp, sp, buf, flags);
5189 5189
5190 5190 /*
5191 5191 * Now, before the lock is reacquired, kmem could
5192 5192 * process all pending move requests and purge the
5193 5193 * deadlist, so that upon reacquiring the lock, sp has
5194 5194 * been remapped. Or, the client may free all the
5195 5195 * objects on the slab while the pending moves are still
5196 5196 * on the taskq. Therefore, the KMEM_SLAB_MOVE_PENDING
5197 5197 * flag causes the slab to be put at the end of the
5198 5198 * deadlist and prevents it from being destroyed, since
5199 5199 * we plan to destroy it here after reacquiring the
5200 5200 * lock.
5201 5201 */
5202 5202 mutex_enter(&cp->cache_lock);
5203 5203 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5204 5204 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5205 5205
5206 5206 if (sp->slab_refcnt == 0) {
5207 5207 list_t *deadlist =
5208 5208 &cp->cache_defrag->kmd_deadlist;
5209 5209 list_remove(deadlist, sp);
5210 5210
5211 5211 if (!avl_is_empty(
5212 5212 &cp->cache_defrag->kmd_moves_pending)) {
5213 5213 /*
5214 5214 * A pending move makes it unsafe to
5215 5215 * destroy the slab, because even though
5216 5216 * the move is no longer needed, the
5217 5217 * context where that is determined
5218 5218 * requires the slab to exist.
5219 5219 * Fortunately, a pending move also
5220 5220 * means we don't need to destroy the
5221 5221 * slab here, since it will get
5222 5222 * destroyed along with any other slabs
5223 5223 * on the deadlist after the last
5224 5224 * pending move completes.
5225 5225 */
5226 5226 list_insert_head(deadlist, sp);
5227 5227 KMEM_STAT_ADD(kmem_move_stats.
5228 5228 kms_endscan_slab_dead);
5229 5229 return (-1);
5230 5230 }
5231 5231
5232 5232 /*
5233 5233 * Destroy the slab now if it was completely
5234 5234 * freed while we dropped cache_lock and there
5235 5235 * are no pending moves. Since slab_refcnt
5236 5236 * cannot change once it reaches zero, no new
5237 5237 * pending moves from that slab are possible.
5238 5238 */
5239 5239 cp->cache_defrag->kmd_deadcount--;
5240 5240 cp->cache_slab_destroy++;
5241 5241 mutex_exit(&cp->cache_lock);
5242 5242 kmem_slab_destroy(cp, sp);
5243 5243 KMEM_STAT_ADD(kmem_move_stats.
5244 5244 kms_dead_slabs_freed);
5245 5245 KMEM_STAT_ADD(kmem_move_stats.
5246 5246 kms_endscan_slab_destroyed);
5247 5247 mutex_enter(&cp->cache_lock);
5248 5248 /*
5249 5249 * Since we can't pick up the scan where we left
5250 5250 * off, abort the scan and say nothing about the
5251 5251 * number of reclaimable slabs.
5252 5252 */
5253 5253 return (-1);
5254 5254 }
5255 5255
5256 5256 if (!success) {
5257 5257 /*
5258 5258 * Abort the scan if there is not enough memory
5259 5259 * for the request and say nothing about the
5260 5260 * number of reclaimable slabs.
5261 5261 */
5262 5262 KMEM_STAT_COND_ADD(s < max_slabs,
5263 5263 kmem_move_stats.kms_endscan_nomem);
5264 5264 return (-1);
5265 5265 }
5266 5266
5267 5267 /*
5268 5268 * The slab's position changed while the lock was
5269 5269 * dropped, so we don't know where we are in the
5270 5270 * sequence any more.
5271 5271 */
5272 5272 if (sp->slab_refcnt != refcnt) {
5273 5273 /*
5274 5274 * If this is a KMM_DEBUG move, the slab_refcnt
5275 5275 * may have changed because we allocated a
5276 5276 * destination buffer on the same slab. In that
5277 5277 * case, we're not interested in counting it.
5278 5278 */
5279 5279 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5280 5280 (s < max_slabs),
5281 5281 kmem_move_stats.kms_endscan_refcnt_changed);
5282 5282 return (-1);
5283 5283 }
5284 5284 if ((sp->slab_flags & KMEM_SLAB_NOMOVE) != nomove) {
5285 5285 KMEM_STAT_COND_ADD(s < max_slabs,
5286 5286 kmem_move_stats.kms_endscan_nomove_changed);
5287 5287 return (-1);
5288 5288 }
5289 5289
5290 5290 /*
5291 5291 * Generating a move request allocates a destination
5292 5292 * buffer from the slab layer, bumping the first partial
5293 5293 * slab if it is completely allocated. If the current
5294 5294 * slab becomes the first partial slab as a result, we
5295 5295 * can't continue to scan backwards.
5296 5296 *
5297 5297 * If this is a KMM_DEBUG move and we allocated the
5298 5298 * destination buffer from the last partial slab, then
5299 5299 * the buffer we're moving is on the same slab and our
5300 5300 * slab_refcnt has changed, causing us to return before
5301 5301 * reaching here if there are no partial slabs left.
5302 5302 */
5303 5303 ASSERT(!avl_is_empty(&cp->cache_partial_slabs));
5304 5304 if (sp == avl_first(&cp->cache_partial_slabs)) {
5305 5305 /*
5306 5306 * We're not interested in a second KMM_DEBUG
5307 5307 * move.
5308 5308 */
5309 5309 goto end_scan;
5310 5310 }
5311 5311 }
5312 5312 }
5313 5313 end_scan:
5314 5314
5315 5315 KMEM_STAT_COND_ADD(!(flags & KMM_DEBUG) &&
5316 5316 (s < max_slabs) &&
5317 5317 (sp == avl_first(&cp->cache_partial_slabs)),
5318 5318 kmem_move_stats.kms_endscan_freelist);
5319 5319
5320 5320 return (s);
5321 5321 }
5322 5322
5323 5323 typedef struct kmem_move_notify_args {
5324 5324 kmem_cache_t *kmna_cache;
5325 5325 void *kmna_buf;
5326 5326 } kmem_move_notify_args_t;
5327 5327
5328 5328 static void
5329 5329 kmem_cache_move_notify_task(void *arg)
5330 5330 {
5331 5331 kmem_move_notify_args_t *args = arg;
5332 5332 kmem_cache_t *cp = args->kmna_cache;
5333 5333 void *buf = args->kmna_buf;
5334 5334 kmem_slab_t *sp;
5335 5335
5336 5336 ASSERT(taskq_member(kmem_taskq, curthread));
5337 5337 ASSERT(list_link_active(&cp->cache_link));
5338 5338
5339 5339 kmem_free(args, sizeof (kmem_move_notify_args_t));
5340 5340 mutex_enter(&cp->cache_lock);
5341 5341 sp = kmem_slab_allocated(cp, NULL, buf);
5342 5342
5343 5343 /* Ignore the notification if the buffer is no longer allocated. */
5344 5344 if (sp == NULL) {
5345 5345 mutex_exit(&cp->cache_lock);
5346 5346 return;
5347 5347 }
5348 5348
5349 5349 /* Ignore the notification if there's no reason to move the buffer. */
5350 5350 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5351 5351 /*
5352 5352 * So far the notification is not ignored. Ignore the
5353 5353 * notification if the slab is not marked by an earlier refusal
5354 5354 * to move a buffer.
5355 5355 */
5356 5356 if (!(sp->slab_flags & KMEM_SLAB_NOMOVE) &&
5357 5357 (sp->slab_later_count == 0)) {
5358 5358 mutex_exit(&cp->cache_lock);
5359 5359 return;
5360 5360 }
5361 5361
5362 5362 kmem_slab_move_yes(cp, sp, buf);
5363 5363 ASSERT(!(sp->slab_flags & KMEM_SLAB_MOVE_PENDING));
5364 5364 sp->slab_flags |= KMEM_SLAB_MOVE_PENDING;
5365 5365 mutex_exit(&cp->cache_lock);
5366 5366 /* see kmem_move_buffers() about dropping the lock */
5367 5367 (void) kmem_move_begin(cp, sp, buf, KMM_NOTIFY);
5368 5368 mutex_enter(&cp->cache_lock);
5369 5369 ASSERT(sp->slab_flags & KMEM_SLAB_MOVE_PENDING);
5370 5370 sp->slab_flags &= ~KMEM_SLAB_MOVE_PENDING;
5371 5371 if (sp->slab_refcnt == 0) {
5372 5372 list_t *deadlist = &cp->cache_defrag->kmd_deadlist;
5373 5373 list_remove(deadlist, sp);
5374 5374
5375 5375 if (!avl_is_empty(
5376 5376 &cp->cache_defrag->kmd_moves_pending)) {
5377 5377 list_insert_head(deadlist, sp);
5378 5378 mutex_exit(&cp->cache_lock);
5379 5379 KMEM_STAT_ADD(kmem_move_stats.
5380 5380 kms_notify_slab_dead);
5381 5381 return;
5382 5382 }
5383 5383
5384 5384 cp->cache_defrag->kmd_deadcount--;
5385 5385 cp->cache_slab_destroy++;
5386 5386 mutex_exit(&cp->cache_lock);
5387 5387 kmem_slab_destroy(cp, sp);
5388 5388 KMEM_STAT_ADD(kmem_move_stats.kms_dead_slabs_freed);
5389 5389 KMEM_STAT_ADD(kmem_move_stats.
5390 5390 kms_notify_slab_destroyed);
5391 5391 return;
5392 5392 }
5393 5393 } else {
5394 5394 kmem_slab_move_yes(cp, sp, buf);
5395 5395 }
5396 5396 mutex_exit(&cp->cache_lock);
5397 5397 }
5398 5398
5399 5399 void
5400 5400 kmem_cache_move_notify(kmem_cache_t *cp, void *buf)
5401 5401 {
5402 5402 kmem_move_notify_args_t *args;
5403 5403
5404 5404 KMEM_STAT_ADD(kmem_move_stats.kms_notify);
5405 5405 args = kmem_alloc(sizeof (kmem_move_notify_args_t), KM_NOSLEEP);
5406 5406 if (args != NULL) {
5407 5407 args->kmna_cache = cp;
5408 5408 args->kmna_buf = buf;
5409 5409 if (!taskq_dispatch(kmem_taskq,
5410 5410 (task_func_t *)kmem_cache_move_notify_task, args,
5411 5411 TQ_NOSLEEP))
5412 5412 kmem_free(args, sizeof (kmem_move_notify_args_t));
5413 5413 }
5414 5414 }
5415 5415
5416 5416 static void
5417 5417 kmem_cache_defrag(kmem_cache_t *cp)
5418 5418 {
5419 5419 size_t n;
5420 5420
5421 5421 ASSERT(cp->cache_defrag != NULL);
5422 5422
5423 5423 mutex_enter(&cp->cache_lock);
5424 5424 n = avl_numnodes(&cp->cache_partial_slabs);
5425 5425 if (n > 1) {
5426 5426 /* kmem_move_buffers() drops and reacquires cache_lock */
5427 5427 KMEM_STAT_ADD(kmem_move_stats.kms_defrags);
5428 5428 cp->cache_defrag->kmd_defrags++;
5429 5429 (void) kmem_move_buffers(cp, n, 0, KMM_DESPERATE);
5430 5430 }
5431 5431 mutex_exit(&cp->cache_lock);
5432 5432 }
5433 5433
5434 5434 /* Is this cache above the fragmentation threshold? */
5435 5435 static boolean_t
5436 5436 kmem_cache_frag_threshold(kmem_cache_t *cp, uint64_t nfree)
5437 5437 {
5438 5438 /*
5439 5439 * nfree kmem_frag_numer
5440 5440 * ------------------ > ---------------
5441 5441 * cp->cache_buftotal kmem_frag_denom
5442 5442 */
5443 5443 return ((nfree * kmem_frag_denom) >
5444 5444 (cp->cache_buftotal * kmem_frag_numer));
5445 5445 }
5446 5446
5447 5447 static boolean_t
5448 5448 kmem_cache_is_fragmented(kmem_cache_t *cp, boolean_t *doreap)
5449 5449 {
5450 5450 boolean_t fragmented;
5451 5451 uint64_t nfree;
5452 5452
5453 5453 ASSERT(MUTEX_HELD(&cp->cache_lock));
5454 5454 *doreap = B_FALSE;
5455 5455
5456 5456 if (kmem_move_fulltilt) {
5457 5457 if (avl_numnodes(&cp->cache_partial_slabs) > 1) {
5458 5458 return (B_TRUE);
5459 5459 }
5460 5460 } else {
5461 5461 if ((cp->cache_complete_slab_count + avl_numnodes(
5462 5462 &cp->cache_partial_slabs)) < kmem_frag_minslabs) {
5463 5463 return (B_FALSE);
5464 5464 }
5465 5465 }
5466 5466
5467 5467 nfree = cp->cache_bufslab;
5468 5468 fragmented = ((avl_numnodes(&cp->cache_partial_slabs) > 1) &&
5469 5469 kmem_cache_frag_threshold(cp, nfree));
5470 5470
5471 5471 /*
5472 5472 * Free buffers in the magazine layer appear allocated from the point of
5473 5473 * view of the slab layer. We want to know if the slab layer would
5474 5474 * appear fragmented if we included free buffers from magazines that
5475 5475 * have fallen out of the working set.
5476 5476 */
5477 5477 if (!fragmented) {
5478 5478 long reap;
5479 5479
5480 5480 mutex_enter(&cp->cache_depot_lock);
5481 5481 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
5482 5482 reap = MIN(reap, cp->cache_full.ml_total);
5483 5483 mutex_exit(&cp->cache_depot_lock);
5484 5484
5485 5485 nfree += ((uint64_t)reap * cp->cache_magtype->mt_magsize);
5486 5486 if (kmem_cache_frag_threshold(cp, nfree)) {
5487 5487 *doreap = B_TRUE;
5488 5488 }
5489 5489 }
5490 5490
5491 5491 return (fragmented);
5492 5492 }
5493 5493
5494 5494 /* Called periodically from kmem_taskq */
5495 5495 static void
5496 5496 kmem_cache_scan(kmem_cache_t *cp)
5497 5497 {
5498 5498 boolean_t reap = B_FALSE;
5499 5499 kmem_defrag_t *kmd;
5500 5500
5501 5501 ASSERT(taskq_member(kmem_taskq, curthread));
5502 5502
5503 5503 mutex_enter(&cp->cache_lock);
5504 5504
5505 5505 kmd = cp->cache_defrag;
5506 5506 if (kmd->kmd_consolidate > 0) {
5507 5507 kmd->kmd_consolidate--;
5508 5508 mutex_exit(&cp->cache_lock);
5509 5509 kmem_cache_reap(cp);
5510 5510 return;
5511 5511 }
5512 5512
5513 5513 if (kmem_cache_is_fragmented(cp, &reap)) {
5514 5514 size_t slabs_found;
5515 5515
5516 5516 /*
5517 5517 * Consolidate reclaimable slabs from the end of the partial
5518 5518 * slab list (scan at most kmem_reclaim_scan_range slabs to find
5519 5519 * reclaimable slabs). Keep track of how many candidate slabs we
5520 5520 * looked for and how many we actually found so we can adjust
5521 5521 * the definition of a candidate slab if we're having trouble
5522 5522 * finding them.
5523 5523 *
5524 5524 * kmem_move_buffers() drops and reacquires cache_lock.
5525 5525 */
5526 5526 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5527 5527 kmd->kmd_scans++;
5528 5528 slabs_found = kmem_move_buffers(cp, kmem_reclaim_scan_range,
5529 5529 kmem_reclaim_max_slabs, 0);
5530 5530 if (slabs_found >= 0) {
5531 5531 kmd->kmd_slabs_sought += kmem_reclaim_max_slabs;
5532 5532 kmd->kmd_slabs_found += slabs_found;
5533 5533 }
5534 5534
5535 5535 if (++kmd->kmd_tries >= kmem_reclaim_scan_range) {
5536 5536 kmd->kmd_tries = 0;
5537 5537
5538 5538 /*
5539 5539 * If we had difficulty finding candidate slabs in
5540 5540 * previous scans, adjust the threshold so that
5541 5541 * candidates are easier to find.
5542 5542 */
5543 5543 if (kmd->kmd_slabs_found == kmd->kmd_slabs_sought) {
5544 5544 kmem_adjust_reclaim_threshold(kmd, -1);
5545 5545 } else if ((kmd->kmd_slabs_found * 2) <
5546 5546 kmd->kmd_slabs_sought) {
5547 5547 kmem_adjust_reclaim_threshold(kmd, 1);
5548 5548 }
5549 5549 kmd->kmd_slabs_sought = 0;
5550 5550 kmd->kmd_slabs_found = 0;
5551 5551 }
5552 5552 } else {
5553 5553 kmem_reset_reclaim_threshold(cp->cache_defrag);
5554 5554 #ifdef DEBUG
5555 5555 if (!avl_is_empty(&cp->cache_partial_slabs)) {
5556 5556 /*
5557 5557 * In a debug kernel we want the consolidator to
5558 5558 * run occasionally even when there is plenty of
5559 5559 * memory.
5560 5560 */
5561 5561 uint16_t debug_rand;
5562 5562
5563 5563 (void) random_get_bytes((uint8_t *)&debug_rand, 2);
5564 5564 if (!kmem_move_noreap &&
5565 5565 ((debug_rand % kmem_mtb_reap) == 0)) {
5566 5566 mutex_exit(&cp->cache_lock);
5567 5567 KMEM_STAT_ADD(kmem_move_stats.kms_debug_reaps);
5568 5568 kmem_cache_reap(cp);
5569 5569 return;
5570 5570 } else if ((debug_rand % kmem_mtb_move) == 0) {
5571 5571 KMEM_STAT_ADD(kmem_move_stats.kms_scans);
5572 5572 KMEM_STAT_ADD(kmem_move_stats.kms_debug_scans);
5573 5573 kmd->kmd_scans++;
5574 5574 (void) kmem_move_buffers(cp,
5575 5575 kmem_reclaim_scan_range, 1, KMM_DEBUG);
5576 5576 }
5577 5577 }
5578 5578 #endif /* DEBUG */
5579 5579 }
5580 5580
5581 5581 mutex_exit(&cp->cache_lock);
5582 5582
5583 5583 if (reap) {
5584 5584 KMEM_STAT_ADD(kmem_move_stats.kms_scan_depot_ws_reaps);
5585 5585 kmem_depot_ws_reap(cp);
5586 5586 }
5587 5587 }
697 lines elided