6146 seg_inherit_notsup is redundant
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
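A minimal usage sketch, assuming kernel context and the global 'segkp' segment that this file references later (see swapsize()); the helper names, size, and flag combination below are hypothetical, not taken from any real caller:

    /* Illustrative only -- hypothetical helpers built on segkp_get()/segkp_release(). */
    #include <vm/seg_kp.h>

    static caddr_t
    example_alloc(size_t len)
    {
            /* len must be page-aligned; ask for zeroed pages plus a redzone. */
            return (segkp_get(segkp, len, KPD_ZERO | KPD_HASREDZONE));
    }

    static void
    example_free(caddr_t va)
    {
            /* va is the mapped address that segkp_get() returned. */
            segkp_release(segkp, va);
    }

A NULL return from segkp_get() means no VM resources (pages or swap) were available.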
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_badop(void);
79 79 static void segkp_dump(struct seg *seg);
80 80 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
81 81 uint_t prot);
82 82 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
83 83 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
84 84 struct page ***page, enum lock_type type,
85 85 enum seg_rw rw);
86 86 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
87 87 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
88 88 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
89 89 struct segkp_data **tkpd, struct anon_map *amp);
90 90 static void segkp_release_internal(struct seg *seg,
91 91 struct segkp_data *kpd, size_t len);
92 92 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
93 93 size_t len, struct segkp_data *kpd, uint_t flags);
94 94 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
95 95 size_t len, struct segkp_data *kpd, uint_t flags);
96 96 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
97 97 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
98 98 static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
99 99 caddr_t addr);
100 100 static int segkp_capable(struct seg *seg, segcapability_t capability);
101 101
102 102 /*
103 103 * Lock used to protect the hash table(s) and caches.
104 104 */
105 105 static kmutex_t segkp_lock;
106 106
107 107 /*
108 108 * The segkp caches
109 109 */
110 110 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
111 111
112 112 #define SEGKP_BADOP(t) (t(*)())segkp_badop
113 113
114 114 /*
115 115 * When there are fewer than red_minavail bytes left on the stack,
116 116 * segkp_map_red() will map in the redzone (if called). 5000 seems
117 117 * to work reasonably well...
118 118 */
119 119 long red_minavail = 5000;
120 120
121 121 /*
122 122 * will be set to 1 for 32 bit x86 systems only, in startup.c
123 123 */
124 124 int segkp_fromheap = 0;
125 125 ulong_t *segkp_bitmap;
126 126
127 127 /*
128 128 * If segkp_map_red() is called with the redzone already mapped and
129 129 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
130 130 * then the stack situation has become quite serious; if much more stack
131 131 * is consumed, we have the potential of scrogging the next thread/LWP
132 132 * structure. To help debug the "can't happen" panics which may
133 133 * result from this condition, we record hrestime and the calling thread
134 134 * in red_deep_hires and red_deep_thread respectively.
135 135 */
136 136 #define RED_DEEP_THRESHOLD 2000
137 137
138 138 hrtime_t red_deep_hires;
139 139 kthread_t *red_deep_thread;
140 140
141 141 uint32_t red_nmapped;
142 142 uint32_t red_closest = UINT_MAX;
143 143 uint32_t red_ndoubles;
144 144
145 145 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
146 146 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
147 147
148 148 static struct seg_ops segkp_ops = {
149 149 .dup = SEGKP_BADOP(int),
150 150 .unmap = SEGKP_BADOP(int),
151 151 .free = SEGKP_BADOP(void),
152 152 .fault = segkp_fault,
153 153 .faulta = SEGKP_BADOP(faultcode_t),
154 154 .setprot = SEGKP_BADOP(int),
155 155 .checkprot = segkp_checkprot,
156 156 .kluster = segkp_kluster,
157 157 .swapout = SEGKP_BADOP(size_t),
158 158 .sync = SEGKP_BADOP(int),
159 159 .incore = SEGKP_BADOP(size_t),
160 160 .lockop = SEGKP_BADOP(int),
161 161 .getprot = SEGKP_BADOP(int),
162 162 .getoffset = SEGKP_BADOP(u_offset_t),
163 163 .gettype = SEGKP_BADOP(int),
164 164 .getvp = SEGKP_BADOP(int),
165 165 .advise = SEGKP_BADOP(int),
166 166 .dump = segkp_dump,
167 167 .pagelock = segkp_pagelock,
168 168 .setpagesize = SEGKP_BADOP(int),
169 169 .getmemid = segkp_getmemid,
170 170 .getpolicy = segkp_getpolicy,
171 171 .capable = segkp_capable,
172 - .inherit = seg_inherit_notsup,
173 172 };
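The line removed above is the entire change: the inherit op is now simply left NULL in segkp_ops. A minimal sketch of the fallback pattern that makes an explicit seg_inherit_notsup entry redundant, assuming the generic segment layer checks for a NULL op before dispatching (the wrapper name, signature, and check shown here are assumptions, not the actual seg.c code):

    static int
    example_seg_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
    {
            /* A driver that leaves the op NULL gets ENOTSUP by default. */
            if (seg->s_ops->inherit == NULL)
                    return (ENOTSUP);
            return ((*seg->s_ops->inherit)(seg, addr, len, op));
    }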
174 173
175 174
176 175 static void
177 176 segkp_badop(void)
178 177 {
179 178 panic("segkp_badop");
180 179 /*NOTREACHED*/
181 180 }
182 181
183 182 static void segkpinit_mem_config(struct seg *);
184 183
185 184 static uint32_t segkp_indel;
186 185
187 186 /*
188 187 * Allocate the segment specific private data struct and fill it in
189 188 * with the per kp segment mutex, anon ptr. array and hash table.
190 189 */
191 190 int
192 191 segkp_create(struct seg *seg)
193 192 {
194 193 struct segkp_segdata *kpsd;
195 194 size_t np;
196 195
197 196 ASSERT(seg != NULL && seg->s_as == &kas);
198 197 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
199 198
200 199 if (seg->s_size & PAGEOFFSET) {
201 200 panic("Bad segkp size");
202 201 /*NOTREACHED*/
203 202 }
204 203
205 204 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
206 205
207 206 /*
208 207 * Allocate the virtual memory for segkp and initialize it
209 208 */
210 209 if (segkp_fromheap) {
211 210 np = btop(kvseg.s_size);
212 211 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
213 212 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
214 213 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
215 214 } else {
216 215 segkp_bitmap = NULL;
217 216 np = btop(seg->s_size);
218 217 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
219 218 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
220 219 VM_SLEEP);
221 220 }
222 221
223 222 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
224 223
225 224 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
226 225 KM_SLEEP);
227 226 seg->s_data = (void *)kpsd;
228 227 seg->s_ops = &segkp_ops;
229 228 segkpinit_mem_config(seg);
230 229 return (0);
231 230 }
232 231
233 232
234 233 /*
235 234 * Find a free 'freelist' and initialize it with the appropriate attributes
236 235 */
237 236 void *
238 237 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
239 238 {
240 239 int i;
241 240
242 241 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
243 242 return ((void *)-1);
244 243
245 244 mutex_enter(&segkp_lock);
246 245 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
247 246 if (segkp_cache[i].kpf_inuse)
248 247 continue;
249 248 segkp_cache[i].kpf_inuse = 1;
250 249 segkp_cache[i].kpf_max = maxsize;
251 250 segkp_cache[i].kpf_flags = flags;
252 251 segkp_cache[i].kpf_seg = seg;
253 252 segkp_cache[i].kpf_len = len;
254 253 mutex_exit(&segkp_lock);
255 254 return ((void *)(uintptr_t)i);
256 255 }
257 256 mutex_exit(&segkp_lock);
258 257 return ((void *)-1);
259 258 }
260 259
261 260 /*
262 261 * Free all the cache resources.
263 262 */
264 263 void
265 264 segkp_cache_free(void)
266 265 {
267 266 struct segkp_data *kpd;
268 267 struct seg *seg;
269 268 int i;
270 269
271 270 mutex_enter(&segkp_lock);
272 271 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
273 272 if (!segkp_cache[i].kpf_inuse)
274 273 continue;
275 274 /*
276 275 * Disconnect the freelist and process each element
277 276 */
278 277 kpd = segkp_cache[i].kpf_list;
279 278 seg = segkp_cache[i].kpf_seg;
280 279 segkp_cache[i].kpf_list = NULL;
281 280 segkp_cache[i].kpf_count = 0;
282 281 mutex_exit(&segkp_lock);
283 282
284 283 while (kpd != NULL) {
285 284 struct segkp_data *next;
286 285
287 286 next = kpd->kp_next;
288 287 segkp_release_internal(seg, kpd, kpd->kp_len);
289 288 kpd = next;
290 289 }
291 290 mutex_enter(&segkp_lock);
292 291 }
293 292 mutex_exit(&segkp_lock);
294 293 }
295 294
296 295 /*
297 296 * There are 2 entries into segkp_get_internal. The first includes a cookie
298 297 * used to access a pool of cached segkp resources. The second does not
299 298 * use the cache.
300 299 */
301 300 caddr_t
302 301 segkp_get(struct seg *seg, size_t len, uint_t flags)
303 302 {
304 303 struct segkp_data *kpd = NULL;
305 304
306 305 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
307 306 kpd->kp_cookie = -1;
308 307 return (stom(kpd->kp_base, flags));
309 308 }
310 309 return (NULL);
311 310 }
312 311
313 312 /*
314 313 * Return a 'cached' segkp address
315 314 */
316 315 caddr_t
317 316 segkp_cache_get(void *cookie)
318 317 {
319 318 struct segkp_cache *freelist = NULL;
320 319 struct segkp_data *kpd = NULL;
321 320 int index = (int)(uintptr_t)cookie;
322 321 struct seg *seg;
323 322 size_t len;
324 323 uint_t flags;
325 324
326 325 if (index < 0 || index >= SEGKP_MAX_CACHE)
327 326 return (NULL);
328 327 freelist = &segkp_cache[index];
329 328
330 329 mutex_enter(&segkp_lock);
331 330 seg = freelist->kpf_seg;
332 331 flags = freelist->kpf_flags;
333 332 if (freelist->kpf_list != NULL) {
334 333 kpd = freelist->kpf_list;
335 334 freelist->kpf_list = kpd->kp_next;
336 335 freelist->kpf_count--;
337 336 mutex_exit(&segkp_lock);
338 337 kpd->kp_next = NULL;
339 338 segkp_insert(seg, kpd);
340 339 return (stom(kpd->kp_base, flags));
341 340 }
342 341 len = freelist->kpf_len;
343 342 mutex_exit(&segkp_lock);
344 343 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
345 344 kpd->kp_cookie = index;
346 345 return (stom(kpd->kp_base, flags));
347 346 }
348 347 return (NULL);
349 348 }
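A sketch of the cached path provided by segkp_cache_init() and segkp_cache_get(), assuming kernel context; the cache depth, flags, and helper names are made up for illustration:

    static void *example_cookie = (void *)-1;       /* hypothetical */

    static caddr_t
    example_cached_alloc(size_t len)
    {
            /* One-time setup: a freelist of up to 32 locked, zeroed resources. */
            if (example_cookie == (void *)-1)
                    example_cookie = segkp_cache_init(segkp, 32, len,
                        KPD_LOCKED | KPD_ZERO);

            /* Fall back to the uncached path if no cache slot was available. */
            if (example_cookie == (void *)-1)
                    return (segkp_get(segkp, len, KPD_LOCKED | KPD_ZERO));

            /* Reuses a freelist entry if one is present, else allocates fresh. */
            return (segkp_cache_get(example_cookie));
    }

segkp_release() on the returned address puts the resource back on the freelist as long as the cache is below its kpf_max limit.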
350 349
351 350 caddr_t
352 351 segkp_get_withanonmap(
353 352 struct seg *seg,
354 353 size_t len,
355 354 uint_t flags,
356 355 struct anon_map *amp)
357 356 {
358 357 struct segkp_data *kpd = NULL;
359 358
360 359 ASSERT(amp != NULL);
361 360 flags |= KPD_HASAMP;
362 361 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
363 362 kpd->kp_cookie = -1;
364 363 return (stom(kpd->kp_base, flags));
365 364 }
366 365 return (NULL);
367 366 }
368 367
369 368 /*
370 369 * This does the real work of segkp allocation.
371 370 * Return to client base addr. len must be page-aligned. A null value is
372 371 * returned if there are no more vm resources (e.g. pages, swap). The len
373 372 * and base recorded in the private data structure include the redzone
374 373 * and the redzone length (if applicable). If the user requests a redzone
375 374 * either the first or last page is left unmapped depending on whether stacks
376 375 * grow to low or high memory.
377 376 *
378 377 * The client may also specify a no-wait flag. If that is set then the
379 378 * request will choose a non-blocking path when requesting resources.
380 379 * The default is to make the client wait.
381 380 */
382 381 static caddr_t
383 382 segkp_get_internal(
384 383 struct seg *seg,
385 384 size_t len,
386 385 uint_t flags,
387 386 struct segkp_data **tkpd,
388 387 struct anon_map *amp)
389 388 {
390 389 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
391 390 struct segkp_data *kpd;
392 391 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
393 392 pgcnt_t np = 0; /* number of pages in the resource */
394 393 pgcnt_t segkpindex;
395 394 long i;
396 395 caddr_t va;
397 396 pgcnt_t pages = 0;
398 397 ulong_t anon_idx = 0;
399 398 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
400 399 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
401 400
402 401 if (len & PAGEOFFSET) {
403 402 panic("segkp_get: len is not page-aligned");
404 403 /*NOTREACHED*/
405 404 }
406 405
407 406 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
408 407
409 408 /* Only allow KPD_NO_ANON if we are going to lock it down */
410 409 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
411 410 return (NULL);
412 411
413 412 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
414 413 return (NULL);
415 414 /*
416 415 * Fix up the len to reflect the REDZONE if applicable
417 416 */
418 417 if (flags & KPD_HASREDZONE)
419 418 len += PAGESIZE;
420 419 np = btop(len);
421 420
422 421 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
423 422 if (vbase == NULL) {
424 423 kmem_free(kpd, sizeof (struct segkp_data));
425 424 return (NULL);
426 425 }
427 426
428 427 /* If locking, reserve physical memory */
429 428 if (flags & KPD_LOCKED) {
430 429 pages = btop(SEGKP_MAPLEN(len, flags));
431 430 if (page_resv(pages, kmflag) == 0) {
432 431 vmem_free(SEGKP_VMEM(seg), vbase, len);
433 432 kmem_free(kpd, sizeof (struct segkp_data));
434 433 return (NULL);
435 434 }
436 435 if ((flags & KPD_NO_ANON) == 0)
437 436 atomic_add_long(&anon_segkp_pages_locked, pages);
438 437 }
439 438
440 439 /*
441 440 * Reserve sufficient swap space for this vm resource. We'll
442 441 * actually allocate it in the loop below, but reserving it
443 442 * here allows us to back out more gracefully than if we
444 443 * had an allocation failure in the body of the loop.
445 444 *
446 445 * Note that we don't need swap space for the red zone page.
447 446 */
448 447 if (amp != NULL) {
449 448 /*
450 449 * The swap reservation has been done, if required, and the
451 450 * anon_hdr is separate.
452 451 */
453 452 anon_idx = 0;
454 453 kpd->kp_anon_idx = anon_idx;
455 454 kpd->kp_anon = amp->ahp;
456 455
457 456 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
458 457 kpd, vbase, len, flags, 1);
459 458
460 459 } else if ((flags & KPD_NO_ANON) == 0) {
461 460 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
462 461 if (flags & KPD_LOCKED) {
463 462 atomic_add_long(&anon_segkp_pages_locked,
464 463 -pages);
465 464 page_unresv(pages);
466 465 }
467 466 vmem_free(SEGKP_VMEM(seg), vbase, len);
468 467 kmem_free(kpd, sizeof (struct segkp_data));
469 468 return (NULL);
470 469 }
471 470 atomic_add_long(&anon_segkp_pages_resv,
472 471 btop(SEGKP_MAPLEN(len, flags)));
473 472 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
474 473 kpd->kp_anon_idx = anon_idx;
475 474 kpd->kp_anon = kpsd->kpsd_anon;
476 475
477 476 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
478 477 kpd, vbase, len, flags, 1);
479 478 } else {
480 479 kpd->kp_anon = NULL;
481 480 kpd->kp_anon_idx = 0;
482 481 }
483 482
484 483 /*
485 484 * Allocate page and anon resources for the virtual address range
486 485 * except the redzone
487 486 */
488 487 if (segkp_fromheap)
489 488 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
490 489 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
491 490 page_t *pl[2];
492 491 struct vnode *vp;
493 492 anoff_t off;
494 493 int err;
495 494 page_t *pp = NULL;
496 495
497 496 /*
498 497 * Mark this page to be a segkp page in the bitmap.
499 498 */
500 499 if (segkp_fromheap) {
501 500 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
502 501 segkpindex++;
503 502 }
504 503
505 504 /*
506 505 * If this page is the red zone page, we don't need swap
507 506 * space for it. Note that we skip over the code that
508 507 * establishes MMU mappings, so that the page remains
509 508 * invalid.
510 509 */
511 510 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
512 511 continue;
513 512
514 513 if (kpd->kp_anon != NULL) {
515 514 struct anon *ap;
516 515
517 516 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
518 517 == NULL);
519 518 /*
520 519 * Determine the "vp" and "off" of the anon slot.
521 520 */
522 521 ap = anon_alloc(NULL, 0);
523 522 if (amp != NULL)
524 523 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
525 524 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
526 525 ap, ANON_SLEEP);
527 526 if (amp != NULL)
528 527 ANON_LOCK_EXIT(&->a_rwlock);
529 528 swap_xlate(ap, &vp, &off);
530 529
531 530 /*
532 531 * Create a page with the specified identity. The
533 532 * page is returned with the "shared" lock held.
534 533 */
535 534 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
536 535 NULL, pl, PAGESIZE, seg, va, S_CREATE,
537 536 kcred, NULL);
538 537 if (err) {
539 538 /*
540 539 * XXX - This should not fail.
541 540 */
542 541 panic("segkp_get: no pages");
543 542 /*NOTREACHED*/
544 543 }
545 544 pp = pl[0];
546 545 } else {
547 546 ASSERT(page_exists(&kvp,
548 547 (u_offset_t)(uintptr_t)va) == NULL);
549 548
550 549 if ((pp = page_create_va(&kvp,
551 550 (u_offset_t)(uintptr_t)va, PAGESIZE,
552 551 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
553 552 PG_NORELOC, seg, va)) == NULL) {
554 553 /*
555 554 * Legitimize resource; then destroy it.
556 555 * Easier than trying to unwind here.
557 556 */
558 557 kpd->kp_flags = flags;
559 558 kpd->kp_base = vbase;
560 559 kpd->kp_len = len;
561 560 segkp_release_internal(seg, kpd, va - vbase);
562 561 return (NULL);
563 562 }
564 563 page_io_unlock(pp);
565 564 }
566 565
567 566 if (flags & KPD_ZERO)
568 567 pagezero(pp, 0, PAGESIZE);
569 568
570 569 /*
571 570 * Load and lock an MMU translation for the page.
572 571 */
573 572 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
574 573 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
575 574
576 575 /*
577 576 * Now, release lock on the page.
578 577 */
579 578 if (flags & KPD_LOCKED) {
580 579 /*
581 580 * Indicate to page_retire framework that this
582 581 * page can only be retired when it is freed.
583 582 */
584 583 PP_SETRAF(pp);
585 584 page_downgrade(pp);
586 585 } else
587 586 page_unlock(pp);
588 587 }
589 588
590 589 kpd->kp_flags = flags;
591 590 kpd->kp_base = vbase;
592 591 kpd->kp_len = len;
593 592 segkp_insert(seg, kpd);
594 593 *tkpd = kpd;
595 594 return (stom(kpd->kp_base, flags));
596 595 }
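The redzone accounting above can be mirrored in isolation: KPD_HASREDZONE grows the resource by one page, that page is skipped when pages and MMU translations are set up, and the reservations use SEGKP_MAPLEN(), which (as the "no swap space for the red zone page" note implies) excludes that page. A standalone arithmetic sketch, with an illustrative 4K page size standing in for PAGESIZE:

    #include <stdio.h>

    #define EX_PAGESIZE     4096UL

    int
    main(void)
    {
            unsigned long req = 3 * EX_PAGESIZE;      /* caller's request */
            unsigned long len = req + EX_PAGESIZE;    /* + one redzone page */
            unsigned long np = len / EX_PAGESIZE;     /* pages in the resource */
            unsigned long mapped = len - EX_PAGESIZE; /* mapped/reserved length */

            printf("request %lu -> %lu-page resource, %lu bytes mapped\n",
                req, np, mapped);
            return (0);
    }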
597 596
598 597 /*
599 598 * Release the resource to the cache if the pool (designated by the cookie)
600 599 * has fewer entries than the maximum allowable. If it is inserted in the cache,
601 600 * segkp_delete ensures the element is taken off of the active list.
602 601 */
603 602 void
604 603 segkp_release(struct seg *seg, caddr_t vaddr)
605 604 {
606 605 struct segkp_cache *freelist;
607 606 struct segkp_data *kpd = NULL;
608 607
609 608 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
610 609 panic("segkp_release: null kpd");
611 610 /*NOTREACHED*/
612 611 }
613 612
614 613 if (kpd->kp_cookie != -1) {
615 614 freelist = &segkp_cache[kpd->kp_cookie];
616 615 mutex_enter(&segkp_lock);
617 616 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
618 617 segkp_delete(seg, kpd);
619 618 kpd->kp_next = freelist->kpf_list;
620 619 freelist->kpf_list = kpd;
621 620 freelist->kpf_count++;
622 621 mutex_exit(&segkp_lock);
623 622 return;
624 623 } else {
625 624 mutex_exit(&segkp_lock);
626 625 kpd->kp_cookie = -1;
627 626 }
628 627 }
629 628 segkp_release_internal(seg, kpd, kpd->kp_len);
630 629 }
631 630
632 631 /*
633 632 * Free the entire resource. segkp_unlock gets called with the start of the
634 633 * mapped portion of the resource. The length is the size of the mapped
635 634 * portion
636 635 */
637 636 static void
638 637 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
639 638 {
640 639 caddr_t va;
641 640 long i;
642 641 long redzone;
643 642 size_t np;
644 643 page_t *pp;
645 644 struct vnode *vp;
646 645 anoff_t off;
647 646 struct anon *ap;
648 647 pgcnt_t segkpindex;
649 648
650 649 ASSERT(kpd != NULL);
651 650 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
652 651 np = btop(len);
653 652
654 653 /* Remove from active hash list */
655 654 if (kpd->kp_cookie == -1) {
656 655 mutex_enter(&segkp_lock);
657 656 segkp_delete(seg, kpd);
658 657 mutex_exit(&segkp_lock);
659 658 }
660 659
661 660 /*
662 661 * Precompute redzone page index.
663 662 */
664 663 redzone = -1;
665 664 if (kpd->kp_flags & KPD_HASREDZONE)
666 665 redzone = KPD_REDZONE(kpd);
667 666
668 667
669 668 va = kpd->kp_base;
670 669
671 670 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
672 671 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
673 672 /*
674 673 * Free up those anon resources that are quiescent.
675 674 */
676 675 if (segkp_fromheap)
677 676 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
678 677 for (i = 0; i < np; i++, va += PAGESIZE) {
679 678
680 679 /*
681 680 * Clear the bit for this page from the bitmap.
682 681 */
683 682 if (segkp_fromheap) {
684 683 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
685 684 segkpindex++;
686 685 }
687 686
688 687 if (i == redzone)
689 688 continue;
690 689 if (kpd->kp_anon) {
691 690 /*
692 691 * Free up anon resources and destroy the
693 692 * associated pages.
694 693 *
695 694 * Release the lock if there is one. Have to get the
696 695 * page to do this, unfortunately.
697 696 */
698 697 if (kpd->kp_flags & KPD_LOCKED) {
699 698 ap = anon_get_ptr(kpd->kp_anon,
700 699 kpd->kp_anon_idx + i);
701 700 swap_xlate(ap, &vp, &off);
702 701 /* Find the shared-locked page. */
703 702 pp = page_find(vp, (u_offset_t)off);
704 703 if (pp == NULL) {
705 704 panic("segkp_release: "
706 705 "kp_anon: no page to unlock ");
707 706 /*NOTREACHED*/
708 707 }
709 708 if (PP_ISRAF(pp))
710 709 PP_CLRRAF(pp);
711 710
712 711 page_unlock(pp);
713 712 }
714 713 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
715 714 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
716 715 PAGESIZE);
717 716 anon_unresv_zone(PAGESIZE, NULL);
718 717 atomic_dec_ulong(&anon_segkp_pages_resv);
719 718 }
720 719 TRACE_5(TR_FAC_VM,
721 720 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
722 721 kpd, va, PAGESIZE, 0, 0);
723 722 } else {
724 723 if (kpd->kp_flags & KPD_LOCKED) {
725 724 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
726 725 if (pp == NULL) {
727 726 panic("segkp_release: "
728 727 "no page to unlock");
729 728 /*NOTREACHED*/
730 729 }
731 730 if (PP_ISRAF(pp))
732 731 PP_CLRRAF(pp);
733 732 /*
734 733 * We should just upgrade the lock here
735 734 * but there is no upgrade that waits.
736 735 */
737 736 page_unlock(pp);
738 737 }
739 738 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
740 739 SE_EXCL);
741 740 if (pp != NULL)
742 741 page_destroy(pp, 0);
743 742 }
744 743 }
745 744
746 745 /* If locked, release physical memory reservation */
747 746 if (kpd->kp_flags & KPD_LOCKED) {
748 747 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
749 748 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
750 749 atomic_add_long(&anon_segkp_pages_locked, -pages);
751 750 page_unresv(pages);
752 751 }
753 752
754 753 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
755 754 kmem_free(kpd, sizeof (struct segkp_data));
756 755 }
757 756
758 757 /*
759 758 * segkp_map_red() will check the current frame pointer against the
760 759 * stack base. If the amount of stack remaining is questionable
761 760 * (less than red_minavail), then segkp_map_red() will map in the redzone
762 761 * and return 1. Otherwise, it will return 0. segkp_map_red() can
763 762 * _only_ be called when:
764 763 *
765 764 * - it is safe to sleep on page_create_va().
766 765 * - the caller is non-swappable.
767 766 *
768 767 * It is up to the caller to remember whether segkp_map_red() successfully
769 768 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
770 769 * time. Note that the caller must _remain_ non-swappable until after
771 770 * calling segkp_unmap_red().
772 771 *
773 772 * Currently, this routine is only called from pagefault() (which necessarily
774 773 * satisfies the above conditions).
775 774 */
776 775 #if defined(STACK_GROWTH_DOWN)
777 776 int
778 777 segkp_map_red(void)
779 778 {
780 779 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
781 780 #ifndef _LP64
782 781 caddr_t stkbase;
783 782 #endif
784 783
785 784 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
786 785
787 786 /*
788 787 * Optimize for the common case where we simply return.
789 788 */
790 789 if ((curthread->t_red_pp == NULL) &&
791 790 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
792 791 return (0);
793 792
794 793 #if defined(_LP64)
795 794 /*
796 795 * XXX We probably need something better than this.
797 796 */
798 797 panic("kernel stack overflow");
799 798 /*NOTREACHED*/
800 799 #else /* _LP64 */
801 800 if (curthread->t_red_pp == NULL) {
802 801 page_t *red_pp;
803 802 struct seg kseg;
804 803
805 804 caddr_t red_va = (caddr_t)
806 805 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
807 806 PAGESIZE);
808 807
809 808 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
810 809 NULL);
811 810
812 811 /*
813 812 * Allocate the physical for the red page.
814 813 */
815 814 /*
816 815 * No PG_NORELOC here to avoid waits. Unlikely to get
817 816 * a relocate happening in the short time the page exists
818 817 * and it will be OK anyway.
819 818 */
820 819
821 820 kseg.s_as = &kas;
822 821 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
823 822 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
824 823 ASSERT(red_pp != NULL);
825 824
826 825 /*
827 826 * So we now have a page to jam into the redzone...
828 827 */
829 828 page_io_unlock(red_pp);
830 829
831 830 hat_memload(kas.a_hat, red_va, red_pp,
832 831 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
833 832 page_downgrade(red_pp);
834 833
835 834 /*
836 835 * The page is left SE_SHARED locked so we can hold on to
837 836 * the page_t pointer.
838 837 */
839 838 curthread->t_red_pp = red_pp;
840 839
841 840 atomic_inc_32(&red_nmapped);
842 841 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
843 842 (void) atomic_cas_32(&red_closest, red_closest,
844 843 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
845 844 }
846 845 return (1);
847 846 }
848 847
849 848 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
850 849 (uintptr_t)PAGEMASK) - PAGESIZE);
851 850
852 851 atomic_inc_32(&red_ndoubles);
853 852
854 853 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
855 854 /*
856 855 * Oh boy. We're already deep within the mapped-in
857 856 * redzone page, and the caller is trying to prepare
858 857 * for a deep stack run. We're running without a
859 858 * redzone right now: if the caller plows off the
860 859 * end of the stack, it'll plow another thread or
861 860 * LWP structure. That situation could result in
862 861 * a very hard-to-debug panic, so, in the spirit of
863 862 * recording the name of one's killer in one's own
864 863 * blood, we're going to record hrestime and the calling
865 864 * thread.
866 865 */
867 866 red_deep_hires = hrestime.tv_nsec;
868 867 red_deep_thread = curthread;
869 868 }
870 869
871 870 /*
872 871 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
873 872 */
874 873 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
875 874 return (0);
876 875 #endif /* _LP64 */
877 876 }
878 877
879 878 void
880 879 segkp_unmap_red(void)
881 880 {
882 881 page_t *pp;
883 882 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
884 883 (uintptr_t)PAGEMASK) - PAGESIZE);
885 884
886 885 ASSERT(curthread->t_red_pp != NULL);
887 886 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
888 887
889 888 /*
890 889 * Because we locked the mapping down, we can't simply rely
891 890 * on page_destroy() to clean everything up; we need to call
892 891 * hat_unload() to explicitly unlock the mapping resources.
893 892 */
894 893 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
895 894
896 895 pp = curthread->t_red_pp;
897 896
898 897 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
899 898
900 899 /*
901 900 * Need to upgrade the SE_SHARED lock to SE_EXCL.
902 901 */
903 902 if (!page_tryupgrade(pp)) {
904 903 /*
905 904 * As there is no wait for upgrade, release the
906 905 * SE_SHARED lock and wait for SE_EXCL.
907 906 */
908 907 page_unlock(pp);
909 908 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
910 909 /* pp may be NULL here, hence the test below */
911 910 }
912 911
913 912 /*
914 913 * Destroy the page, with dontfree set to zero (i.e. free it).
915 914 */
916 915 if (pp != NULL)
917 916 page_destroy(pp, 0);
918 917 curthread->t_red_pp = NULL;
919 918 }
920 919 #else
921 920 #error Red stacks only supported with downwards stack growth.
922 921 #endif
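A sketch of the caller contract described in the comment above segkp_map_red(): remember whether the redzone was mapped and undo it later, staying non-swappable in between. The only real caller is pagefault(); the framing helper here is hypothetical:

    static void
    example_deep_path(void)
    {
            int mapped;

            /* The caller is already non-swappable (TS_DONT_SWAP is set). */
            mapped = segkp_map_red();

            /* ... work that may run close to the end of the stack ... */

            if (mapped)
                    segkp_unmap_red();      /* still non-swappable here */
    }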
923 922
924 923 /*
925 924 * Handle a fault on an address corresponding to one of the
926 925 * resources in the segkp segment.
927 926 */
928 927 faultcode_t
929 928 segkp_fault(
930 929 struct hat *hat,
931 930 struct seg *seg,
932 931 caddr_t vaddr,
933 932 size_t len,
934 933 enum fault_type type,
935 934 enum seg_rw rw)
936 935 {
937 936 struct segkp_data *kpd = NULL;
938 937 int err;
939 938
940 939 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
941 940
942 941 /*
943 942 * Sanity checks.
944 943 */
945 944 if (type == F_PROT) {
946 945 panic("segkp_fault: unexpected F_PROT fault");
947 946 /*NOTREACHED*/
948 947 }
949 948
950 949 if ((kpd = segkp_find(seg, vaddr)) == NULL)
951 950 return (FC_NOMAP);
952 951
953 952 mutex_enter(&kpd->kp_lock);
954 953
955 954 if (type == F_SOFTLOCK) {
956 955 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
957 956 /*
958 957 * The F_SOFTLOCK case has more stringent
959 958 * range requirements: the given range must exactly coincide
960 959 * with the resource's mapped portion. Note reference to
961 960 * redzone is handled since vaddr would not equal base
962 961 */
963 962 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
964 963 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
965 964 mutex_exit(&kpd->kp_lock);
966 965 return (FC_MAKE_ERR(EFAULT));
967 966 }
968 967
969 968 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
970 969 mutex_exit(&kpd->kp_lock);
971 970 return (FC_MAKE_ERR(err));
972 971 }
973 972 kpd->kp_flags |= KPD_LOCKED;
974 973 mutex_exit(&kpd->kp_lock);
975 974 return (0);
976 975 }
977 976
978 977 if (type == F_INVAL) {
979 978 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
980 979
981 980 /*
982 981 * Check if we touched the redzone. Somewhat optimistic
983 982 * here if we are touching the redzone of our own stack
984 983 * since we wouldn't have a stack to get this far...
985 984 */
986 985 if ((kpd->kp_flags & KPD_HASREDZONE) &&
987 986 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
988 987 panic("segkp_fault: accessing redzone");
989 988
990 989 /*
991 990 * This fault may occur while the page is being F_SOFTLOCK'ed.
992 991 * Return since a 2nd segkp_load is unnecessary and also would
993 992 * result in the page being locked twice and eventually
994 993 * hang the thread_reaper thread.
995 994 */
996 995 if (kpd->kp_flags & KPD_LOCKED) {
997 996 mutex_exit(&kpd->kp_lock);
998 997 return (0);
999 998 }
1000 999
1001 1000 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
1002 1001 mutex_exit(&kpd->kp_lock);
1003 1002 return (err ? FC_MAKE_ERR(err) : 0);
1004 1003 }
1005 1004
1006 1005 if (type == F_SOFTUNLOCK) {
1007 1006 uint_t flags;
1008 1007
1009 1008 /*
1010 1009 * Make sure the addr is LOCKED and it has anon backing
1011 1010 * before unlocking
1012 1011 */
1013 1012 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
1014 1013 panic("segkp_fault: bad unlock");
1015 1014 /*NOTREACHED*/
1016 1015 }
1017 1016
1018 1017 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
1019 1018 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
1020 1019 panic("segkp_fault: bad range");
1021 1020 /*NOTREACHED*/
1022 1021 }
1023 1022
1024 1023 if (rw == S_WRITE)
1025 1024 flags = kpd->kp_flags | KPD_WRITEDIRTY;
1026 1025 else
1027 1026 flags = kpd->kp_flags;
1028 1027 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
1029 1028 kpd->kp_flags &= ~KPD_LOCKED;
1030 1029 mutex_exit(&kpd->kp_lock);
1031 1030 return (err ? FC_MAKE_ERR(err) : 0);
1032 1031 }
1033 1032 mutex_exit(&kpd->kp_lock);
1034 1033 panic("segkp_fault: bogus fault type: %d\n", type);
1035 1034 /*NOTREACHED*/
1036 1035 }
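A sketch of the F_SOFTLOCK/F_SOFTUNLOCK pairing enforced above: both calls must cover exactly the mapped portion of one resource, and the kas address-space lock is held as a reader across each call. Here 'va' is assumed to be the address segkp_get() returned and 'maplen' the corresponding SEGKP_MAPLEN() length:

    faultcode_t fc;

    fc = segkp_fault(kas.a_hat, segkp, va, maplen, F_SOFTLOCK, S_READ);
    if (fc == 0) {
            /* The pages are loaded and locked while they are being used. */
            (void) segkp_fault(kas.a_hat, segkp, va, maplen,
                F_SOFTUNLOCK, S_WRITE);     /* S_WRITE marks them dirty */
    }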
1037 1036
1038 1037 /*
1039 1038 * Check that the given protections suffice over the range specified by
1040 1039 * vaddr and len. For this segment type, the only issue is whether or
1041 1040 * not the range lies completely within the mapped part of an allocated
1042 1041 * resource.
1043 1042 */
1044 1043 /* ARGSUSED */
1045 1044 static int
1046 1045 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1047 1046 {
1048 1047 struct segkp_data *kpd = NULL;
1049 1048 caddr_t mbase;
1050 1049 size_t mlen;
1051 1050
1052 1051 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1053 1052 return (EACCES);
1054 1053
1055 1054 mutex_enter(&kpd->kp_lock);
1056 1055 mbase = stom(kpd->kp_base, kpd->kp_flags);
1057 1056 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1058 1057 if (len > mlen || vaddr < mbase ||
1059 1058 ((vaddr + len) > (mbase + mlen))) {
1060 1059 mutex_exit(&kpd->kp_lock);
1061 1060 return (EACCES);
1062 1061 }
1063 1062 mutex_exit(&kpd->kp_lock);
1064 1063 return (0);
1065 1064 }
1066 1065
1067 1066
1068 1067 /*
1069 1068 * Check to see if it makes sense to do kluster/read ahead to
1070 1069 * addr + delta relative to the mapping at addr. We assume here
1071 1070 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1072 1071 *
1073 1072 * For seg_u we always "approve" of this action from our standpoint.
1074 1073 */
1075 1074 /*ARGSUSED*/
1076 1075 static int
1077 1076 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1078 1077 {
1079 1078 return (0);
1080 1079 }
1081 1080
1082 1081 /*
1083 1082 * Load and possibly lock intra-slot resources in the range given by
1084 1083 * vaddr and len.
1085 1084 */
1086 1085 static int
1087 1086 segkp_load(
1088 1087 struct hat *hat,
1089 1088 struct seg *seg,
1090 1089 caddr_t vaddr,
1091 1090 size_t len,
1092 1091 struct segkp_data *kpd,
1093 1092 uint_t flags)
1094 1093 {
1095 1094 caddr_t va;
1096 1095 caddr_t vlim;
1097 1096 ulong_t i;
1098 1097 uint_t lock;
1099 1098
1100 1099 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1101 1100
1102 1101 len = P2ROUNDUP(len, PAGESIZE);
1103 1102
1104 1103 /* If locking, reserve physical memory */
1105 1104 if (flags & KPD_LOCKED) {
1106 1105 pgcnt_t pages = btop(len);
1107 1106 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1108 1107 atomic_add_long(&anon_segkp_pages_locked, pages);
1109 1108 (void) page_resv(pages, KM_SLEEP);
1110 1109 }
1111 1110
1112 1111 /*
1113 1112 * Loop through the pages in the given range.
1114 1113 */
1115 1114 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1116 1115 vaddr = va;
1117 1116 vlim = va + len;
1118 1117 lock = flags & KPD_LOCKED;
1119 1118 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1120 1119 for (; va < vlim; va += PAGESIZE, i++) {
1121 1120 page_t *pl[2]; /* second element NULL terminator */
1122 1121 struct vnode *vp;
1123 1122 anoff_t off;
1124 1123 int err;
1125 1124 struct anon *ap;
1126 1125
1127 1126 /*
1128 1127 * Summon the page. If it's not resident, arrange
1129 1128 * for synchronous i/o to pull it in.
1130 1129 */
1131 1130 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1132 1131 swap_xlate(ap, &vp, &off);
1133 1132
1134 1133 /*
1135 1134 * The returned page list will have exactly one entry,
1136 1135 * which is returned to us already kept.
1137 1136 */
1138 1137 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1139 1138 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1140 1139
1141 1140 if (err) {
1142 1141 /*
1143 1142 * Back out of what we've done so far.
1144 1143 */
1145 1144 (void) segkp_unlock(hat, seg, vaddr,
1146 1145 (va - vaddr), kpd, flags);
1147 1146 return (err);
1148 1147 }
1149 1148
1150 1149 /*
1151 1150 * Load an MMU translation for the page.
1152 1151 */
1153 1152 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1154 1153 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1155 1154
1156 1155 if (!lock) {
1157 1156 /*
1158 1157 * Now, release "shared" lock on the page.
1159 1158 */
1160 1159 page_unlock(pl[0]);
1161 1160 }
1162 1161 }
1163 1162 return (0);
1164 1163 }
1165 1164
1166 1165 /*
1167 1166 * At the very least unload the mmu-translations and unlock the range if locked
1168 1167 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
1169 1168 * any dirty pages should be written to disk.
1170 1169 */
1171 1170 static int
1172 1171 segkp_unlock(
1173 1172 struct hat *hat,
1174 1173 struct seg *seg,
1175 1174 caddr_t vaddr,
1176 1175 size_t len,
1177 1176 struct segkp_data *kpd,
1178 1177 uint_t flags)
1179 1178 {
1180 1179 caddr_t va;
1181 1180 caddr_t vlim;
1182 1181 ulong_t i;
1183 1182 struct page *pp;
1184 1183 struct vnode *vp;
1185 1184 anoff_t off;
1186 1185 struct anon *ap;
1187 1186
1188 1187 #ifdef lint
1189 1188 seg = seg;
1190 1189 #endif /* lint */
1191 1190
1192 1191 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1193 1192
1194 1193 /*
1195 1194 * Loop through the pages in the given range. It is assumed
1196 1195 * segkp_unlock is called with page aligned base
1197 1196 */
1198 1197 va = vaddr;
1199 1198 vlim = va + len;
1200 1199 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1201 1200 hat_unload(hat, va, len,
1202 1201 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1203 1202 for (; va < vlim; va += PAGESIZE, i++) {
1204 1203 /*
1205 1204 * Find the page associated with this part of the
1206 1205 * slot, tracking it down through its associated swap
1207 1206 * space.
1208 1207 */
1209 1208 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1210 1209 swap_xlate(ap, &vp, &off);
1211 1210
1212 1211 if (flags & KPD_LOCKED) {
1213 1212 if ((pp = page_find(vp, off)) == NULL) {
1214 1213 if (flags & KPD_LOCKED) {
1215 1214 panic("segkp_softunlock: missing page");
1216 1215 /*NOTREACHED*/
1217 1216 }
1218 1217 }
1219 1218 } else {
1220 1219 /*
1221 1220 * Nothing to do if the slot is not locked and the
1222 1221 * page doesn't exist.
1223 1222 */
1224 1223 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1225 1224 continue;
1226 1225 }
1227 1226
1228 1227 /*
1229 1228 * If the page doesn't have any translations, is
1230 1229 * dirty and not being shared, then push it out
1231 1230 * asynchronously and avoid waiting for the
1232 1231 * pageout daemon to do it for us.
1233 1232 *
1234 1233 * XXX - Do we really need to get the "exclusive"
1235 1234 * lock via an upgrade?
1236 1235 */
1237 1236 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1238 1237 hat_ismod(pp) && page_tryupgrade(pp)) {
1239 1238 /*
1240 1239 * Hold the vnode before releasing the page lock to
1241 1240 * prevent it from being freed and re-used by some
1242 1241 * other thread.
1243 1242 */
1244 1243 VN_HOLD(vp);
1245 1244 page_unlock(pp);
1246 1245
1247 1246 /*
1248 1247 * Want most powerful credentials we can get so
1249 1248 * use kcred.
1250 1249 */
1251 1250 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1252 1251 B_ASYNC | B_FREE, kcred, NULL);
1253 1252 VN_RELE(vp);
1254 1253 } else {
1255 1254 page_unlock(pp);
1256 1255 }
1257 1256 }
1258 1257
1259 1258 /* If unlocking, release physical memory */
1260 1259 if (flags & KPD_LOCKED) {
1261 1260 pgcnt_t pages = btopr(len);
1262 1261 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1263 1262 atomic_add_long(&anon_segkp_pages_locked, -pages);
1264 1263 page_unresv(pages);
1265 1264 }
1266 1265 return (0);
1267 1266 }
1268 1267
1269 1268 /*
1270 1269 * Insert the kpd in the hash table.
1271 1270 */
1272 1271 static void
1273 1272 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1274 1273 {
1275 1274 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1276 1275 int index;
1277 1276
1278 1277 /*
1279 1278 * Insert the kpd based on the address that will be returned
1280 1279 * via segkp_release.
1281 1280 */
1282 1281 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1283 1282 mutex_enter(&segkp_lock);
1284 1283 kpd->kp_next = kpsd->kpsd_hash[index];
1285 1284 kpsd->kpsd_hash[index] = kpd;
1286 1285 mutex_exit(&segkp_lock);
1287 1286 }
1288 1287
1289 1288 /*
1290 1289 * Remove kpd from the hash table.
1291 1290 */
1292 1291 static void
1293 1292 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1294 1293 {
1295 1294 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1296 1295 struct segkp_data **kpp;
1297 1296 int index;
1298 1297
1299 1298 ASSERT(MUTEX_HELD(&segkp_lock));
1300 1299
1301 1300 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1302 1301 for (kpp = &kpsd->kpsd_hash[index];
1303 1302 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1304 1303 if (*kpp == kpd) {
1305 1304 *kpp = kpd->kp_next;
1306 1305 return;
1307 1306 }
1308 1307 }
1309 1308 panic("segkp_delete: unable to find element to delete");
1310 1309 /*NOTREACHED*/
1311 1310 }
1312 1311
1313 1312 /*
1314 1313 * Find the kpd associated with a vaddr.
1315 1314 *
1316 1315 * Most of the callers of segkp_find will pass the vaddr that
1317 1316 * hashes to the desired index, but there are cases where
1318 1317 * this is not true in which case we have to (potentially) scan
1319 1318 * the whole table looking for it. This should be very rare
1320 1319 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1321 1320 * middle of the segkp_data region).
1322 1321 */
1323 1322 static struct segkp_data *
1324 1323 segkp_find(struct seg *seg, caddr_t vaddr)
1325 1324 {
1326 1325 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1327 1326 struct segkp_data *kpd;
1328 1327 int i;
1329 1328 int stop;
1330 1329
1331 1330 i = stop = SEGKP_HASH(vaddr);
1332 1331 mutex_enter(&segkp_lock);
1333 1332 do {
1334 1333 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1335 1334 kpd = kpd->kp_next) {
1336 1335 if (vaddr >= kpd->kp_base &&
1337 1336 vaddr < kpd->kp_base + kpd->kp_len) {
1338 1337 mutex_exit(&segkp_lock);
1339 1338 return (kpd);
1340 1339 }
1341 1340 }
1342 1341 if (--i < 0)
1343 1342 i = SEGKP_HASHSZ - 1; /* Wrap */
1344 1343 } while (i != stop);
1345 1344 mutex_exit(&segkp_lock);
1346 1345 return (NULL); /* Not found */
1347 1346 }
1348 1347
1349 1348 /*
1350 1349 * returns size of swappable area.
1351 1350 */
1352 1351 size_t
1353 1352 swapsize(caddr_t v)
1354 1353 {
1355 1354 struct segkp_data *kpd;
1356 1355
1357 1356 if ((kpd = segkp_find(segkp, v)) != NULL)
1358 1357 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1359 1358 else
1360 1359 return (NULL);
1361 1360 }
1362 1361
1363 1362 /*
1364 1363 * Dump out all the active segkp pages
1365 1364 */
1366 1365 static void
1367 1366 segkp_dump(struct seg *seg)
1368 1367 {
1369 1368 int i;
1370 1369 struct segkp_data *kpd;
1371 1370 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1372 1371
1373 1372 for (i = 0; i < SEGKP_HASHSZ; i++) {
1374 1373 for (kpd = kpsd->kpsd_hash[i];
1375 1374 kpd != NULL; kpd = kpd->kp_next) {
1376 1375 pfn_t pfn;
1377 1376 caddr_t addr;
1378 1377 caddr_t eaddr;
1379 1378
1380 1379 addr = kpd->kp_base;
1381 1380 eaddr = addr + kpd->kp_len;
1382 1381 while (addr < eaddr) {
1383 1382 ASSERT(seg->s_as == &kas);
1384 1383 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1385 1384 if (pfn != PFN_INVALID)
1386 1385 dump_addpage(seg->s_as, addr, pfn);
1387 1386 addr += PAGESIZE;
1388 1387 dump_timeleft = dump_timeout;
1389 1388 }
1390 1389 }
1391 1390 }
1392 1391 }
1393 1392
1394 1393 /*ARGSUSED*/
1395 1394 static int
1396 1395 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1397 1396 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1398 1397 {
1399 1398 return (ENOTSUP);
1400 1399 }
1401 1400
1402 1401 /*ARGSUSED*/
1403 1402 static int
1404 1403 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1405 1404 {
1406 1405 return (ENODEV);
1407 1406 }
1408 1407
1409 1408 /*ARGSUSED*/
1410 1409 static lgrp_mem_policy_info_t *
1411 1410 segkp_getpolicy(struct seg *seg, caddr_t addr)
1412 1411 {
1413 1412 return (NULL);
1414 1413 }
1415 1414
1416 1415 /*ARGSUSED*/
1417 1416 static int
1418 1417 segkp_capable(struct seg *seg, segcapability_t capability)
1419 1418 {
1420 1419 return (0);
1421 1420 }
1422 1421
1423 1422 #include <sys/mem_config.h>
1424 1423
1425 1424 /*ARGSUSED*/
1426 1425 static void
1427 1426 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1428 1427 {}
1429 1428
1430 1429 /*
1431 1430 * During memory delete, turn off caches so that pages are not held.
1432 1431 * A better solution may be to unlock the pages while they are
1433 1432 * in the cache so that they may be collected naturally.
1434 1433 */
1435 1434
1436 1435 /*ARGSUSED*/
1437 1436 static int
1438 1437 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1439 1438 {
1440 1439 atomic_inc_32(&segkp_indel);
1441 1440 segkp_cache_free();
1442 1441 return (0);
1443 1442 }
1444 1443
1445 1444 /*ARGSUSED*/
1446 1445 static void
1447 1446 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1448 1447 {
1449 1448 atomic_dec_32(&segkp_indel);
1450 1449 }
1451 1450
1452 1451 static kphysm_setup_vector_t segkp_mem_config_vec = {
1453 1452 KPHYSM_SETUP_VECTOR_VERSION,
1454 1453 segkp_mem_config_post_add,
1455 1454 segkp_mem_config_pre_del,
1456 1455 segkp_mem_config_post_del,
1457 1456 };
1458 1457
1459 1458 static void
1460 1459 segkpinit_mem_config(struct seg *seg)
1461 1460 {
1462 1461 int ret;
1463 1462
1464 1463 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1465 1464 ASSERT(ret == 0);
1466 1465 }