/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_badop(void);
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
    caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define	SEGKP_BADOP(t)	(t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.swapout	= SEGKP_BADOP(size_t),
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(u_offset_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
	.getmemid	= segkp_getmemid,
	.getpolicy	= segkp_getpolicy,
	.capable	= segkp_capable,
};


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;
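
/*
 * Illustrative sketch (added commentary, not in the original source):
 * segkp_create() below is normally invoked once at boot, after the platform
 * startup code has attached a segment for it to kas.  Assuming the usual
 * globals (segkp, segkp_base, segkpsize), the calling sequence looks
 * roughly like:
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	if (seg_attach(&kas, segkp_base, mmu_ptob(segkpsize), segkp) < 0)
 *		cmn_err(CE_PANIC, "cannot attach segkp");
 *	if (segkp_create(segkp) != 0)
 *		cmn_err(CE_PANIC, "segkp_create returned error");
 *	rw_exit(&kas.a_lock);
 *
 * The exact variable names and sizing are platform startup details and are
 * shown here only as an assumption for context.
 */
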
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
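
/*
 * Illustrative sketch (added commentary, not in the original source): a
 * hypothetical subsystem that repeatedly needs identically-sized, locked
 * segkp resources can front segkp_get() with one of the caches above:
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 16, 32 * 1024,
 *	    KPD_LOCKED | KPD_ZERO);
 *	...
 *	va = segkp_cache_get(cookie);	(reuses a cached slot if one exists)
 *	...
 *	segkp_release(segkp, va);	(returns the slot to the cache)
 *
 * The cache size and flags here are assumptions for illustration only.
 * segkp_cache_free(), or the memory-delete callback at the bottom of this
 * file, later drains anything still sitting on the freelists.
 */
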
/*
 * There are two entry points into segkp_get_internal(): the first uses a
 * cookie to access a pool of cached segkp resources; the second does not
 * use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
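
/*
 * Illustrative sketch (added commentary, not in the original source): the
 * typical consumer of these entry points is kernel thread-stack allocation.
 * A caller that wants a pageable, zeroed, red-zoned, locked-down resource
 * of DEFAULTSTKSZ bytes would do something like:
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, DEFAULTSTKSZ,
 *	    KPD_HASREDZONE | KPD_ZERO | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);	(no VA, swap or physical memory)
 *	...
 *	segkp_release(segkp, va);
 *
 * The address handed back is the start of the mapped portion (see stom());
 * the redzone page, if requested, is accounted for internally.
 */
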
/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client; len must be page-aligned.  NULL is
 * returned if there are no more VM resources (e.g. pages, swap).  The len
 * and base recorded in the private data structure include the redzone
 * and its length (if applicable).  If the user requests a redzone, either
 * the first or the last page is left unmapped, depending on whether stacks
 * grow toward low or high memory.
 *
 * The client may also specify a no-wait flag.  If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}
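
/*
 * Worked example (added commentary, not in the original source): with
 * STACK_GROWTH_DOWN, a segkp_get() request for 24K with KPD_HASREDZONE is
 * rounded up internally to kp_len = 24K + PAGESIZE.  The redzone is the
 * page at index KPD_REDZONE(kpd) -- the lowest page for downward-growing
 * stacks -- and it is deliberately left without an MMU mapping, so running
 * off the bottom of the stack faults immediately.  stom() converts the
 * recorded kp_base into the address handed back to the client (skipping
 * the redzone page), and SEGKP_MAPLEN() recovers the 24K mapped length
 * from kp_len; the exact macro definitions live in <vm/seg_kp.h>.
 */
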
/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has fewer than the maximum allowable entries.  If inserted in the cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource.  segkp_unlock gets called with the start of the
 * mapped portion of the resource.  The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}
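
/*
 * Illustrative sketch (added commentary, not in the original source):
 * segkp_map_red() and segkp_unmap_red() below are used as a pair by a
 * non-swappable caller that is about to do a deep stack run.  The only
 * real caller today is pagefault(), but the pattern looks roughly like:
 *
 *	int mapped_red = 0;
 *
 *	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 *	mapped_red = segkp_map_red();
 *	...	deep, possibly recursive, work ...
 *	if (mapped_red)
 *		segkp_unmap_red();
 *
 * The caller must remain non-swappable between the two calls.
 */
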
/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when:
 *
 *   - it is safe to sleep on page_create_va().
 *   - the caller is non-swappable.
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.  Note that the caller must _remain_ non-swappable until after
 * calling segkp_unmap_red().
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical page for the redzone.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error	Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int			err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion.  Note reference to
		 * redzone is handled since vaddr would not equal base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the MMU translations and unlock the range if it
 * is locked.  Can be called with the flag value KPD_WRITEDIRTY, which
 * specifies that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range.  It is assumed
	 * segkp_unlock is called with page aligned base
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t	*
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}