1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 26 */ 27 28 /* 29 * VM - Hardware Address Translation management for Spitfire MMU. 30 * 31 * This file implements the machine specific hardware translation 32 * needed by the VM system. The machine independent interface is 33 * described in <vm/hat.h> while the machine dependent interface 34 * and data structures are described in <vm/hat_sfmmu.h>. 35 * 36 * The hat layer manages the address translation hardware as a cache 37 * driven by calls from the higher levels in the VM system. 38 */ 39 40 #include <sys/types.h> 41 #include <sys/kstat.h> 42 #include <vm/hat.h> 43 #include <vm/hat_sfmmu.h> 44 #include <vm/page.h> 45 #include <sys/pte.h> 46 #include <sys/systm.h> 47 #include <sys/mman.h> 48 #include <sys/sysmacros.h> 49 #include <sys/machparam.h> 50 #include <sys/vtrace.h> 51 #include <sys/kmem.h> 52 #include <sys/mmu.h> 53 #include <sys/cmn_err.h> 54 #include <sys/cpu.h> 55 #include <sys/cpuvar.h> 56 #include <sys/debug.h> 57 #include <sys/lgrp.h> 58 #include <sys/archsystm.h> 59 #include <sys/machsystm.h> 60 #include <sys/vmsystm.h> 61 #include <vm/as.h> 62 #include <vm/seg.h> 63 #include <vm/seg_kp.h> 64 #include <vm/seg_kmem.h> 65 #include <vm/seg_kpm.h> 66 #include <vm/rm.h> 67 #include <sys/t_lock.h> 68 #include <sys/obpdefs.h> 69 #include <sys/vm_machparam.h> 70 #include <sys/var.h> 71 #include <sys/trap.h> 72 #include <sys/machtrap.h> 73 #include <sys/scb.h> 74 #include <sys/bitmap.h> 75 #include <sys/machlock.h> 76 #include <sys/membar.h> 77 #include <sys/atomic.h> 78 #include <sys/cpu_module.h> 79 #include <sys/prom_debug.h> 80 #include <sys/ksynch.h> 81 #include <sys/mem_config.h> 82 #include <sys/mem_cage.h> 83 #include <vm/vm_dep.h> 84 #include <vm/xhat_sfmmu.h> 85 #include <sys/fpu/fpusystm.h> 86 #include <vm/mach_kpm.h> 87 #include <sys/callb.h> 88 89 #ifdef DEBUG 90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 91 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 92 caddr_t _eaddr = (saddr) + (len); \ 93 sf_srd_t *_srdp; \ 94 sf_region_t *_rgnp; \ 95 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 97 ASSERT((hat) != ksfmmup); \ 98 _srdp = (hat)->sfmmu_srdp; \ 99 ASSERT(_srdp != NULL); \ 100 ASSERT(_srdp->srd_refcnt != 0); \ 101 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 102 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 103 ASSERT(_rgnp->rgn_refcnt != 0); \ 104 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 105 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 106 SFMMU_REGION_HME); \ 107 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 108 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \ 109 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 110 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 111 } 112 113 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 114 { \ 115 caddr_t _hsva; \ 116 caddr_t _heva; \ 117 caddr_t _rsva; \ 118 caddr_t _reva; \ 119 int _ttesz = get_hblk_ttesz(hmeblkp); \ 120 int _flagtte; \ 121 ASSERT((srdp)->srd_refcnt != 0); \ 122 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 123 ASSERT((rgnp)->rgn_id == rid); \ 124 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 125 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 126 SFMMU_REGION_HME); \ 127 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 129 _heva = get_hblk_endaddr(hmeblkp); \ 130 _rsva = (caddr_t)P2ALIGN( \ 131 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 132 _reva = (caddr_t)P2ROUNDUP( \ 133 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 134 HBLK_MIN_BYTES); \ 135 ASSERT(_hsva >= _rsva); \ 136 ASSERT(_hsva < _reva); \ 137 ASSERT(_heva > _rsva); \ 138 ASSERT(_heva <= _reva); \ 139 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 140 _ttesz; \ 141 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 142 } 143 144 #else /* DEBUG */ 145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 147 #endif /* DEBUG */ 148 149 #if defined(SF_ERRATA_57) 150 extern caddr_t errata57_limit; 151 #endif 152 153 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 154 (sizeof (int64_t))) 155 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 156 157 #define HBLK_RESERVE_CNT 128 158 #define HBLK_RESERVE_MIN 20 159 160 static struct hme_blk *freehblkp; 161 static kmutex_t freehblkp_lock; 162 static int freehblkcnt; 163 164 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 165 static kmutex_t hblk_reserve_lock; 166 static kthread_t *hblk_reserve_thread; 167 168 static nucleus_hblk8_info_t nucleus_hblk8; 169 static nucleus_hblk1_info_t nucleus_hblk1; 170 171 /* 172 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 173 * after the initial phase of removing an hmeblk from the hash chain, see 174 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 175 */ 176 static cpu_hme_pend_t *cpu_hme_pend; 177 static uint_t cpu_hme_pend_thresh; 178 /* 179 * SFMMU specific hat functions 180 */ 181 void hat_pagecachectl(struct page *, int); 182 183 /* flags for hat_pagecachectl */ 184 #define HAT_CACHE 0x1 185 #define HAT_UNCACHE 0x2 186 #define HAT_TMPNC 0x4 187 188 /* 189 * Flag to allow the creation of non-cacheable translations 190 * to system memory. It is off by default. At the moment this 191 * flag is used by the ecache error injector. The error injector 192 * will turn it on when creating such a translation then shut it 193 * off when it's finished. 194 */ 195 196 int sfmmu_allow_nc_trans = 0; 197 198 /* 199 * Flag to disable large page support. 200 * value of 1 => disable all large pages. 201 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 202 * 203 * For example, use the value 0x4 to disable 512K pages. 204 * 205 */ 206 #define LARGE_PAGES_OFF 0x1 207 208 /* 209 * The disable_large_pages and disable_ism_large_pages variables control 210 * hat_memload_array and the page sizes to be used by ISM and the kernel. 211 * 212 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 213 * are only used to control which OOB pages to use at upper VM segment creation 214 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 215 * Their values may come from platform or CPU specific code to disable page 216 * sizes that should not be used. 217 * 218 * WARNING: 512K pages are currently not supported for ISM/DISM. 219 */ 220 uint_t disable_large_pages = 0; 221 uint_t disable_ism_large_pages = (1 << TTE512K); 222 uint_t disable_auto_data_large_pages = 0; 223 uint_t disable_auto_text_large_pages = 0; 224 225 /* 226 * Private sfmmu data structures for hat management 227 */ 228 static struct kmem_cache *sfmmuid_cache; 229 static struct kmem_cache *mmuctxdom_cache; 230 231 /* 232 * Private sfmmu data structures for tsb management 233 */ 234 static struct kmem_cache *sfmmu_tsbinfo_cache; 235 static struct kmem_cache *sfmmu_tsb8k_cache; 236 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 237 static vmem_t *kmem_bigtsb_arena; 238 static vmem_t *kmem_tsb_arena; 239 240 /* 241 * sfmmu static variables for hmeblk resource management. 242 */ 243 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 244 static struct kmem_cache *sfmmu8_cache; 245 static struct kmem_cache *sfmmu1_cache; 246 static struct kmem_cache *pa_hment_cache; 247 248 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 249 /* 250 * private data for ism 251 */ 252 static struct kmem_cache *ism_blk_cache; 253 static struct kmem_cache *ism_ment_cache; 254 #define ISMID_STARTADDR NULL 255 256 /* 257 * Region management data structures and function declarations. 258 */ 259 260 static void sfmmu_leave_srd(sfmmu_t *); 261 static int sfmmu_srdcache_constructor(void *, void *, int); 262 static void sfmmu_srdcache_destructor(void *, void *); 263 static int sfmmu_rgncache_constructor(void *, void *, int); 264 static void sfmmu_rgncache_destructor(void *, void *); 265 static int sfrgnmap_isnull(sf_region_map_t *); 266 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 267 static int sfmmu_scdcache_constructor(void *, void *, int); 268 static void sfmmu_scdcache_destructor(void *, void *); 269 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 270 size_t, void *, u_offset_t); 271 272 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 273 static sf_srd_bucket_t *srd_buckets; 274 static struct kmem_cache *srd_cache; 275 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 276 static struct kmem_cache *region_cache; 277 static struct kmem_cache *scd_cache; 278 279 #ifdef sun4v 280 int use_bigtsb_arena = 1; 281 #else 282 int use_bigtsb_arena = 0; 283 #endif 284 285 /* External /etc/system tunable, for turning on&off the shctx support */ 286 int disable_shctx = 0; 287 /* Internal variable, set by MD if the HW supports shctx feature */ 288 int shctx_on = 0; 289 290 #ifdef DEBUG 291 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 292 #endif 293 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 294 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 295 296 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 297 static void sfmmu_find_scd(sfmmu_t *); 298 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 299 static void sfmmu_finish_join_scd(sfmmu_t *); 300 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 301 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 302 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 303 static void sfmmu_free_scd_tsbs(sfmmu_t *); 304 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 305 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 306 static void sfmmu_ism_hatflags(sfmmu_t *, int); 307 static int sfmmu_srd_lock_held(sf_srd_t *); 308 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 309 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 310 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 311 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 312 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 313 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 314 315 /* 316 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 317 * HAT flags, synchronizing TLB/TSB coherency, and context management. 318 * The lock is hashed on the sfmmup since the case where we need to lock 319 * all processes is rare but does occur (e.g. we need to unload a shared 320 * mapping from all processes using the mapping). We have a lot of buckets, 321 * and each slab of sfmmu_t's can use about a quarter of them, giving us 322 * a fairly good distribution without wasting too much space and overhead 323 * when we have to grab them all. 324 */ 325 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 326 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 327 328 /* 329 * Hash algorithm optimized for a small number of slabs. 330 * 7 is (highbit((sizeof sfmmu_t)) - 1) 331 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 332 * kmem_cache, and thus they will be sequential within that cache. In 333 * addition, each new slab will have a different "color" up to cache_maxcolor 334 * which will skew the hashing for each successive slab which is allocated. 335 * If the size of sfmmu_t changed to a larger size, this algorithm may need 336 * to be revisited. 337 */ 338 #define TSB_HASH_SHIFT_BITS (7) 339 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 340 341 #ifdef DEBUG 342 int tsb_hash_debug = 0; 343 #define TSB_HASH(sfmmup) \ 344 (tsb_hash_debug ? &hat_lock[0] : \ 345 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 346 #else /* DEBUG */ 347 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 348 #endif /* DEBUG */ 349 350 351 /* sfmmu_replace_tsb() return codes. */ 352 typedef enum tsb_replace_rc { 353 TSB_SUCCESS, 354 TSB_ALLOCFAIL, 355 TSB_LOSTRACE, 356 TSB_ALREADY_SWAPPED, 357 TSB_CANTGROW 358 } tsb_replace_rc_t; 359 360 /* 361 * Flags for TSB allocation routines. 362 */ 363 #define TSB_ALLOC 0x01 364 #define TSB_FORCEALLOC 0x02 365 #define TSB_GROW 0x04 366 #define TSB_SHRINK 0x08 367 #define TSB_SWAPIN 0x10 368 369 /* 370 * Support for HAT callbacks. 371 */ 372 #define SFMMU_MAX_RELOC_CALLBACKS 10 373 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 374 static id_t sfmmu_cb_nextid = 0; 375 static id_t sfmmu_tsb_cb_id; 376 struct sfmmu_callback *sfmmu_cb_table; 377 378 kmutex_t kpr_mutex; 379 kmutex_t kpr_suspendlock; 380 kthread_t *kreloc_thread; 381 382 /* 383 * Enable VA->PA translation sanity checking on DEBUG kernels. 384 * Disabled by default. This is incompatible with some 385 * drivers (error injector, RSM) so if it breaks you get 386 * to keep both pieces. 387 */ 388 int hat_check_vtop = 0; 389 390 /* 391 * Private sfmmu routines (prototypes) 392 */ 393 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 394 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 395 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 396 uint_t); 397 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 398 caddr_t, demap_range_t *, uint_t); 399 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 400 caddr_t, int); 401 static void sfmmu_hblk_free(struct hme_blk **); 402 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 403 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 404 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 405 static struct hme_blk *sfmmu_hblk_steal(int); 406 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 407 struct hme_blk *, uint64_t, struct hme_blk *); 408 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 409 410 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 411 struct page **, uint_t, uint_t, uint_t); 412 static void hat_do_memload(struct hat *, caddr_t, struct page *, 413 uint_t, uint_t, uint_t); 414 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 415 uint_t, uint_t, pgcnt_t, uint_t); 416 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 417 uint_t); 418 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 419 uint_t, uint_t); 420 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 421 caddr_t, int, uint_t); 422 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 423 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 424 uint_t); 425 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 426 caddr_t, page_t **, uint_t, uint_t); 427 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 428 429 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 430 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 431 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 432 #ifdef VAC 433 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 434 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 435 int tst_tnc(page_t *pp, pgcnt_t); 436 void conv_tnc(page_t *pp, int); 437 #endif 438 439 static void sfmmu_get_ctx(sfmmu_t *); 440 static void sfmmu_free_sfmmu(sfmmu_t *); 441 442 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 443 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 444 445 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 446 static void hat_pagereload(struct page *, struct page *); 447 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 448 #ifdef VAC 449 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 450 static void sfmmu_page_cache(page_t *, int, int, int); 451 #endif 452 453 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 454 struct hme_blk *, int); 455 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 456 pfn_t, int, int, int, int); 457 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 458 pfn_t, int); 459 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 460 static void sfmmu_tlb_range_demap(demap_range_t *); 461 static void sfmmu_invalidate_ctx(sfmmu_t *); 462 static void sfmmu_sync_mmustate(sfmmu_t *); 463 464 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 465 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 466 sfmmu_t *); 467 static void sfmmu_tsb_free(struct tsb_info *); 468 static void sfmmu_tsbinfo_free(struct tsb_info *); 469 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 470 sfmmu_t *); 471 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 472 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 473 static int sfmmu_select_tsb_szc(pgcnt_t); 474 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 475 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 476 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 477 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 478 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 479 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 480 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 481 hatlock_t *, uint_t); 482 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 483 484 #ifdef VAC 485 void sfmmu_cache_flush(pfn_t, int); 486 void sfmmu_cache_flushcolor(int, pfn_t); 487 #endif 488 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 489 caddr_t, demap_range_t *, uint_t, int); 490 491 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 492 static uint_t sfmmu_ptov_attr(tte_t *); 493 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 494 caddr_t, demap_range_t *, uint_t); 495 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 496 static int sfmmu_idcache_constructor(void *, void *, int); 497 static void sfmmu_idcache_destructor(void *, void *); 498 static int sfmmu_hblkcache_constructor(void *, void *, int); 499 static void sfmmu_hblkcache_destructor(void *, void *); 500 static void sfmmu_hblkcache_reclaim(void *); 501 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 502 struct hmehash_bucket *); 503 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 504 struct hme_blk *, struct hme_blk **, int); 505 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 506 uint64_t); 507 static struct hme_blk *sfmmu_check_pending_hblks(int); 508 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 509 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 510 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 511 int, caddr_t *); 512 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 513 514 static void sfmmu_rm_large_mappings(page_t *, int); 515 516 static void hat_lock_init(void); 517 static void hat_kstat_init(void); 518 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 519 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 520 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 521 static void sfmmu_check_page_sizes(sfmmu_t *, int); 522 int fnd_mapping_sz(page_t *); 523 static void iment_add(struct ism_ment *, struct hat *); 524 static void iment_sub(struct ism_ment *, struct hat *); 525 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 526 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 527 extern void sfmmu_clear_utsbinfo(void); 528 529 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t); 530 531 extern int vpm_enable; 532 533 /* kpm globals */ 534 #ifdef DEBUG 535 /* 536 * Enable trap level tsbmiss handling 537 */ 538 int kpm_tsbmtl = 1; 539 540 /* 541 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 542 * required TLB shootdowns in this case, so handle w/ care. Off by default. 543 */ 544 int kpm_tlb_flush; 545 #endif /* DEBUG */ 546 547 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 548 549 #ifdef DEBUG 550 static void sfmmu_check_hblk_flist(); 551 #endif 552 553 /* 554 * Semi-private sfmmu data structures. Some of them are initialize in 555 * startup or in hat_init. Some of them are private but accessed by 556 * assembly code or mach_sfmmu.c 557 */ 558 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 559 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 560 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 561 uint64_t khme_hash_pa; /* PA of khme_hash */ 562 int uhmehash_num; /* # of buckets in user hash table */ 563 int khmehash_num; /* # of buckets in kernel hash table */ 564 565 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 566 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 567 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 568 569 #define DEFAULT_NUM_CTXS_PER_MMU 8192 570 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 571 572 int cache; /* describes system cache */ 573 574 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 575 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 576 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 577 int ktsb_sz; /* kernel 8k-indexed tsb size */ 578 579 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 580 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 581 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 582 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 583 584 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 585 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 586 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 587 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 588 589 #ifndef sun4v 590 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 591 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 592 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 593 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 594 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 595 #endif /* sun4v */ 596 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 597 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 598 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */ 599 600 /* 601 * Size to use for TSB slabs. Future platforms that support page sizes 602 * larger than 4M may wish to change these values, and provide their own 603 * assembly macros for building and decoding the TSB base register contents. 604 * Note disable_large_pages will override the value set here. 605 */ 606 static uint_t tsb_slab_ttesz = TTE4M; 607 size_t tsb_slab_size = MMU_PAGESIZE4M; 608 uint_t tsb_slab_shift = MMU_PAGESHIFT4M; 609 /* PFN mask for TTE */ 610 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT; 611 612 /* 613 * Size to use for TSB slabs. These are used only when 256M tsb arenas 614 * exist. 615 */ 616 static uint_t bigtsb_slab_ttesz = TTE256M; 617 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 618 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 619 /* 256M page alignment for 8K pfn */ 620 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 621 622 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 623 static int tsb_max_growsize = 0; 624 625 /* 626 * Tunable parameters dealing with TSB policies. 627 */ 628 629 /* 630 * This undocumented tunable forces all 8K TSBs to be allocated from 631 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 632 */ 633 #ifdef DEBUG 634 int tsb_forceheap = 0; 635 #endif /* DEBUG */ 636 637 /* 638 * Decide whether to use per-lgroup arenas, or one global set of 639 * TSB arenas. The default is not to break up per-lgroup, since 640 * most platforms don't recognize any tangible benefit from it. 641 */ 642 int tsb_lgrp_affinity = 0; 643 644 /* 645 * Used for growing the TSB based on the process RSS. 646 * tsb_rss_factor is based on the smallest TSB, and is 647 * shifted by the TSB size to determine if we need to grow. 648 * The default will grow the TSB if the number of TTEs for 649 * this page size exceeds 75% of the number of TSB entries, 650 * which should _almost_ eliminate all conflict misses 651 * (at the expense of using up lots and lots of memory). 652 */ 653 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 654 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 655 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 656 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 657 default_tsb_size) 658 #define TSB_OK_SHRINK() \ 659 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 660 #define TSB_OK_GROW() \ 661 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 662 663 int enable_tsb_rss_sizing = 1; 664 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 665 666 /* which TSB size code to use for new address spaces or if rss sizing off */ 667 int default_tsb_size = TSB_8K_SZCODE; 668 669 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 670 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 671 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 672 673 #ifdef DEBUG 674 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 675 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 676 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 677 static int tsb_alloc_fail_mtbf = 0; 678 static int tsb_alloc_count = 0; 679 #endif /* DEBUG */ 680 681 /* if set to 1, will remap valid TTEs when growing TSB. */ 682 int tsb_remap_ttes = 1; 683 684 /* 685 * If we have more than this many mappings, allocate a second TSB. 686 * This default is chosen because the I/D fully associative TLBs are 687 * assumed to have at least 8 available entries. Platforms with a 688 * larger fully-associative TLB could probably override the default. 689 */ 690 691 #ifdef sun4v 692 int tsb_sectsb_threshold = 0; 693 #else 694 int tsb_sectsb_threshold = 8; 695 #endif 696 697 /* 698 * kstat data 699 */ 700 struct sfmmu_global_stat sfmmu_global_stat; 701 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 702 703 /* 704 * Global data 705 */ 706 sfmmu_t *ksfmmup; /* kernel's hat id */ 707 708 #ifdef DEBUG 709 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 710 #endif 711 712 /* sfmmu locking operations */ 713 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 714 static int sfmmu_mlspl_held(struct page *, int); 715 716 kmutex_t *sfmmu_page_enter(page_t *); 717 void sfmmu_page_exit(kmutex_t *); 718 int sfmmu_page_spl_held(struct page *); 719 720 /* sfmmu internal locking operations - accessed directly */ 721 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 722 kmutex_t **, kmutex_t **); 723 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 724 static hatlock_t * 725 sfmmu_hat_enter(sfmmu_t *); 726 static hatlock_t * 727 sfmmu_hat_tryenter(sfmmu_t *); 728 static void sfmmu_hat_exit(hatlock_t *); 729 static void sfmmu_hat_lock_all(void); 730 static void sfmmu_hat_unlock_all(void); 731 static void sfmmu_ismhat_enter(sfmmu_t *, int); 732 static void sfmmu_ismhat_exit(sfmmu_t *, int); 733 734 kpm_hlk_t *kpmp_table; 735 uint_t kpmp_table_sz; /* must be a power of 2 */ 736 uchar_t kpmp_shift; 737 738 kpm_shlk_t *kpmp_stable; 739 uint_t kpmp_stable_sz; /* must be a power of 2 */ 740 741 /* 742 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128. 743 * SPL_SHIFT is log2(SPL_TABLE_SIZE). 744 */ 745 #if ((2*NCPU_P2) > 128) 746 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1)) 747 #else 748 #define SPL_SHIFT 7U 749 #endif 750 #define SPL_TABLE_SIZE (1U << SPL_SHIFT) 751 #define SPL_MASK (SPL_TABLE_SIZE - 1) 752 753 /* 754 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t 755 * and by multiples of SPL_SHIFT to get as many varied bits as we can. 756 */ 757 #define SPL_INDEX(pp) \ 758 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \ 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \ 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \ 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \ 762 SPL_MASK) 763 764 #define SPL_HASH(pp) \ 765 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex) 766 767 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 768 769 /* Array of mutexes protecting a page's mapping list and p_nrm field. */ 770 771 #define MML_TABLE_SIZE SPL_TABLE_SIZE 772 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex) 773 774 static pad_mutex_t mml_table[MML_TABLE_SIZE]; 775 776 /* 777 * hat_unload_callback() will group together callbacks in order 778 * to avoid xt_sync() calls. This is the maximum size of the group. 779 */ 780 #define MAX_CB_ADDR 32 781 782 tte_t hw_tte; 783 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 784 785 static char *mmu_ctx_kstat_names[] = { 786 "mmu_ctx_tsb_exceptions", 787 "mmu_ctx_tsb_raise_exception", 788 "mmu_ctx_wrap_around", 789 }; 790 791 /* 792 * Wrapper for vmem_xalloc since vmem_create only allows limited 793 * parameters for vm_source_alloc functions. This function allows us 794 * to specify alignment consistent with the size of the object being 795 * allocated. 796 */ 797 static void * 798 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 799 { 800 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 801 } 802 803 /* Common code for setting tsb_alloc_hiwater. */ 804 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 805 ptob(pages) / tsb_alloc_hiwater_factor 806 807 /* 808 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 809 * a single TSB. physmem is the number of physical pages so we need physmem 8K 810 * TTEs to represent all those physical pages. We round this up by using 811 * 1<<highbit(). To figure out which size code to use, remember that the size 812 * code is just an amount to shift the smallest TSB size to get the size of 813 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 814 * highbit() - 1) to get the size code for the smallest TSB that can represent 815 * all of physical memory, while erring on the side of too much. 816 * 817 * Restrict tsb_max_growsize to make sure that: 818 * 1) TSBs can't grow larger than the TSB slab size 819 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 820 */ 821 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 822 int _i, _szc, _slabszc, _tsbszc; \ 823 \ 824 _i = highbit(pages); \ 825 if ((1 << (_i - 1)) == (pages)) \ 826 _i--; /* 2^n case, round down */ \ 827 _szc = _i - TSB_START_SIZE; \ 828 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 829 _tsbszc = MIN(_szc, _slabszc); \ 830 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 831 } 832 833 /* 834 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 835 * tsb_info which handles that TTE size. 836 */ 837 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 838 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 839 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 840 sfmmu_hat_lock_held(sfmmup)); \ 841 if ((tte_szc) >= TTE4M) { \ 842 ASSERT((tsbinfop) != NULL); \ 843 (tsbinfop) = (tsbinfop)->tsb_next; \ 844 } \ 845 } 846 847 /* 848 * Macro to use to unload entries from the TSB. 849 * It has knowledge of which page sizes get replicated in the TSB 850 * and will call the appropriate unload routine for the appropriate size. 851 */ 852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 853 { \ 854 int ttesz = get_hblk_ttesz(hmeblkp); \ 855 if (ttesz == TTE8K || ttesz == TTE4M) { \ 856 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 857 } else { \ 858 caddr_t sva = ismhat ? addr : \ 859 (caddr_t)get_hblk_base(hmeblkp); \ 860 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 861 ASSERT(addr >= sva && addr < eva); \ 862 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 863 } \ 864 } 865 866 867 /* Update tsb_alloc_hiwater after memory is configured. */ 868 /*ARGSUSED*/ 869 static void 870 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 871 { 872 /* Assumes physmem has already been updated. */ 873 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 874 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 875 } 876 877 /* 878 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 879 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 880 * deleted. 881 */ 882 /*ARGSUSED*/ 883 static int 884 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages) 885 { 886 return (0); 887 } 888 889 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 890 /*ARGSUSED*/ 891 static void 892 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 893 { 894 /* 895 * Whether the delete was cancelled or not, just go ahead and update 896 * tsb_alloc_hiwater and tsb_max_growsize. 897 */ 898 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 899 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 900 } 901 902 static kphysm_setup_vector_t sfmmu_update_vec = { 903 KPHYSM_SETUP_VECTOR_VERSION, /* version */ 904 sfmmu_update_post_add, /* post_add */ 905 sfmmu_update_pre_del, /* pre_del */ 906 sfmmu_update_post_del /* post_del */ 907 }; 908 909 910 /* 911 * HME_BLK HASH PRIMITIVES 912 */ 913 914 /* 915 * Enter a hme on the mapping list for page pp. 916 * When large pages are more prevalent in the system we might want to 917 * keep the mapping list in ascending order by the hment size. For now, 918 * small pages are more frequent, so don't slow it down. 919 */ 920 #define HME_ADD(hme, pp) \ 921 { \ 922 ASSERT(sfmmu_mlist_held(pp)); \ 923 \ 924 hme->hme_prev = NULL; \ 925 hme->hme_next = pp->p_mapping; \ 926 hme->hme_page = pp; \ 927 if (pp->p_mapping) { \ 928 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\ 929 ASSERT(pp->p_share > 0); \ 930 } else { \ 931 /* EMPTY */ \ 932 ASSERT(pp->p_share == 0); \ 933 } \ 934 pp->p_mapping = hme; \ 935 pp->p_share++; \ 936 } 937 938 /* 939 * Enter a hme on the mapping list for page pp. 940 * If we are unmapping a large translation, we need to make sure that the 941 * change is reflect in the corresponding bit of the p_index field. 942 */ 943 #define HME_SUB(hme, pp) \ 944 { \ 945 ASSERT(sfmmu_mlist_held(pp)); \ 946 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \ 947 \ 948 if (pp->p_mapping == NULL) { \ 949 panic("hme_remove - no mappings"); \ 950 } \ 951 \ 952 membar_stst(); /* ensure previous stores finish */ \ 953 \ 954 ASSERT(pp->p_share > 0); \ 955 pp->p_share--; \ 956 \ 957 if (hme->hme_prev) { \ 958 ASSERT(pp->p_mapping != hme); \ 959 ASSERT(hme->hme_prev->hme_page == pp || \ 960 IS_PAHME(hme->hme_prev)); \ 961 hme->hme_prev->hme_next = hme->hme_next; \ 962 } else { \ 963 ASSERT(pp->p_mapping == hme); \ 964 pp->p_mapping = hme->hme_next; \ 965 ASSERT((pp->p_mapping == NULL) ? \ 966 (pp->p_share == 0) : 1); \ 967 } \ 968 \ 969 if (hme->hme_next) { \ 970 ASSERT(hme->hme_next->hme_page == pp || \ 971 IS_PAHME(hme->hme_next)); \ 972 hme->hme_next->hme_prev = hme->hme_prev; \ 973 } \ 974 \ 975 /* zero out the entry */ \ 976 hme->hme_next = NULL; \ 977 hme->hme_prev = NULL; \ 978 hme->hme_page = NULL; \ 979 \ 980 if (hme_size(hme) > TTE8K) { \ 981 /* remove mappings for remainder of large pg */ \ 982 sfmmu_rm_large_mappings(pp, hme_size(hme)); \ 983 } \ 984 } 985 986 /* 987 * This function returns the hment given the hme_blk and a vaddr. 988 * It assumes addr has already been checked to belong to hme_blk's 989 * range. 990 */ 991 #define HBLKTOHME(hment, hmeblkp, addr) \ 992 { \ 993 int index; \ 994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \ 995 } 996 997 /* 998 * Version of HBLKTOHME that also returns the index in hmeblkp 999 * of the hment. 1000 */ 1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \ 1002 { \ 1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \ 1004 \ 1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \ 1006 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \ 1007 } else \ 1008 idx = 0; \ 1009 \ 1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \ 1011 } 1012 1013 /* 1014 * Disable any page sizes not supported by the CPU 1015 */ 1016 void 1017 hat_init_pagesizes() 1018 { 1019 int i; 1020 1021 mmu_exported_page_sizes = 0; 1022 for (i = TTE8K; i < max_mmu_page_sizes; i++) { 1023 1024 szc_2_userszc[i] = (uint_t)-1; 1025 userszc_2_szc[i] = (uint_t)-1; 1026 1027 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) { 1028 disable_large_pages |= (1 << i); 1029 } else { 1030 szc_2_userszc[i] = mmu_exported_page_sizes; 1031 userszc_2_szc[mmu_exported_page_sizes] = i; 1032 mmu_exported_page_sizes++; 1033 } 1034 } 1035 1036 disable_ism_large_pages |= disable_large_pages; 1037 disable_auto_data_large_pages = disable_large_pages; 1038 disable_auto_text_large_pages = disable_large_pages; 1039 1040 /* 1041 * Initialize mmu-specific large page sizes. 1042 */ 1043 if (&mmu_large_pages_disabled) { 1044 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD); 1045 disable_ism_large_pages |= 1046 mmu_large_pages_disabled(HAT_LOAD_SHARE); 1047 disable_auto_data_large_pages |= 1048 mmu_large_pages_disabled(HAT_AUTO_DATA); 1049 disable_auto_text_large_pages |= 1050 mmu_large_pages_disabled(HAT_AUTO_TEXT); 1051 } 1052 } 1053 1054 /* 1055 * Initialize the hardware address translation structures. 1056 */ 1057 void 1058 hat_init(void) 1059 { 1060 int i; 1061 uint_t sz; 1062 size_t size; 1063 1064 hat_lock_init(); 1065 hat_kstat_init(); 1066 1067 /* 1068 * Hardware-only bits in a TTE 1069 */ 1070 MAKE_TTE_MASK(&hw_tte); 1071 1072 hat_init_pagesizes(); 1073 1074 /* Initialize the hash locks */ 1075 for (i = 0; i < khmehash_num; i++) { 1076 mutex_init(&khme_hash[i].hmehash_mutex, NULL, 1077 MUTEX_DEFAULT, NULL); 1078 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1079 } 1080 for (i = 0; i < uhmehash_num; i++) { 1081 mutex_init(&uhme_hash[i].hmehash_mutex, NULL, 1082 MUTEX_DEFAULT, NULL); 1083 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA; 1084 } 1085 khmehash_num--; /* make sure counter starts from 0 */ 1086 uhmehash_num--; /* make sure counter starts from 0 */ 1087 1088 /* 1089 * Allocate context domain structures. 1090 * 1091 * A platform may choose to modify max_mmu_ctxdoms in 1092 * set_platform_defaults(). If a platform does not define 1093 * a set_platform_defaults() or does not choose to modify 1094 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU. 1095 * 1096 * For all platforms that have CPUs sharing MMUs, this 1097 * value must be defined. 1098 */ 1099 if (max_mmu_ctxdoms == 0) 1100 max_mmu_ctxdoms = max_ncpus; 1101 1102 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *); 1103 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP); 1104 1105 /* mmu_ctx_t is 64 bytes aligned */ 1106 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache", 1107 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 1108 /* 1109 * MMU context domain initialization for the Boot CPU. 1110 * This needs the context domains array allocated above. 1111 */ 1112 mutex_enter(&cpu_lock); 1113 sfmmu_cpu_init(CPU); 1114 mutex_exit(&cpu_lock); 1115 1116 /* 1117 * Intialize ism mapping list lock. 1118 */ 1119 1120 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1121 1122 /* 1123 * Each sfmmu structure carries an array of MMU context info 1124 * structures, one per context domain. The size of this array depends 1125 * on the maximum number of context domains. So, the size of the 1126 * sfmmu structure varies per platform. 1127 * 1128 * sfmmu is allocated from static arena, because trap 1129 * handler at TL > 0 is not allowed to touch kernel relocatable 1130 * memory. sfmmu's alignment is changed to 64 bytes from 1131 * default 8 bytes, as the lower 6 bits will be used to pass 1132 * pgcnt to vtag_flush_pgcnt_tl1. 1133 */ 1134 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1135 1136 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1137 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1138 NULL, NULL, static_arena, 0); 1139 1140 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1141 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1142 1143 /* 1144 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1145 * from the heap when low on memory or when TSB_FORCEALLOC is 1146 * specified, don't use magazines to cache them--we want to return 1147 * them to the system as quickly as possible. 1148 */ 1149 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1150 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1151 static_arena, KMC_NOMAGAZINE); 1152 1153 /* 1154 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1155 * memory, which corresponds to the old static reserve for TSBs. 1156 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1157 * memory we'll allocate for TSB slabs; beyond this point TSB 1158 * allocations will be taken from the kernel heap (via 1159 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1160 * consumer. 1161 */ 1162 if (tsb_alloc_hiwater_factor == 0) { 1163 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1164 } 1165 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1166 1167 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1168 if (!(disable_large_pages & (1 << sz))) 1169 break; 1170 } 1171 1172 if (sz < tsb_slab_ttesz) { 1173 tsb_slab_ttesz = sz; 1174 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1175 tsb_slab_size = 1 << tsb_slab_shift; 1176 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1177 use_bigtsb_arena = 0; 1178 } else if (use_bigtsb_arena && 1179 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1180 use_bigtsb_arena = 0; 1181 } 1182 1183 if (!use_bigtsb_arena) { 1184 bigtsb_slab_shift = tsb_slab_shift; 1185 } 1186 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1187 1188 /* 1189 * On smaller memory systems, allocate TSB memory in smaller chunks 1190 * than the default 4M slab size. We also honor disable_large_pages 1191 * here. 1192 * 1193 * The trap handlers need to be patched with the final slab shift, 1194 * since they need to be able to construct the TSB pointer at runtime. 1195 */ 1196 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1197 !(disable_large_pages & (1 << TTE512K))) { 1198 tsb_slab_ttesz = TTE512K; 1199 tsb_slab_shift = MMU_PAGESHIFT512K; 1200 tsb_slab_size = MMU_PAGESIZE512K; 1201 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1202 use_bigtsb_arena = 0; 1203 } 1204 1205 if (!use_bigtsb_arena) { 1206 bigtsb_slab_ttesz = tsb_slab_ttesz; 1207 bigtsb_slab_shift = tsb_slab_shift; 1208 bigtsb_slab_size = tsb_slab_size; 1209 bigtsb_slab_mask = tsb_slab_mask; 1210 } 1211 1212 1213 /* 1214 * Set up memory callback to update tsb_alloc_hiwater and 1215 * tsb_max_growsize. 1216 */ 1217 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1218 ASSERT(i == 0); 1219 1220 /* 1221 * kmem_tsb_arena is the source from which large TSB slabs are 1222 * drawn. The quantum of this arena corresponds to the largest 1223 * TSB size we can dynamically allocate for user processes. 1224 * Currently it must also be a supported page size since we 1225 * use exactly one translation entry to map each slab page. 1226 * 1227 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1228 * which most TSBs are allocated. Since most TSB allocations are 1229 * typically 8K we have a kmem cache we stack on top of each 1230 * kmem_tsb_default_arena to speed up those allocations. 1231 * 1232 * Note the two-level scheme of arenas is required only 1233 * because vmem_create doesn't allow us to specify alignment 1234 * requirements. If this ever changes the code could be 1235 * simplified to use only one level of arenas. 1236 * 1237 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1238 * will be provided in addition to the 4M kmem_tsb_arena. 1239 */ 1240 if (use_bigtsb_arena) { 1241 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1242 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1243 vmem_xfree, heap_arena, 0, VM_SLEEP); 1244 } 1245 1246 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1247 sfmmu_vmem_xalloc_aligned_wrapper, 1248 vmem_xfree, heap_arena, 0, VM_SLEEP); 1249 1250 if (tsb_lgrp_affinity) { 1251 char s[50]; 1252 for (i = 0; i < NLGRPS_MAX; i++) { 1253 if (use_bigtsb_arena) { 1254 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1255 kmem_bigtsb_default_arena[i] = vmem_create(s, 1256 NULL, 0, 2 * tsb_slab_size, 1257 sfmmu_tsb_segkmem_alloc, 1258 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1259 0, VM_SLEEP | VM_BESTFIT); 1260 } 1261 1262 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1263 kmem_tsb_default_arena[i] = vmem_create(s, 1264 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1265 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1266 VM_SLEEP | VM_BESTFIT); 1267 1268 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1269 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1270 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1271 kmem_tsb_default_arena[i], 0); 1272 } 1273 } else { 1274 if (use_bigtsb_arena) { 1275 kmem_bigtsb_default_arena[0] = 1276 vmem_create("kmem_bigtsb_default", NULL, 0, 1277 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1278 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1279 VM_SLEEP | VM_BESTFIT); 1280 } 1281 1282 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1283 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1284 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1285 VM_SLEEP | VM_BESTFIT); 1286 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1287 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1288 kmem_tsb_default_arena[0], 0); 1289 } 1290 1291 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1292 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1293 sfmmu_hblkcache_destructor, 1294 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1295 hat_memload_arena, KMC_NOHASH); 1296 1297 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1298 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, 1299 VMC_DUMPSAFE | VM_SLEEP); 1300 1301 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1302 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1303 sfmmu_hblkcache_destructor, 1304 NULL, (void *)HME1BLK_SZ, 1305 hat_memload1_arena, KMC_NOHASH); 1306 1307 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1308 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1309 1310 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1311 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1312 NULL, NULL, static_arena, KMC_NOHASH); 1313 1314 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1315 sizeof (ism_ment_t), 0, NULL, NULL, 1316 NULL, NULL, NULL, 0); 1317 1318 /* 1319 * We grab the first hat for the kernel, 1320 */ 1321 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1322 kas.a_hat = hat_alloc(&kas); 1323 AS_LOCK_EXIT(&kas, &kas.a_lock); 1324 1325 /* 1326 * Initialize hblk_reserve. 1327 */ 1328 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1329 va_to_pa((caddr_t)hblk_reserve); 1330 1331 #ifndef UTSB_PHYS 1332 /* 1333 * Reserve some kernel virtual address space for the locked TTEs 1334 * that allow us to probe the TSB from TL>0. 1335 */ 1336 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1337 0, 0, NULL, NULL, VM_SLEEP); 1338 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1339 0, 0, NULL, NULL, VM_SLEEP); 1340 #endif 1341 1342 #ifdef VAC 1343 /* 1344 * The big page VAC handling code assumes VAC 1345 * will not be bigger than the smallest big 1346 * page- which is 64K. 1347 */ 1348 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1349 cmn_err(CE_PANIC, "VAC too big!"); 1350 } 1351 #endif 1352 1353 (void) xhat_init(); 1354 1355 uhme_hash_pa = va_to_pa(uhme_hash); 1356 khme_hash_pa = va_to_pa(khme_hash); 1357 1358 /* 1359 * Initialize relocation locks. kpr_suspendlock is held 1360 * at PIL_MAX to prevent interrupts from pinning the holder 1361 * of a suspended TTE which may access it leading to a 1362 * deadlock condition. 1363 */ 1364 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1365 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1366 1367 /* 1368 * If Shared context support is disabled via /etc/system 1369 * set shctx_on to 0 here if it was set to 1 earlier in boot 1370 * sequence by cpu module initialization code. 1371 */ 1372 if (shctx_on && disable_shctx) { 1373 shctx_on = 0; 1374 } 1375 1376 if (shctx_on) { 1377 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1378 sizeof (srd_buckets[0]), KM_SLEEP); 1379 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1380 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1381 MUTEX_DEFAULT, NULL); 1382 } 1383 1384 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1385 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1386 NULL, NULL, NULL, 0); 1387 region_cache = kmem_cache_create("region_cache", 1388 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1389 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1390 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1391 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1392 NULL, NULL, NULL, 0); 1393 } 1394 1395 /* 1396 * Pre-allocate hrm_hashtab before enabling the collection of 1397 * refmod statistics. Allocating on the fly would mean us 1398 * running the risk of suffering recursive mutex enters or 1399 * deadlocks. 1400 */ 1401 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1402 KM_SLEEP); 1403 1404 /* Allocate per-cpu pending freelist of hmeblks */ 1405 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1406 KM_SLEEP); 1407 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1408 (uintptr_t)cpu_hme_pend, 64); 1409 1410 for (i = 0; i < NCPU; i++) { 1411 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1412 NULL); 1413 } 1414 1415 if (cpu_hme_pend_thresh == 0) { 1416 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1417 } 1418 } 1419 1420 /* 1421 * Initialize locking for the hat layer, called early during boot. 1422 */ 1423 static void 1424 hat_lock_init() 1425 { 1426 int i; 1427 1428 /* 1429 * initialize the array of mutexes protecting a page's mapping 1430 * list and p_nrm field. 1431 */ 1432 for (i = 0; i < MML_TABLE_SIZE; i++) 1433 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL); 1434 1435 if (kpm_enable) { 1436 for (i = 0; i < kpmp_table_sz; i++) { 1437 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1438 MUTEX_DEFAULT, NULL); 1439 } 1440 } 1441 1442 /* 1443 * Initialize array of mutex locks that protects sfmmu fields and 1444 * TSB lists. 1445 */ 1446 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1447 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1448 NULL); 1449 } 1450 1451 #define SFMMU_KERNEL_MAXVA \ 1452 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1453 1454 /* 1455 * Allocate a hat structure. 1456 * Called when an address space first uses a hat. 1457 */ 1458 struct hat * 1459 hat_alloc(struct as *as) 1460 { 1461 sfmmu_t *sfmmup; 1462 int i; 1463 uint64_t cnum; 1464 extern uint_t get_color_start(struct as *); 1465 1466 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1467 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1468 sfmmup->sfmmu_as = as; 1469 sfmmup->sfmmu_flags = 0; 1470 sfmmup->sfmmu_tteflags = 0; 1471 sfmmup->sfmmu_rtteflags = 0; 1472 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1473 1474 if (as == &kas) { 1475 ksfmmup = sfmmup; 1476 sfmmup->sfmmu_cext = 0; 1477 cnum = KCONTEXT; 1478 1479 sfmmup->sfmmu_clrstart = 0; 1480 sfmmup->sfmmu_tsb = NULL; 1481 /* 1482 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1483 * to setup tsb_info for ksfmmup. 1484 */ 1485 } else { 1486 1487 /* 1488 * Just set to invalid ctx. When it faults, it will 1489 * get a valid ctx. This would avoid the situation 1490 * where we get a ctx, but it gets stolen and then 1491 * we fault when we try to run and so have to get 1492 * another ctx. 1493 */ 1494 sfmmup->sfmmu_cext = 0; 1495 cnum = INVALID_CONTEXT; 1496 1497 /* initialize original physical page coloring bin */ 1498 sfmmup->sfmmu_clrstart = get_color_start(as); 1499 #ifdef DEBUG 1500 if (tsb_random_size) { 1501 uint32_t randval = (uint32_t)gettick() >> 4; 1502 int size = randval % (tsb_max_growsize + 1); 1503 1504 /* chose a random tsb size for stress testing */ 1505 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1506 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1507 } else 1508 #endif /* DEBUG */ 1509 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1510 default_tsb_size, 1511 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1512 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1513 ASSERT(sfmmup->sfmmu_tsb != NULL); 1514 } 1515 1516 ASSERT(max_mmu_ctxdoms > 0); 1517 for (i = 0; i < max_mmu_ctxdoms; i++) { 1518 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1519 sfmmup->sfmmu_ctxs[i].gnum = 0; 1520 } 1521 1522 for (i = 0; i < max_mmu_page_sizes; i++) { 1523 sfmmup->sfmmu_ttecnt[i] = 0; 1524 sfmmup->sfmmu_scdrttecnt[i] = 0; 1525 sfmmup->sfmmu_ismttecnt[i] = 0; 1526 sfmmup->sfmmu_scdismttecnt[i] = 0; 1527 sfmmup->sfmmu_pgsz[i] = TTE8K; 1528 } 1529 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1530 sfmmup->sfmmu_iblk = NULL; 1531 sfmmup->sfmmu_ismhat = 0; 1532 sfmmup->sfmmu_scdhat = 0; 1533 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1534 if (sfmmup == ksfmmup) { 1535 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1536 } else { 1537 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1538 } 1539 sfmmup->sfmmu_free = 0; 1540 sfmmup->sfmmu_rmstat = 0; 1541 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1542 sfmmup->sfmmu_xhat_provider = NULL; 1543 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1544 sfmmup->sfmmu_srdp = NULL; 1545 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1546 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1547 sfmmup->sfmmu_scdp = NULL; 1548 sfmmup->sfmmu_scd_link.next = NULL; 1549 sfmmup->sfmmu_scd_link.prev = NULL; 1550 return (sfmmup); 1551 } 1552 1553 /* 1554 * Create per-MMU context domain kstats for a given MMU ctx. 1555 */ 1556 static void 1557 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1558 { 1559 mmu_ctx_stat_t stat; 1560 kstat_t *mmu_kstat; 1561 1562 ASSERT(MUTEX_HELD(&cpu_lock)); 1563 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1564 1565 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1566 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1567 1568 if (mmu_kstat == NULL) { 1569 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1570 mmu_ctxp->mmu_idx); 1571 } else { 1572 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1573 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1574 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1575 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1576 mmu_ctxp->mmu_kstat = mmu_kstat; 1577 kstat_install(mmu_kstat); 1578 } 1579 } 1580 1581 /* 1582 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1583 * context domain information for a given CPU. If a platform does not 1584 * specify that interface, then the function below is used instead to return 1585 * default information. The defaults are as follows: 1586 * 1587 * - The number of MMU context IDs supported on any CPU in the 1588 * system is 8K. 1589 * - There is one MMU context domain per CPU. 1590 */ 1591 /*ARGSUSED*/ 1592 static void 1593 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop) 1594 { 1595 infop->mmu_nctxs = nctxs; 1596 infop->mmu_idx = cpu[cpuid]->cpu_seqid; 1597 } 1598 1599 /* 1600 * Called during CPU initialization to set the MMU context-related information 1601 * for a CPU. 1602 * 1603 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum. 1604 */ 1605 void 1606 sfmmu_cpu_init(cpu_t *cp) 1607 { 1608 mmu_ctx_info_t info; 1609 mmu_ctx_t *mmu_ctxp; 1610 1611 ASSERT(MUTEX_HELD(&cpu_lock)); 1612 1613 if (&plat_cpuid_to_mmu_ctx_info == NULL) 1614 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1615 else 1616 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info); 1617 1618 ASSERT(info.mmu_idx < max_mmu_ctxdoms); 1619 1620 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) { 1621 /* Each mmu_ctx is cacheline aligned. */ 1622 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP); 1623 bzero(mmu_ctxp, sizeof (mmu_ctx_t)); 1624 1625 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN, 1626 (void *)ipltospl(DISP_LEVEL)); 1627 mmu_ctxp->mmu_idx = info.mmu_idx; 1628 mmu_ctxp->mmu_nctxs = info.mmu_nctxs; 1629 /* 1630 * Globally for lifetime of a system, 1631 * gnum must always increase. 1632 * mmu_saved_gnum is protected by the cpu_lock. 1633 */ 1634 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1; 1635 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 1636 1637 sfmmu_mmu_kstat_create(mmu_ctxp); 1638 1639 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp; 1640 } else { 1641 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx); 1642 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs); 1643 } 1644 1645 /* 1646 * The mmu_lock is acquired here to prevent races with 1647 * the wrap-around code. 1648 */ 1649 mutex_enter(&mmu_ctxp->mmu_lock); 1650 1651 1652 mmu_ctxp->mmu_ncpus++; 1653 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1654 CPU_MMU_IDX(cp) = info.mmu_idx; 1655 CPU_MMU_CTXP(cp) = mmu_ctxp; 1656 1657 mutex_exit(&mmu_ctxp->mmu_lock); 1658 } 1659 1660 static void 1661 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp) 1662 { 1663 ASSERT(MUTEX_HELD(&cpu_lock)); 1664 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock)); 1665 1666 mutex_destroy(&mmu_ctxp->mmu_lock); 1667 1668 if (mmu_ctxp->mmu_kstat) 1669 kstat_delete(mmu_ctxp->mmu_kstat); 1670 1671 /* mmu_saved_gnum is protected by the cpu_lock. */ 1672 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum) 1673 mmu_saved_gnum = mmu_ctxp->mmu_gnum; 1674 1675 kmem_cache_free(mmuctxdom_cache, mmu_ctxp); 1676 } 1677 1678 /* 1679 * Called to perform MMU context-related cleanup for a CPU. 1680 */ 1681 void 1682 sfmmu_cpu_cleanup(cpu_t *cp) 1683 { 1684 mmu_ctx_t *mmu_ctxp; 1685 1686 ASSERT(MUTEX_HELD(&cpu_lock)); 1687 1688 mmu_ctxp = CPU_MMU_CTXP(cp); 1689 ASSERT(mmu_ctxp != NULL); 1690 1691 /* 1692 * The mmu_lock is acquired here to prevent races with 1693 * the wrap-around code. 1694 */ 1695 mutex_enter(&mmu_ctxp->mmu_lock); 1696 1697 CPU_MMU_CTXP(cp) = NULL; 1698 1699 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id); 1700 if (--mmu_ctxp->mmu_ncpus == 0) { 1701 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL; 1702 mutex_exit(&mmu_ctxp->mmu_lock); 1703 sfmmu_ctxdom_free(mmu_ctxp); 1704 return; 1705 } 1706 1707 mutex_exit(&mmu_ctxp->mmu_lock); 1708 } 1709 1710 uint_t 1711 sfmmu_ctxdom_nctxs(int idx) 1712 { 1713 return (mmu_ctxs_tbl[idx]->mmu_nctxs); 1714 } 1715 1716 #ifdef sun4v 1717 /* 1718 * sfmmu_ctxdoms_* is an interface provided to help keep context domains 1719 * consistant after suspend/resume on system that can resume on a different 1720 * hardware than it was suspended. 1721 * 1722 * sfmmu_ctxdom_lock(void) locks all context domains and prevents new contexts 1723 * from being allocated. It acquires all hat_locks, which blocks most access to 1724 * context data, except for a few cases that are handled separately or are 1725 * harmless. It wraps each domain to increment gnum and invalidate on-CPU 1726 * contexts, and forces cnum to its max. As a result of this call all user 1727 * threads that are running on CPUs trap and try to perform wrap around but 1728 * can't because hat_locks are taken. Threads that were not on CPUs but started 1729 * by scheduler go to sfmmu_alloc_ctx() to aquire context without checking 1730 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block 1731 * on hat_lock trying to wrap. sfmmu_ctxdom_lock() must be called before CPUs 1732 * are paused, else it could deadlock acquiring locks held by paused CPUs. 1733 * 1734 * sfmmu_ctxdoms_remove() removes context domains from every CPUs and records 1735 * the CPUs that had them. It must be called after CPUs have been paused. This 1736 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data, 1737 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx 1738 * runs with interrupts disabled. When CPUs are later resumed, they may enter 1739 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately 1740 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus 1741 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is 1742 * accessing the old context domains. 1743 * 1744 * sfmmu_ctxdoms_update(void) frees space used by old context domains and 1745 * allocates new context domains based on hardware layout. It initializes 1746 * every CPU that had context domain before migration to have one again. 1747 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it 1748 * could deadlock acquiring locks held by paused CPUs. 1749 * 1750 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads 1751 * acquire new context ids and continue execution. 1752 * 1753 * Therefore functions should be called in the following order: 1754 * suspend_routine() 1755 * sfmmu_ctxdom_lock() 1756 * pause_cpus() 1757 * suspend() 1758 * if (suspend failed) 1759 * sfmmu_ctxdom_unlock() 1760 * ... 1761 * sfmmu_ctxdom_remove() 1762 * resume_cpus() 1763 * sfmmu_ctxdom_update() 1764 * sfmmu_ctxdom_unlock() 1765 */ 1766 static cpuset_t sfmmu_ctxdoms_pset; 1767 1768 void 1769 sfmmu_ctxdoms_remove() 1770 { 1771 processorid_t id; 1772 cpu_t *cp; 1773 1774 /* 1775 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can 1776 * be restored post-migration. A CPU may be powered off and not have a 1777 * domain, for example. 1778 */ 1779 CPUSET_ZERO(sfmmu_ctxdoms_pset); 1780 1781 for (id = 0; id < NCPU; id++) { 1782 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) { 1783 CPUSET_ADD(sfmmu_ctxdoms_pset, id); 1784 CPU_MMU_CTXP(cp) = NULL; 1785 } 1786 } 1787 } 1788 1789 void 1790 sfmmu_ctxdoms_lock(void) 1791 { 1792 int idx; 1793 mmu_ctx_t *mmu_ctxp; 1794 1795 sfmmu_hat_lock_all(); 1796 1797 /* 1798 * At this point, no thread can be in sfmmu_ctx_wrap_around, because 1799 * hat_lock is always taken before calling it. 1800 * 1801 * For each domain, set mmu_cnum to max so no more contexts can be 1802 * allocated, and wrap to flush on-CPU contexts and force threads to 1803 * acquire a new context when we later drop hat_lock after migration. 1804 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum, 1805 * but the latter uses CAS and will miscompare and not overwrite it. 1806 */ 1807 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */ 1808 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1809 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) { 1810 mutex_enter(&mmu_ctxp->mmu_lock); 1811 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs; 1812 /* make sure updated cnum visible */ 1813 membar_enter(); 1814 mutex_exit(&mmu_ctxp->mmu_lock); 1815 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE); 1816 } 1817 } 1818 kpreempt_enable(); 1819 } 1820 1821 void 1822 sfmmu_ctxdoms_unlock(void) 1823 { 1824 sfmmu_hat_unlock_all(); 1825 } 1826 1827 void 1828 sfmmu_ctxdoms_update(void) 1829 { 1830 processorid_t id; 1831 cpu_t *cp; 1832 uint_t idx; 1833 mmu_ctx_t *mmu_ctxp; 1834 1835 /* 1836 * Free all context domains. As side effect, this increases 1837 * mmu_saved_gnum to the maximum gnum over all domains, which is used to 1838 * init gnum in the new domains, which therefore will be larger than the 1839 * sfmmu gnum for any process, guaranteeing that every process will see 1840 * a new generation and allocate a new context regardless of what new 1841 * domain it runs in. 1842 */ 1843 mutex_enter(&cpu_lock); 1844 1845 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1846 if (mmu_ctxs_tbl[idx] != NULL) { 1847 mmu_ctxp = mmu_ctxs_tbl[idx]; 1848 mmu_ctxs_tbl[idx] = NULL; 1849 sfmmu_ctxdom_free(mmu_ctxp); 1850 } 1851 } 1852 1853 for (id = 0; id < NCPU; id++) { 1854 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) && 1855 (cp = cpu[id]) != NULL) 1856 sfmmu_cpu_init(cp); 1857 } 1858 mutex_exit(&cpu_lock); 1859 } 1860 #endif 1861 1862 /* 1863 * Hat_setup, makes an address space context the current active one. 1864 * In sfmmu this translates to setting the secondary context with the 1865 * corresponding context. 1866 */ 1867 void 1868 hat_setup(struct hat *sfmmup, int allocflag) 1869 { 1870 hatlock_t *hatlockp; 1871 1872 /* Init needs some special treatment. */ 1873 if (allocflag == HAT_INIT) { 1874 /* 1875 * Make sure that we have 1876 * 1. a TSB 1877 * 2. a valid ctx that doesn't get stolen after this point. 1878 */ 1879 hatlockp = sfmmu_hat_enter(sfmmup); 1880 1881 /* 1882 * Swap in the TSB. hat_init() allocates tsbinfos without 1883 * TSBs, but we need one for init, since the kernel does some 1884 * special things to set up its stack and needs the TSB to 1885 * resolve page faults. 1886 */ 1887 sfmmu_tsb_swapin(sfmmup, hatlockp); 1888 1889 sfmmu_get_ctx(sfmmup); 1890 1891 sfmmu_hat_exit(hatlockp); 1892 } else { 1893 ASSERT(allocflag == HAT_ALLOC); 1894 1895 hatlockp = sfmmu_hat_enter(sfmmup); 1896 kpreempt_disable(); 1897 1898 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1899 /* 1900 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1901 * pagesize bits don't matter in this case since we are passing 1902 * INVALID_CONTEXT to it. 1903 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1904 */ 1905 sfmmu_setctx_sec(INVALID_CONTEXT); 1906 sfmmu_clear_utsbinfo(); 1907 1908 kpreempt_enable(); 1909 sfmmu_hat_exit(hatlockp); 1910 } 1911 } 1912 1913 /* 1914 * Free all the translation resources for the specified address space. 1915 * Called from as_free when an address space is being destroyed. 1916 */ 1917 void 1918 hat_free_start(struct hat *sfmmup) 1919 { 1920 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1921 ASSERT(sfmmup != ksfmmup); 1922 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1923 1924 sfmmup->sfmmu_free = 1; 1925 if (sfmmup->sfmmu_scdp != NULL) { 1926 sfmmu_leave_scd(sfmmup, 0); 1927 } 1928 1929 ASSERT(sfmmup->sfmmu_scdp == NULL); 1930 } 1931 1932 void 1933 hat_free_end(struct hat *sfmmup) 1934 { 1935 int i; 1936 1937 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 1938 ASSERT(sfmmup->sfmmu_free == 1); 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1940 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1941 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1942 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1943 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1944 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1945 1946 if (sfmmup->sfmmu_rmstat) { 1947 hat_freestat(sfmmup->sfmmu_as, NULL); 1948 } 1949 1950 while (sfmmup->sfmmu_tsb != NULL) { 1951 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1952 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1953 sfmmup->sfmmu_tsb = next; 1954 } 1955 1956 if (sfmmup->sfmmu_srdp != NULL) { 1957 sfmmu_leave_srd(sfmmup); 1958 ASSERT(sfmmup->sfmmu_srdp == NULL); 1959 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1960 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1961 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1962 SFMMU_L2_HMERLINKS_SIZE); 1963 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1964 } 1965 } 1966 } 1967 sfmmu_free_sfmmu(sfmmup); 1968 1969 #ifdef DEBUG 1970 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1971 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1972 } 1973 #endif 1974 1975 kmem_cache_free(sfmmuid_cache, sfmmup); 1976 } 1977 1978 /* 1979 * Duplicate the translations of an as into another newas 1980 */ 1981 /* ARGSUSED */ 1982 int 1983 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 1984 uint_t flag) 1985 { 1986 sf_srd_t *srdp; 1987 sf_scd_t *scdp; 1988 int i; 1989 extern uint_t get_color_start(struct as *); 1990 1991 ASSERT(hat->sfmmu_xhat_provider == NULL); 1992 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 1993 (flag == HAT_DUP_SRD)); 1994 ASSERT(hat != ksfmmup); 1995 ASSERT(newhat != ksfmmup); 1996 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 1997 1998 if (flag == HAT_DUP_COW) { 1999 panic("hat_dup: HAT_DUP_COW not supported"); 2000 } 2001 2002 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2003 ASSERT(srdp->srd_evp != NULL); 2004 VN_HOLD(srdp->srd_evp); 2005 ASSERT(srdp->srd_refcnt > 0); 2006 newhat->sfmmu_srdp = srdp; 2007 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 2008 } 2009 2010 /* 2011 * HAT_DUP_ALL flag is used after as duplication is done. 2012 */ 2013 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2014 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2015 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2016 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2017 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2018 } 2019 2020 /* check if need to join scd */ 2021 if ((scdp = hat->sfmmu_scdp) != NULL && 2022 newhat->sfmmu_scdp != scdp) { 2023 int ret; 2024 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2025 &scdp->scd_region_map, ret); 2026 ASSERT(ret); 2027 sfmmu_join_scd(scdp, newhat); 2028 ASSERT(newhat->sfmmu_scdp == scdp && 2029 scdp->scd_refcnt >= 2); 2030 for (i = 0; i < max_mmu_page_sizes; i++) { 2031 newhat->sfmmu_ismttecnt[i] = 2032 hat->sfmmu_ismttecnt[i]; 2033 newhat->sfmmu_scdismttecnt[i] = 2034 hat->sfmmu_scdismttecnt[i]; 2035 } 2036 } 2037 2038 sfmmu_check_page_sizes(newhat, 1); 2039 } 2040 2041 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2042 update_proc_pgcolorbase_after_fork != 0) { 2043 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2044 } 2045 return (0); 2046 } 2047 2048 void 2049 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2050 uint_t attr, uint_t flags) 2051 { 2052 hat_do_memload(hat, addr, pp, attr, flags, 2053 SFMMU_INVALID_SHMERID); 2054 } 2055 2056 void 2057 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2058 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2059 { 2060 uint_t rid; 2061 if (rcookie == HAT_INVALID_REGION_COOKIE || 2062 hat->sfmmu_xhat_provider != NULL) { 2063 hat_do_memload(hat, addr, pp, attr, flags, 2064 SFMMU_INVALID_SHMERID); 2065 return; 2066 } 2067 rid = (uint_t)((uint64_t)rcookie); 2068 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2069 hat_do_memload(hat, addr, pp, attr, flags, rid); 2070 } 2071 2072 /* 2073 * Set up addr to map to page pp with protection prot. 2074 * As an optimization we also load the TSB with the 2075 * corresponding tte but it is no big deal if the tte gets kicked out. 2076 */ 2077 static void 2078 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2079 uint_t attr, uint_t flags, uint_t rid) 2080 { 2081 tte_t tte; 2082 2083 2084 ASSERT(hat != NULL); 2085 ASSERT(PAGE_LOCKED(pp)); 2086 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2087 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2088 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2089 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2090 2091 if (PP_ISFREE(pp)) { 2092 panic("hat_memload: loading a mapping to free page %p", 2093 (void *)pp); 2094 } 2095 2096 if (hat->sfmmu_xhat_provider) { 2097 /* no regions for xhats */ 2098 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2099 XHAT_MEMLOAD(hat, addr, pp, attr, flags); 2100 return; 2101 } 2102 2103 ASSERT((hat == ksfmmup) || 2104 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2105 2106 if (flags & ~SFMMU_LOAD_ALLFLAG) 2107 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2108 flags & ~SFMMU_LOAD_ALLFLAG); 2109 2110 if (hat->sfmmu_rmstat) 2111 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2112 2113 #if defined(SF_ERRATA_57) 2114 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2115 (addr < errata57_limit) && (attr & PROT_EXEC) && 2116 !(flags & HAT_LOAD_SHARE)) { 2117 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2118 " page executable"); 2119 attr &= ~PROT_EXEC; 2120 } 2121 #endif 2122 2123 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2124 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2125 2126 /* 2127 * Check TSB and TLB page sizes. 2128 */ 2129 if ((flags & HAT_LOAD_SHARE) == 0) { 2130 sfmmu_check_page_sizes(hat, 1); 2131 } 2132 } 2133 2134 /* 2135 * hat_devload can be called to map real memory (e.g. 2136 * /dev/kmem) and even though hat_devload will determine pf is 2137 * for memory, it will be unable to get a shared lock on the 2138 * page (because someone else has it exclusively) and will 2139 * pass dp = NULL. If tteload doesn't get a non-NULL 2140 * page pointer it can't cache memory. 2141 */ 2142 void 2143 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2144 uint_t attr, int flags) 2145 { 2146 tte_t tte; 2147 struct page *pp = NULL; 2148 int use_lgpg = 0; 2149 2150 ASSERT(hat != NULL); 2151 2152 if (hat->sfmmu_xhat_provider) { 2153 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags); 2154 return; 2155 } 2156 2157 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2158 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2159 ASSERT((hat == ksfmmup) || 2160 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2161 if (len == 0) 2162 panic("hat_devload: zero len"); 2163 if (flags & ~SFMMU_LOAD_ALLFLAG) 2164 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2165 flags & ~SFMMU_LOAD_ALLFLAG); 2166 2167 #if defined(SF_ERRATA_57) 2168 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2169 (addr < errata57_limit) && (attr & PROT_EXEC) && 2170 !(flags & HAT_LOAD_SHARE)) { 2171 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2172 " page executable"); 2173 attr &= ~PROT_EXEC; 2174 } 2175 #endif 2176 2177 /* 2178 * If it's a memory page find its pp 2179 */ 2180 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2181 pp = page_numtopp_nolock(pfn); 2182 if (pp == NULL) { 2183 flags |= HAT_LOAD_NOCONSIST; 2184 } else { 2185 if (PP_ISFREE(pp)) { 2186 panic("hat_memload: loading " 2187 "a mapping to free page %p", 2188 (void *)pp); 2189 } 2190 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2191 panic("hat_memload: loading a mapping " 2192 "to unlocked relocatable page %p", 2193 (void *)pp); 2194 } 2195 ASSERT(len == MMU_PAGESIZE); 2196 } 2197 } 2198 2199 if (hat->sfmmu_rmstat) 2200 hat_resvstat(len, hat->sfmmu_as, addr); 2201 2202 if (flags & HAT_LOAD_NOCONSIST) { 2203 attr |= SFMMU_UNCACHEVTTE; 2204 use_lgpg = 1; 2205 } 2206 if (!pf_is_memory(pfn)) { 2207 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2208 use_lgpg = 1; 2209 switch (attr & HAT_ORDER_MASK) { 2210 case HAT_STRICTORDER: 2211 case HAT_UNORDERED_OK: 2212 /* 2213 * we set the side effect bit for all non 2214 * memory mappings unless merging is ok 2215 */ 2216 attr |= SFMMU_SIDEFFECT; 2217 break; 2218 case HAT_MERGING_OK: 2219 case HAT_LOADCACHING_OK: 2220 case HAT_STORECACHING_OK: 2221 break; 2222 default: 2223 panic("hat_devload: bad attr"); 2224 break; 2225 } 2226 } 2227 while (len) { 2228 if (!use_lgpg) { 2229 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2230 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2231 flags, SFMMU_INVALID_SHMERID); 2232 len -= MMU_PAGESIZE; 2233 addr += MMU_PAGESIZE; 2234 pfn++; 2235 continue; 2236 } 2237 /* 2238 * try to use large pages, check va/pa alignments 2239 * Note that 32M/256M page sizes are not (yet) supported. 2240 */ 2241 if ((len >= MMU_PAGESIZE4M) && 2242 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2243 !(disable_large_pages & (1 << TTE4M)) && 2244 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2245 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2246 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2247 flags, SFMMU_INVALID_SHMERID); 2248 len -= MMU_PAGESIZE4M; 2249 addr += MMU_PAGESIZE4M; 2250 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2251 } else if ((len >= MMU_PAGESIZE512K) && 2252 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2253 !(disable_large_pages & (1 << TTE512K)) && 2254 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2255 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2256 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2257 flags, SFMMU_INVALID_SHMERID); 2258 len -= MMU_PAGESIZE512K; 2259 addr += MMU_PAGESIZE512K; 2260 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2261 } else if ((len >= MMU_PAGESIZE64K) && 2262 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2263 !(disable_large_pages & (1 << TTE64K)) && 2264 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2265 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2266 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2267 flags, SFMMU_INVALID_SHMERID); 2268 len -= MMU_PAGESIZE64K; 2269 addr += MMU_PAGESIZE64K; 2270 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2271 } else { 2272 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2273 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2274 flags, SFMMU_INVALID_SHMERID); 2275 len -= MMU_PAGESIZE; 2276 addr += MMU_PAGESIZE; 2277 pfn++; 2278 } 2279 } 2280 2281 /* 2282 * Check TSB and TLB page sizes. 2283 */ 2284 if ((flags & HAT_LOAD_SHARE) == 0) { 2285 sfmmu_check_page_sizes(hat, 1); 2286 } 2287 } 2288 2289 void 2290 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2291 struct page **pps, uint_t attr, uint_t flags) 2292 { 2293 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2294 SFMMU_INVALID_SHMERID); 2295 } 2296 2297 void 2298 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2299 struct page **pps, uint_t attr, uint_t flags, 2300 hat_region_cookie_t rcookie) 2301 { 2302 uint_t rid; 2303 if (rcookie == HAT_INVALID_REGION_COOKIE || 2304 hat->sfmmu_xhat_provider != NULL) { 2305 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2306 SFMMU_INVALID_SHMERID); 2307 return; 2308 } 2309 rid = (uint_t)((uint64_t)rcookie); 2310 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2311 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2312 } 2313 2314 /* 2315 * Map the largest extend possible out of the page array. The array may NOT 2316 * be in order. The largest possible mapping a page can have 2317 * is specified in the p_szc field. The p_szc field 2318 * cannot change as long as there any mappings (large or small) 2319 * to any of the pages that make up the large page. (ie. any 2320 * promotion/demotion of page size is not up to the hat but up to 2321 * the page free list manager). The array 2322 * should consist of properly aligned contigous pages that are 2323 * part of a big page for a large mapping to be created. 2324 */ 2325 static void 2326 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2327 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2328 { 2329 int ttesz; 2330 size_t mapsz; 2331 pgcnt_t numpg, npgs; 2332 tte_t tte; 2333 page_t *pp; 2334 uint_t large_pages_disable; 2335 2336 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2337 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2338 2339 if (hat->sfmmu_xhat_provider) { 2340 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 2341 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags); 2342 return; 2343 } 2344 2345 if (hat->sfmmu_rmstat) 2346 hat_resvstat(len, hat->sfmmu_as, addr); 2347 2348 #if defined(SF_ERRATA_57) 2349 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2350 (addr < errata57_limit) && (attr & PROT_EXEC) && 2351 !(flags & HAT_LOAD_SHARE)) { 2352 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2353 "user page executable"); 2354 attr &= ~PROT_EXEC; 2355 } 2356 #endif 2357 2358 /* Get number of pages */ 2359 npgs = len >> MMU_PAGESHIFT; 2360 2361 if (flags & HAT_LOAD_SHARE) { 2362 large_pages_disable = disable_ism_large_pages; 2363 } else { 2364 large_pages_disable = disable_large_pages; 2365 } 2366 2367 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2368 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2369 rid); 2370 return; 2371 } 2372 2373 while (npgs >= NHMENTS) { 2374 pp = *pps; 2375 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2376 /* 2377 * Check if this page size is disabled. 2378 */ 2379 if (large_pages_disable & (1 << ttesz)) 2380 continue; 2381 2382 numpg = TTEPAGES(ttesz); 2383 mapsz = numpg << MMU_PAGESHIFT; 2384 if ((npgs >= numpg) && 2385 IS_P2ALIGNED(addr, mapsz) && 2386 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2387 /* 2388 * At this point we have enough pages and 2389 * we know the virtual address and the pfn 2390 * are properly aligned. We still need 2391 * to check for physical contiguity but since 2392 * it is very likely that this is the case 2393 * we will assume they are so and undo 2394 * the request if necessary. It would 2395 * be great if we could get a hint flag 2396 * like HAT_CONTIG which would tell us 2397 * the pages are contigous for sure. 2398 */ 2399 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2400 attr, ttesz); 2401 if (!sfmmu_tteload_array(hat, &tte, addr, 2402 pps, flags, rid)) { 2403 break; 2404 } 2405 } 2406 } 2407 if (ttesz == TTE8K) { 2408 /* 2409 * We were not able to map array using a large page 2410 * batch a hmeblk or fraction at a time. 2411 */ 2412 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2413 & (NHMENTS-1); 2414 numpg = NHMENTS - numpg; 2415 ASSERT(numpg <= npgs); 2416 mapsz = numpg * MMU_PAGESIZE; 2417 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2418 numpg, rid); 2419 } 2420 addr += mapsz; 2421 npgs -= numpg; 2422 pps += numpg; 2423 } 2424 2425 if (npgs) { 2426 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2427 rid); 2428 } 2429 2430 /* 2431 * Check TSB and TLB page sizes. 2432 */ 2433 if ((flags & HAT_LOAD_SHARE) == 0) { 2434 sfmmu_check_page_sizes(hat, 1); 2435 } 2436 } 2437 2438 /* 2439 * Function tries to batch 8K pages into the same hme blk. 2440 */ 2441 static void 2442 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2443 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2444 { 2445 tte_t tte; 2446 page_t *pp; 2447 struct hmehash_bucket *hmebp; 2448 struct hme_blk *hmeblkp; 2449 int index; 2450 2451 while (npgs) { 2452 /* 2453 * Acquire the hash bucket. 2454 */ 2455 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2456 rid); 2457 ASSERT(hmebp); 2458 2459 /* 2460 * Find the hment block. 2461 */ 2462 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2463 TTE8K, flags, rid); 2464 ASSERT(hmeblkp); 2465 2466 do { 2467 /* 2468 * Make the tte. 2469 */ 2470 pp = *pps; 2471 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2472 2473 /* 2474 * Add the translation. 2475 */ 2476 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2477 vaddr, pps, flags, rid); 2478 2479 /* 2480 * Goto next page. 2481 */ 2482 pps++; 2483 npgs--; 2484 2485 /* 2486 * Goto next address. 2487 */ 2488 vaddr += MMU_PAGESIZE; 2489 2490 /* 2491 * Don't crossover into a different hmentblk. 2492 */ 2493 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2494 (NHMENTS-1)); 2495 2496 } while (index != 0 && npgs != 0); 2497 2498 /* 2499 * Release the hash bucket. 2500 */ 2501 2502 sfmmu_tteload_release_hashbucket(hmebp); 2503 } 2504 } 2505 2506 /* 2507 * Construct a tte for a page: 2508 * 2509 * tte_valid = 1 2510 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2511 * tte_size = size 2512 * tte_nfo = attr & HAT_NOFAULT 2513 * tte_ie = attr & HAT_STRUCTURE_LE 2514 * tte_hmenum = hmenum 2515 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2516 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2517 * tte_ref = 1 (optimization) 2518 * tte_wr_perm = attr & PROT_WRITE; 2519 * tte_no_sync = attr & HAT_NOSYNC 2520 * tte_lock = attr & SFMMU_LOCKTTE 2521 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2522 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2523 * tte_e = attr & SFMMU_SIDEFFECT 2524 * tte_priv = !(attr & PROT_USER) 2525 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2526 * tte_glb = 0 2527 */ 2528 void 2529 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2530 { 2531 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2532 2533 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2534 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2535 2536 if (TTE_IS_NOSYNC(ttep)) { 2537 TTE_SET_REF(ttep); 2538 if (TTE_IS_WRITABLE(ttep)) { 2539 TTE_SET_MOD(ttep); 2540 } 2541 } 2542 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2543 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2544 } 2545 } 2546 2547 /* 2548 * This function will add a translation to the hme_blk and allocate the 2549 * hme_blk if one does not exist. 2550 * If a page structure is specified then it will add the 2551 * corresponding hment to the mapping list. 2552 * It will also update the hmenum field for the tte. 2553 * 2554 * Currently this function is only used for kernel mappings. 2555 * So pass invalid region to sfmmu_tteload_array(). 2556 */ 2557 void 2558 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2559 uint_t flags) 2560 { 2561 ASSERT(sfmmup == ksfmmup); 2562 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2563 SFMMU_INVALID_SHMERID); 2564 } 2565 2566 /* 2567 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2568 * Assumes that a particular page size may only be resident in one TSB. 2569 */ 2570 static void 2571 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2572 { 2573 struct tsb_info *tsbinfop = NULL; 2574 uint64_t tag; 2575 struct tsbe *tsbe_addr; 2576 uint64_t tsb_base; 2577 uint_t tsb_size; 2578 int vpshift = MMU_PAGESHIFT; 2579 int phys = 0; 2580 2581 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2582 phys = ktsb_phys; 2583 if (ttesz >= TTE4M) { 2584 #ifndef sun4v 2585 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2586 #endif 2587 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2588 tsb_size = ktsb4m_szcode; 2589 } else { 2590 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2591 tsb_size = ktsb_szcode; 2592 } 2593 } else { 2594 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2595 2596 /* 2597 * If there isn't a TSB for this page size, or the TSB is 2598 * swapped out, there is nothing to do. Note that the latter 2599 * case seems impossible but can occur if hat_pageunload() 2600 * is called on an ISM mapping while the process is swapped 2601 * out. 2602 */ 2603 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2604 return; 2605 2606 /* 2607 * If another thread is in the middle of relocating a TSB 2608 * we can't unload the entry so set a flag so that the 2609 * TSB will be flushed before it can be accessed by the 2610 * process. 2611 */ 2612 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2613 if (ttep == NULL) 2614 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2615 return; 2616 } 2617 #if defined(UTSB_PHYS) 2618 phys = 1; 2619 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2620 #else 2621 tsb_base = (uint64_t)tsbinfop->tsb_va; 2622 #endif 2623 tsb_size = tsbinfop->tsb_szc; 2624 } 2625 if (ttesz >= TTE4M) 2626 vpshift = MMU_PAGESHIFT4M; 2627 2628 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2629 tag = sfmmu_make_tsbtag(vaddr); 2630 2631 if (ttep == NULL) { 2632 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2633 } else { 2634 if (ttesz >= TTE4M) { 2635 SFMMU_STAT(sf_tsb_load4m); 2636 } else { 2637 SFMMU_STAT(sf_tsb_load8k); 2638 } 2639 2640 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2641 } 2642 } 2643 2644 /* 2645 * Unmap all entries from [start, end) matching the given page size. 2646 * 2647 * This function is used primarily to unmap replicated 64K or 512K entries 2648 * from the TSB that are inserted using the base page size TSB pointer, but 2649 * it may also be called to unmap a range of addresses from the TSB. 2650 */ 2651 void 2652 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2653 { 2654 struct tsb_info *tsbinfop; 2655 uint64_t tag; 2656 struct tsbe *tsbe_addr; 2657 caddr_t vaddr; 2658 uint64_t tsb_base; 2659 int vpshift, vpgsz; 2660 uint_t tsb_size; 2661 int phys = 0; 2662 2663 /* 2664 * Assumptions: 2665 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2666 * at a time shooting down any valid entries we encounter. 2667 * 2668 * If ttesz >= 4M we walk the range 4M at a time shooting 2669 * down any valid mappings we find. 2670 */ 2671 if (sfmmup == ksfmmup) { 2672 phys = ktsb_phys; 2673 if (ttesz >= TTE4M) { 2674 #ifndef sun4v 2675 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2676 #endif 2677 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2678 tsb_size = ktsb4m_szcode; 2679 } else { 2680 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2681 tsb_size = ktsb_szcode; 2682 } 2683 } else { 2684 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2685 2686 /* 2687 * If there isn't a TSB for this page size, or the TSB is 2688 * swapped out, there is nothing to do. Note that the latter 2689 * case seems impossible but can occur if hat_pageunload() 2690 * is called on an ISM mapping while the process is swapped 2691 * out. 2692 */ 2693 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2694 return; 2695 2696 /* 2697 * If another thread is in the middle of relocating a TSB 2698 * we can't unload the entry so set a flag so that the 2699 * TSB will be flushed before it can be accessed by the 2700 * process. 2701 */ 2702 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2703 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2704 return; 2705 } 2706 #if defined(UTSB_PHYS) 2707 phys = 1; 2708 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2709 #else 2710 tsb_base = (uint64_t)tsbinfop->tsb_va; 2711 #endif 2712 tsb_size = tsbinfop->tsb_szc; 2713 } 2714 if (ttesz >= TTE4M) { 2715 vpshift = MMU_PAGESHIFT4M; 2716 vpgsz = MMU_PAGESIZE4M; 2717 } else { 2718 vpshift = MMU_PAGESHIFT; 2719 vpgsz = MMU_PAGESIZE; 2720 } 2721 2722 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2723 tag = sfmmu_make_tsbtag(vaddr); 2724 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2725 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2726 } 2727 } 2728 2729 /* 2730 * Select the optimum TSB size given the number of mappings 2731 * that need to be cached. 2732 */ 2733 static int 2734 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2735 { 2736 int szc = 0; 2737 2738 #ifdef DEBUG 2739 if (tsb_grow_stress) { 2740 uint32_t randval = (uint32_t)gettick() >> 4; 2741 return (randval % (tsb_max_growsize + 1)); 2742 } 2743 #endif /* DEBUG */ 2744 2745 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2746 szc++; 2747 return (szc); 2748 } 2749 2750 /* 2751 * This function will add a translation to the hme_blk and allocate the 2752 * hme_blk if one does not exist. 2753 * If a page structure is specified then it will add the 2754 * corresponding hment to the mapping list. 2755 * It will also update the hmenum field for the tte. 2756 * Furthermore, it attempts to create a large page translation 2757 * for <addr,hat> at page array pps. It assumes addr and first 2758 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2759 */ 2760 static int 2761 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2762 page_t **pps, uint_t flags, uint_t rid) 2763 { 2764 struct hmehash_bucket *hmebp; 2765 struct hme_blk *hmeblkp; 2766 int ret; 2767 uint_t size; 2768 2769 /* 2770 * Get mapping size. 2771 */ 2772 size = TTE_CSZ(ttep); 2773 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2774 2775 /* 2776 * Acquire the hash bucket. 2777 */ 2778 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2779 ASSERT(hmebp); 2780 2781 /* 2782 * Find the hment block. 2783 */ 2784 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2785 rid); 2786 ASSERT(hmeblkp); 2787 2788 /* 2789 * Add the translation. 2790 */ 2791 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2792 rid); 2793 2794 /* 2795 * Release the hash bucket. 2796 */ 2797 sfmmu_tteload_release_hashbucket(hmebp); 2798 2799 return (ret); 2800 } 2801 2802 /* 2803 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2804 */ 2805 static struct hmehash_bucket * 2806 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2807 uint_t rid) 2808 { 2809 struct hmehash_bucket *hmebp; 2810 int hmeshift; 2811 void *htagid = sfmmutohtagid(sfmmup, rid); 2812 2813 ASSERT(htagid != NULL); 2814 2815 hmeshift = HME_HASH_SHIFT(size); 2816 2817 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2818 2819 SFMMU_HASH_LOCK(hmebp); 2820 2821 return (hmebp); 2822 } 2823 2824 /* 2825 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2826 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2827 * allocated. 2828 */ 2829 static struct hme_blk * 2830 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2831 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2832 { 2833 hmeblk_tag hblktag; 2834 int hmeshift; 2835 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2836 2837 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2838 2839 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2840 ASSERT(hblktag.htag_id != NULL); 2841 hmeshift = HME_HASH_SHIFT(size); 2842 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2843 hblktag.htag_rehash = HME_HASH_REHASH(size); 2844 hblktag.htag_rid = rid; 2845 2846 ttearray_realloc: 2847 2848 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 2849 2850 /* 2851 * We block until hblk_reserve_lock is released; it's held by 2852 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2853 * replaced by a hblk from sfmmu8_cache. 2854 */ 2855 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2856 hblk_reserve_thread != curthread) { 2857 SFMMU_HASH_UNLOCK(hmebp); 2858 mutex_enter(&hblk_reserve_lock); 2859 mutex_exit(&hblk_reserve_lock); 2860 SFMMU_STAT(sf_hblk_reserve_hit); 2861 SFMMU_HASH_LOCK(hmebp); 2862 goto ttearray_realloc; 2863 } 2864 2865 if (hmeblkp == NULL) { 2866 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2867 hblktag, flags, rid); 2868 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2869 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2870 } else { 2871 /* 2872 * It is possible for 8k and 64k hblks to collide since they 2873 * have the same rehash value. This is because we 2874 * lazily free hblks and 8K/64K blks could be lingering. 2875 * If we find size mismatch we free the block and & try again. 2876 */ 2877 if (get_hblk_ttesz(hmeblkp) != size) { 2878 ASSERT(!hmeblkp->hblk_vcnt); 2879 ASSERT(!hmeblkp->hblk_hmecnt); 2880 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2881 &list, 0); 2882 goto ttearray_realloc; 2883 } 2884 if (hmeblkp->hblk_shw_bit) { 2885 /* 2886 * if the hblk was previously used as a shadow hblk then 2887 * we will change it to a normal hblk 2888 */ 2889 ASSERT(!hmeblkp->hblk_shared); 2890 if (hmeblkp->hblk_shw_mask) { 2891 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 2892 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 2893 goto ttearray_realloc; 2894 } else { 2895 hmeblkp->hblk_shw_bit = 0; 2896 } 2897 } 2898 SFMMU_STAT(sf_hblk_hit); 2899 } 2900 2901 /* 2902 * hat_memload() should never call kmem_cache_free() for kernel hmeblks; 2903 * see block comment showing the stacktrace in sfmmu_hblk_alloc(); 2904 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will 2905 * just add these hmeblks to the per-cpu pending queue. 2906 */ 2907 sfmmu_hblks_list_purge(&list, 1); 2908 2909 ASSERT(get_hblk_ttesz(hmeblkp) == size); 2910 ASSERT(!hmeblkp->hblk_shw_bit); 2911 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2912 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2913 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 2914 2915 return (hmeblkp); 2916 } 2917 2918 /* 2919 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 2920 * otherwise. 2921 */ 2922 static int 2923 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 2924 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 2925 { 2926 page_t *pp = *pps; 2927 int hmenum, size, remap; 2928 tte_t tteold, flush_tte; 2929 #ifdef DEBUG 2930 tte_t orig_old; 2931 #endif /* DEBUG */ 2932 struct sf_hment *sfhme; 2933 kmutex_t *pml, *pmtx; 2934 hatlock_t *hatlockp; 2935 int myflt; 2936 2937 /* 2938 * remove this panic when we decide to let user virtual address 2939 * space be >= USERLIMIT. 2940 */ 2941 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 2942 panic("user addr %p in kernel space", (void *)vaddr); 2943 #if defined(TTE_IS_GLOBAL) 2944 if (TTE_IS_GLOBAL(ttep)) 2945 panic("sfmmu_tteload: creating global tte"); 2946 #endif 2947 2948 #ifdef DEBUG 2949 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 2950 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 2951 panic("sfmmu_tteload: non cacheable memory tte"); 2952 #endif /* DEBUG */ 2953 2954 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 2955 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 2956 TTE_SET_REF(ttep); 2957 TTE_SET_MOD(ttep); 2958 } 2959 2960 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 2961 !TTE_IS_MOD(ttep)) { 2962 /* 2963 * Don't load TSB for dummy as in ISM. Also don't preload 2964 * the TSB if the TTE isn't writable since we're likely to 2965 * fault on it again -- preloading can be fairly expensive. 2966 */ 2967 flags |= SFMMU_NO_TSBLOAD; 2968 } 2969 2970 size = TTE_CSZ(ttep); 2971 switch (size) { 2972 case TTE8K: 2973 SFMMU_STAT(sf_tteload8k); 2974 break; 2975 case TTE64K: 2976 SFMMU_STAT(sf_tteload64k); 2977 break; 2978 case TTE512K: 2979 SFMMU_STAT(sf_tteload512k); 2980 break; 2981 case TTE4M: 2982 SFMMU_STAT(sf_tteload4m); 2983 break; 2984 case (TTE32M): 2985 SFMMU_STAT(sf_tteload32m); 2986 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2987 break; 2988 case (TTE256M): 2989 SFMMU_STAT(sf_tteload256m); 2990 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 2991 break; 2992 } 2993 2994 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2995 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2996 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2997 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2998 2999 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3000 3001 /* 3002 * Need to grab mlist lock here so that pageunload 3003 * will not change tte behind us. 3004 */ 3005 if (pp) { 3006 pml = sfmmu_mlist_enter(pp); 3007 } 3008 3009 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3010 /* 3011 * Look for corresponding hment and if valid verify 3012 * pfns are equal. 3013 */ 3014 remap = TTE_IS_VALID(&tteold); 3015 if (remap) { 3016 pfn_t new_pfn, old_pfn; 3017 3018 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3019 new_pfn = TTE_TO_PFN(vaddr, ttep); 3020 3021 if (flags & HAT_LOAD_REMAP) { 3022 /* make sure we are remapping same type of pages */ 3023 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3024 panic("sfmmu_tteload - tte remap io<->memory"); 3025 } 3026 if (old_pfn != new_pfn && 3027 (pp != NULL || sfhme->hme_page != NULL)) { 3028 panic("sfmmu_tteload - tte remap pp != NULL"); 3029 } 3030 } else if (old_pfn != new_pfn) { 3031 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3032 (void *)hmeblkp); 3033 } 3034 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3035 } 3036 3037 if (pp) { 3038 if (size == TTE8K) { 3039 #ifdef VAC 3040 /* 3041 * Handle VAC consistency 3042 */ 3043 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3044 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3045 } 3046 #endif 3047 3048 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3049 pmtx = sfmmu_page_enter(pp); 3050 PP_CLRRO(pp); 3051 sfmmu_page_exit(pmtx); 3052 } else if (!PP_ISMAPPED(pp) && 3053 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3054 pmtx = sfmmu_page_enter(pp); 3055 if (!(PP_ISMOD(pp))) { 3056 PP_SETRO(pp); 3057 } 3058 sfmmu_page_exit(pmtx); 3059 } 3060 3061 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3062 /* 3063 * sfmmu_pagearray_setup failed so return 3064 */ 3065 sfmmu_mlist_exit(pml); 3066 return (1); 3067 } 3068 } 3069 3070 /* 3071 * Make sure hment is not on a mapping list. 3072 */ 3073 ASSERT(remap || (sfhme->hme_page == NULL)); 3074 3075 /* if it is not a remap then hme->next better be NULL */ 3076 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3077 3078 if (flags & HAT_LOAD_LOCK) { 3079 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3080 panic("too high lckcnt-hmeblk %p", 3081 (void *)hmeblkp); 3082 } 3083 atomic_add_32(&hmeblkp->hblk_lckcnt, 1); 3084 3085 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3086 } 3087 3088 #ifdef VAC 3089 if (pp && PP_ISNC(pp)) { 3090 /* 3091 * If the physical page is marked to be uncacheable, like 3092 * by a vac conflict, make sure the new mapping is also 3093 * uncacheable. 3094 */ 3095 TTE_CLR_VCACHEABLE(ttep); 3096 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3097 } 3098 #endif 3099 ttep->tte_hmenum = hmenum; 3100 3101 #ifdef DEBUG 3102 orig_old = tteold; 3103 #endif /* DEBUG */ 3104 3105 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3106 if ((sfmmup == KHATID) && 3107 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3108 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3109 } 3110 #ifdef DEBUG 3111 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3112 #endif /* DEBUG */ 3113 } 3114 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3115 3116 if (!TTE_IS_VALID(&tteold)) { 3117 3118 atomic_add_16(&hmeblkp->hblk_vcnt, 1); 3119 if (rid == SFMMU_INVALID_SHMERID) { 3120 atomic_add_long(&sfmmup->sfmmu_ttecnt[size], 1); 3121 } else { 3122 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3123 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3124 /* 3125 * We already accounted for region ttecnt's in sfmmu 3126 * during hat_join_region() processing. Here we 3127 * only update ttecnt's in region struture. 3128 */ 3129 atomic_add_long(&rgnp->rgn_ttecnt[size], 1); 3130 } 3131 } 3132 3133 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3134 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3135 sfmmup != ksfmmup) { 3136 uchar_t tteflag = 1 << size; 3137 if (rid == SFMMU_INVALID_SHMERID) { 3138 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3139 hatlockp = sfmmu_hat_enter(sfmmup); 3140 sfmmup->sfmmu_tteflags |= tteflag; 3141 sfmmu_hat_exit(hatlockp); 3142 } 3143 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3144 hatlockp = sfmmu_hat_enter(sfmmup); 3145 sfmmup->sfmmu_rtteflags |= tteflag; 3146 sfmmu_hat_exit(hatlockp); 3147 } 3148 /* 3149 * Update the current CPU tsbmiss area, so the current thread 3150 * won't need to take the tsbmiss for the new pagesize. 3151 * The other threads in the process will update their tsb 3152 * miss area lazily in sfmmu_tsbmiss_exception() when they 3153 * fail to find the translation for a newly added pagesize. 3154 */ 3155 if (size > TTE64K && myflt) { 3156 struct tsbmiss *tsbmp; 3157 kpreempt_disable(); 3158 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3159 if (rid == SFMMU_INVALID_SHMERID) { 3160 if (!(tsbmp->uhat_tteflags & tteflag)) { 3161 tsbmp->uhat_tteflags |= tteflag; 3162 } 3163 } else { 3164 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3165 tsbmp->uhat_rtteflags |= tteflag; 3166 } 3167 } 3168 kpreempt_enable(); 3169 } 3170 } 3171 3172 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3173 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3174 hatlockp = sfmmu_hat_enter(sfmmup); 3175 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3176 sfmmu_hat_exit(hatlockp); 3177 } 3178 3179 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3180 hw_tte.tte_intlo; 3181 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3182 hw_tte.tte_inthi; 3183 3184 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3185 /* 3186 * If remap and new tte differs from old tte we need 3187 * to sync the mod bit and flush TLB/TSB. We don't 3188 * need to sync ref bit because we currently always set 3189 * ref bit in tteload. 3190 */ 3191 ASSERT(TTE_IS_REF(ttep)); 3192 if (TTE_IS_MOD(&tteold)) { 3193 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3194 } 3195 /* 3196 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3197 * hmes are only used for read only text. Adding this code for 3198 * completeness and future use of shared hmeblks with writable 3199 * mappings of VMODSORT vnodes. 3200 */ 3201 if (hmeblkp->hblk_shared) { 3202 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3203 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3204 xt_sync(cpuset); 3205 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3206 } else { 3207 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3208 xt_sync(sfmmup->sfmmu_cpusran); 3209 } 3210 } 3211 3212 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3213 /* 3214 * We only preload 8K and 4M mappings into the TSB, since 3215 * 64K and 512K mappings are replicated and hence don't 3216 * have a single, unique TSB entry. Ditto for 32M/256M. 3217 */ 3218 if (size == TTE8K || size == TTE4M) { 3219 sf_scd_t *scdp; 3220 hatlockp = sfmmu_hat_enter(sfmmup); 3221 /* 3222 * Don't preload private TSB if the mapping is used 3223 * by the shctx in the SCD. 3224 */ 3225 scdp = sfmmup->sfmmu_scdp; 3226 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3227 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3228 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3229 size); 3230 } 3231 sfmmu_hat_exit(hatlockp); 3232 } 3233 } 3234 if (pp) { 3235 if (!remap) { 3236 HME_ADD(sfhme, pp); 3237 atomic_add_16(&hmeblkp->hblk_hmecnt, 1); 3238 ASSERT(hmeblkp->hblk_hmecnt > 0); 3239 3240 /* 3241 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3242 * see pageunload() for comment. 3243 */ 3244 } 3245 sfmmu_mlist_exit(pml); 3246 } 3247 3248 return (0); 3249 } 3250 /* 3251 * Function unlocks hash bucket. 3252 */ 3253 static void 3254 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3255 { 3256 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3257 SFMMU_HASH_UNLOCK(hmebp); 3258 } 3259 3260 /* 3261 * function which checks and sets up page array for a large 3262 * translation. Will set p_vcolor, p_index, p_ro fields. 3263 * Assumes addr and pfnum of first page are properly aligned. 3264 * Will check for physical contiguity. If check fails it return 3265 * non null. 3266 */ 3267 static int 3268 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3269 { 3270 int i, index, ttesz; 3271 pfn_t pfnum; 3272 pgcnt_t npgs; 3273 page_t *pp, *pp1; 3274 kmutex_t *pmtx; 3275 #ifdef VAC 3276 int osz; 3277 int cflags = 0; 3278 int vac_err = 0; 3279 #endif 3280 int newidx = 0; 3281 3282 ttesz = TTE_CSZ(ttep); 3283 3284 ASSERT(ttesz > TTE8K); 3285 3286 npgs = TTEPAGES(ttesz); 3287 index = PAGESZ_TO_INDEX(ttesz); 3288 3289 pfnum = (*pps)->p_pagenum; 3290 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3291 3292 /* 3293 * Save the first pp so we can do HAT_TMPNC at the end. 3294 */ 3295 pp1 = *pps; 3296 #ifdef VAC 3297 osz = fnd_mapping_sz(pp1); 3298 #endif 3299 3300 for (i = 0; i < npgs; i++, pps++) { 3301 pp = *pps; 3302 ASSERT(PAGE_LOCKED(pp)); 3303 ASSERT(pp->p_szc >= ttesz); 3304 ASSERT(pp->p_szc == pp1->p_szc); 3305 ASSERT(sfmmu_mlist_held(pp)); 3306 3307 /* 3308 * XXX is it possible to maintain P_RO on the root only? 3309 */ 3310 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3311 pmtx = sfmmu_page_enter(pp); 3312 PP_CLRRO(pp); 3313 sfmmu_page_exit(pmtx); 3314 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3315 !PP_ISMOD(pp)) { 3316 pmtx = sfmmu_page_enter(pp); 3317 if (!(PP_ISMOD(pp))) { 3318 PP_SETRO(pp); 3319 } 3320 sfmmu_page_exit(pmtx); 3321 } 3322 3323 /* 3324 * If this is a remap we skip vac & contiguity checks. 3325 */ 3326 if (remap) 3327 continue; 3328 3329 /* 3330 * set p_vcolor and detect any vac conflicts. 3331 */ 3332 #ifdef VAC 3333 if (vac_err == 0) { 3334 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3335 3336 } 3337 #endif 3338 3339 /* 3340 * Save current index in case we need to undo it. 3341 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3342 * "SFMMU_INDEX_SHIFT 6" 3343 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3344 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3345 * 3346 * So: index = PAGESZ_TO_INDEX(ttesz); 3347 * if ttesz == 1 then index = 0x2 3348 * 2 then index = 0x4 3349 * 3 then index = 0x8 3350 * 4 then index = 0x10 3351 * 5 then index = 0x20 3352 * The code below checks if it's a new pagesize (ie, newidx) 3353 * in case we need to take it back out of p_index, 3354 * and then or's the new index into the existing index. 3355 */ 3356 if ((PP_MAPINDEX(pp) & index) == 0) 3357 newidx = 1; 3358 pp->p_index = (PP_MAPINDEX(pp) | index); 3359 3360 /* 3361 * contiguity check 3362 */ 3363 if (pp->p_pagenum != pfnum) { 3364 /* 3365 * If we fail the contiguity test then 3366 * the only thing we need to fix is the p_index field. 3367 * We might get a few extra flushes but since this 3368 * path is rare that is ok. The p_ro field will 3369 * get automatically fixed on the next tteload to 3370 * the page. NO TNC bit is set yet. 3371 */ 3372 while (i >= 0) { 3373 pp = *pps; 3374 if (newidx) 3375 pp->p_index = (PP_MAPINDEX(pp) & 3376 ~index); 3377 pps--; 3378 i--; 3379 } 3380 return (1); 3381 } 3382 pfnum++; 3383 addr += MMU_PAGESIZE; 3384 } 3385 3386 #ifdef VAC 3387 if (vac_err) { 3388 if (ttesz > osz) { 3389 /* 3390 * There are some smaller mappings that causes vac 3391 * conflicts. Convert all existing small mappings to 3392 * TNC. 3393 */ 3394 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3395 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3396 npgs); 3397 } else { 3398 /* EMPTY */ 3399 /* 3400 * If there exists an big page mapping, 3401 * that means the whole existing big page 3402 * has TNC setting already. No need to covert to 3403 * TNC again. 3404 */ 3405 ASSERT(PP_ISTNC(pp1)); 3406 } 3407 } 3408 #endif /* VAC */ 3409 3410 return (0); 3411 } 3412 3413 #ifdef VAC 3414 /* 3415 * Routine that detects vac consistency for a large page. It also 3416 * sets virtual color for all pp's for this big mapping. 3417 */ 3418 static int 3419 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3420 { 3421 int vcolor, ocolor; 3422 3423 ASSERT(sfmmu_mlist_held(pp)); 3424 3425 if (PP_ISNC(pp)) { 3426 return (HAT_TMPNC); 3427 } 3428 3429 vcolor = addr_to_vcolor(addr); 3430 if (PP_NEWPAGE(pp)) { 3431 PP_SET_VCOLOR(pp, vcolor); 3432 return (0); 3433 } 3434 3435 ocolor = PP_GET_VCOLOR(pp); 3436 if (ocolor == vcolor) { 3437 return (0); 3438 } 3439 3440 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3441 /* 3442 * Previous user of page had a differnet color 3443 * but since there are no current users 3444 * we just flush the cache and change the color. 3445 * As an optimization for large pages we flush the 3446 * entire cache of that color and set a flag. 3447 */ 3448 SFMMU_STAT(sf_pgcolor_conflict); 3449 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3450 CacheColor_SetFlushed(*cflags, ocolor); 3451 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3452 } 3453 PP_SET_VCOLOR(pp, vcolor); 3454 return (0); 3455 } 3456 3457 /* 3458 * We got a real conflict with a current mapping. 3459 * set flags to start unencaching all mappings 3460 * and return failure so we restart looping 3461 * the pp array from the beginning. 3462 */ 3463 return (HAT_TMPNC); 3464 } 3465 #endif /* VAC */ 3466 3467 /* 3468 * creates a large page shadow hmeblk for a tte. 3469 * The purpose of this routine is to allow us to do quick unloads because 3470 * the vm layer can easily pass a very large but sparsely populated range. 3471 */ 3472 static struct hme_blk * 3473 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3474 { 3475 struct hmehash_bucket *hmebp; 3476 hmeblk_tag hblktag; 3477 int hmeshift, size, vshift; 3478 uint_t shw_mask, newshw_mask; 3479 struct hme_blk *hmeblkp; 3480 3481 ASSERT(sfmmup != KHATID); 3482 if (mmu_page_sizes == max_mmu_page_sizes) { 3483 ASSERT(ttesz < TTE256M); 3484 } else { 3485 ASSERT(ttesz < TTE4M); 3486 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3487 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3488 } 3489 3490 if (ttesz == TTE8K) { 3491 size = TTE512K; 3492 } else { 3493 size = ++ttesz; 3494 } 3495 3496 hblktag.htag_id = sfmmup; 3497 hmeshift = HME_HASH_SHIFT(size); 3498 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3499 hblktag.htag_rehash = HME_HASH_REHASH(size); 3500 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3501 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3502 3503 SFMMU_HASH_LOCK(hmebp); 3504 3505 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3506 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3507 if (hmeblkp == NULL) { 3508 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3509 hblktag, flags, SFMMU_INVALID_SHMERID); 3510 } 3511 ASSERT(hmeblkp); 3512 if (!hmeblkp->hblk_shw_mask) { 3513 /* 3514 * if this is a unused hblk it was just allocated or could 3515 * potentially be a previous large page hblk so we need to 3516 * set the shadow bit. 3517 */ 3518 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3519 hmeblkp->hblk_shw_bit = 1; 3520 } else if (hmeblkp->hblk_shw_bit == 0) { 3521 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3522 (void *)hmeblkp); 3523 } 3524 ASSERT(hmeblkp->hblk_shw_bit == 1); 3525 ASSERT(!hmeblkp->hblk_shared); 3526 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3527 ASSERT(vshift < 8); 3528 /* 3529 * Atomically set shw mask bit 3530 */ 3531 do { 3532 shw_mask = hmeblkp->hblk_shw_mask; 3533 newshw_mask = shw_mask | (1 << vshift); 3534 newshw_mask = cas32(&hmeblkp->hblk_shw_mask, shw_mask, 3535 newshw_mask); 3536 } while (newshw_mask != shw_mask); 3537 3538 SFMMU_HASH_UNLOCK(hmebp); 3539 3540 return (hmeblkp); 3541 } 3542 3543 /* 3544 * This routine cleanup a previous shadow hmeblk and changes it to 3545 * a regular hblk. This happens rarely but it is possible 3546 * when a process wants to use large pages and there are hblks still 3547 * lying around from the previous as that used these hmeblks. 3548 * The alternative was to cleanup the shadow hblks at unload time 3549 * but since so few user processes actually use large pages, it is 3550 * better to be lazy and cleanup at this time. 3551 */ 3552 static void 3553 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3554 struct hmehash_bucket *hmebp) 3555 { 3556 caddr_t addr, endaddr; 3557 int hashno, size; 3558 3559 ASSERT(hmeblkp->hblk_shw_bit); 3560 ASSERT(!hmeblkp->hblk_shared); 3561 3562 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3563 3564 if (!hmeblkp->hblk_shw_mask) { 3565 hmeblkp->hblk_shw_bit = 0; 3566 return; 3567 } 3568 addr = (caddr_t)get_hblk_base(hmeblkp); 3569 endaddr = get_hblk_endaddr(hmeblkp); 3570 size = get_hblk_ttesz(hmeblkp); 3571 hashno = size - 1; 3572 ASSERT(hashno > 0); 3573 SFMMU_HASH_UNLOCK(hmebp); 3574 3575 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3576 3577 SFMMU_HASH_LOCK(hmebp); 3578 } 3579 3580 static void 3581 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3582 int hashno) 3583 { 3584 int hmeshift, shadow = 0; 3585 hmeblk_tag hblktag; 3586 struct hmehash_bucket *hmebp; 3587 struct hme_blk *hmeblkp; 3588 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3589 3590 ASSERT(hashno > 0); 3591 hblktag.htag_id = sfmmup; 3592 hblktag.htag_rehash = hashno; 3593 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3594 3595 hmeshift = HME_HASH_SHIFT(hashno); 3596 3597 while (addr < endaddr) { 3598 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3599 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3600 SFMMU_HASH_LOCK(hmebp); 3601 /* inline HME_HASH_SEARCH */ 3602 hmeblkp = hmebp->hmeblkp; 3603 pr_hblk = NULL; 3604 while (hmeblkp) { 3605 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3606 /* found hme_blk */ 3607 ASSERT(!hmeblkp->hblk_shared); 3608 if (hmeblkp->hblk_shw_bit) { 3609 if (hmeblkp->hblk_shw_mask) { 3610 shadow = 1; 3611 sfmmu_shadow_hcleanup(sfmmup, 3612 hmeblkp, hmebp); 3613 break; 3614 } else { 3615 hmeblkp->hblk_shw_bit = 0; 3616 } 3617 } 3618 3619 /* 3620 * Hblk_hmecnt and hblk_vcnt could be non zero 3621 * since hblk_unload() does not gurantee that. 3622 * 3623 * XXX - this could cause tteload() to spin 3624 * where sfmmu_shadow_hcleanup() is called. 3625 */ 3626 } 3627 3628 nx_hblk = hmeblkp->hblk_next; 3629 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3630 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3631 &list, 0); 3632 } else { 3633 pr_hblk = hmeblkp; 3634 } 3635 hmeblkp = nx_hblk; 3636 } 3637 3638 SFMMU_HASH_UNLOCK(hmebp); 3639 3640 if (shadow) { 3641 /* 3642 * We found another shadow hblk so cleaned its 3643 * children. We need to go back and cleanup 3644 * the original hblk so we don't change the 3645 * addr. 3646 */ 3647 shadow = 0; 3648 } else { 3649 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3650 (1 << hmeshift)); 3651 } 3652 } 3653 sfmmu_hblks_list_purge(&list, 0); 3654 } 3655 3656 /* 3657 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3658 * may still linger on after pageunload. 3659 */ 3660 static void 3661 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3662 { 3663 int hmeshift; 3664 hmeblk_tag hblktag; 3665 struct hmehash_bucket *hmebp; 3666 struct hme_blk *hmeblkp; 3667 struct hme_blk *pr_hblk; 3668 struct hme_blk *list = NULL; 3669 3670 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3671 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3672 3673 hmeshift = HME_HASH_SHIFT(ttesz); 3674 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3675 hblktag.htag_rehash = ttesz; 3676 hblktag.htag_rid = rid; 3677 hblktag.htag_id = srdp; 3678 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3679 3680 SFMMU_HASH_LOCK(hmebp); 3681 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3682 if (hmeblkp != NULL) { 3683 ASSERT(hmeblkp->hblk_shared); 3684 ASSERT(!hmeblkp->hblk_shw_bit); 3685 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3686 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3687 } 3688 ASSERT(!hmeblkp->hblk_lckcnt); 3689 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3690 &list, 0); 3691 } 3692 SFMMU_HASH_UNLOCK(hmebp); 3693 sfmmu_hblks_list_purge(&list, 0); 3694 } 3695 3696 /* ARGSUSED */ 3697 static void 3698 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3699 size_t r_size, void *r_obj, u_offset_t r_objoff) 3700 { 3701 } 3702 3703 /* 3704 * Searches for an hmeblk which maps addr, then unloads this mapping 3705 * and updates *eaddrp, if the hmeblk is found. 3706 */ 3707 static void 3708 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3709 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3710 { 3711 int hmeshift; 3712 hmeblk_tag hblktag; 3713 struct hmehash_bucket *hmebp; 3714 struct hme_blk *hmeblkp; 3715 struct hme_blk *pr_hblk; 3716 struct hme_blk *list = NULL; 3717 3718 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3719 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3720 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3721 3722 hmeshift = HME_HASH_SHIFT(ttesz); 3723 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3724 hblktag.htag_rehash = ttesz; 3725 hblktag.htag_rid = rid; 3726 hblktag.htag_id = srdp; 3727 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3728 3729 SFMMU_HASH_LOCK(hmebp); 3730 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3731 if (hmeblkp != NULL) { 3732 ASSERT(hmeblkp->hblk_shared); 3733 ASSERT(!hmeblkp->hblk_lckcnt); 3734 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3735 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3736 eaddr, NULL, HAT_UNLOAD); 3737 ASSERT(*eaddrp > addr); 3738 } 3739 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3740 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3741 &list, 0); 3742 } 3743 SFMMU_HASH_UNLOCK(hmebp); 3744 sfmmu_hblks_list_purge(&list, 0); 3745 } 3746 3747 static void 3748 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3749 { 3750 int ttesz = rgnp->rgn_pgszc; 3751 size_t rsz = rgnp->rgn_size; 3752 caddr_t rsaddr = rgnp->rgn_saddr; 3753 caddr_t readdr = rsaddr + rsz; 3754 caddr_t rhsaddr; 3755 caddr_t va; 3756 uint_t rid = rgnp->rgn_id; 3757 caddr_t cbsaddr; 3758 caddr_t cbeaddr; 3759 hat_rgn_cb_func_t rcbfunc; 3760 ulong_t cnt; 3761 3762 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3763 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3764 3765 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3766 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3767 if (ttesz < HBLK_MIN_TTESZ) { 3768 ttesz = HBLK_MIN_TTESZ; 3769 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3770 } else { 3771 rhsaddr = rsaddr; 3772 } 3773 3774 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) { 3775 rcbfunc = sfmmu_rgn_cb_noop; 3776 } 3777 3778 while (ttesz >= HBLK_MIN_TTESZ) { 3779 cbsaddr = rsaddr; 3780 cbeaddr = rsaddr; 3781 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3782 ttesz--; 3783 continue; 3784 } 3785 cnt = 0; 3786 va = rsaddr; 3787 while (va < readdr) { 3788 ASSERT(va >= rhsaddr); 3789 if (va != cbeaddr) { 3790 if (cbeaddr != cbsaddr) { 3791 ASSERT(cbeaddr > cbsaddr); 3792 (*rcbfunc)(cbsaddr, cbeaddr, 3793 rsaddr, rsz, rgnp->rgn_obj, 3794 rgnp->rgn_objoff); 3795 } 3796 cbsaddr = va; 3797 cbeaddr = va; 3798 } 3799 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3800 ttesz, &cbeaddr); 3801 cnt++; 3802 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3803 } 3804 if (cbeaddr != cbsaddr) { 3805 ASSERT(cbeaddr > cbsaddr); 3806 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3807 rsz, rgnp->rgn_obj, 3808 rgnp->rgn_objoff); 3809 } 3810 ttesz--; 3811 } 3812 } 3813 3814 /* 3815 * Release one hardware address translation lock on the given address range. 3816 */ 3817 void 3818 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3819 { 3820 struct hmehash_bucket *hmebp; 3821 hmeblk_tag hblktag; 3822 int hmeshift, hashno = 1; 3823 struct hme_blk *hmeblkp, *list = NULL; 3824 caddr_t endaddr; 3825 3826 ASSERT(sfmmup != NULL); 3827 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3828 3829 ASSERT((sfmmup == ksfmmup) || 3830 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3831 ASSERT((len & MMU_PAGEOFFSET) == 0); 3832 endaddr = addr + len; 3833 hblktag.htag_id = sfmmup; 3834 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3835 3836 /* 3837 * Spitfire supports 4 page sizes. 3838 * Most pages are expected to be of the smallest page size (8K) and 3839 * these will not need to be rehashed. 64K pages also don't need to be 3840 * rehashed because an hmeblk spans 64K of address space. 512K pages 3841 * might need 1 rehash and and 4M pages might need 2 rehashes. 3842 */ 3843 while (addr < endaddr) { 3844 hmeshift = HME_HASH_SHIFT(hashno); 3845 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3846 hblktag.htag_rehash = hashno; 3847 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3848 3849 SFMMU_HASH_LOCK(hmebp); 3850 3851 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3852 if (hmeblkp != NULL) { 3853 ASSERT(!hmeblkp->hblk_shared); 3854 /* 3855 * If we encounter a shadow hmeblk then 3856 * we know there are no valid hmeblks mapping 3857 * this address at this size or larger. 3858 * Just increment address by the smallest 3859 * page size. 3860 */ 3861 if (hmeblkp->hblk_shw_bit) { 3862 addr += MMU_PAGESIZE; 3863 } else { 3864 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3865 endaddr); 3866 } 3867 SFMMU_HASH_UNLOCK(hmebp); 3868 hashno = 1; 3869 continue; 3870 } 3871 SFMMU_HASH_UNLOCK(hmebp); 3872 3873 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3874 /* 3875 * We have traversed the whole list and rehashed 3876 * if necessary without finding the address to unlock 3877 * which should never happen. 3878 */ 3879 panic("sfmmu_unlock: addr not found. " 3880 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3881 } else { 3882 hashno++; 3883 } 3884 } 3885 3886 sfmmu_hblks_list_purge(&list, 0); 3887 } 3888 3889 void 3890 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 3891 hat_region_cookie_t rcookie) 3892 { 3893 sf_srd_t *srdp; 3894 sf_region_t *rgnp; 3895 int ttesz; 3896 uint_t rid; 3897 caddr_t eaddr; 3898 caddr_t va; 3899 int hmeshift; 3900 hmeblk_tag hblktag; 3901 struct hmehash_bucket *hmebp; 3902 struct hme_blk *hmeblkp; 3903 struct hme_blk *pr_hblk; 3904 struct hme_blk *list; 3905 3906 if (rcookie == HAT_INVALID_REGION_COOKIE) { 3907 hat_unlock(sfmmup, addr, len); 3908 return; 3909 } 3910 3911 ASSERT(sfmmup != NULL); 3912 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 3913 ASSERT(sfmmup != ksfmmup); 3914 3915 srdp = sfmmup->sfmmu_srdp; 3916 rid = (uint_t)((uint64_t)rcookie); 3917 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS); 3918 eaddr = addr + len; 3919 va = addr; 3920 list = NULL; 3921 rgnp = srdp->srd_hmergnp[rid]; 3922 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 3923 3924 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 3925 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 3926 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 3927 ttesz = HBLK_MIN_TTESZ; 3928 } else { 3929 ttesz = rgnp->rgn_pgszc; 3930 } 3931 while (va < eaddr) { 3932 while (ttesz < rgnp->rgn_pgszc && 3933 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 3934 ttesz++; 3935 } 3936 while (ttesz >= HBLK_MIN_TTESZ) { 3937 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3938 ttesz--; 3939 continue; 3940 } 3941 hmeshift = HME_HASH_SHIFT(ttesz); 3942 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 3943 hblktag.htag_rehash = ttesz; 3944 hblktag.htag_rid = rid; 3945 hblktag.htag_id = srdp; 3946 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 3947 SFMMU_HASH_LOCK(hmebp); 3948 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 3949 &list); 3950 if (hmeblkp == NULL) { 3951 SFMMU_HASH_UNLOCK(hmebp); 3952 ttesz--; 3953 continue; 3954 } 3955 ASSERT(hmeblkp->hblk_shared); 3956 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 3957 ASSERT(va >= eaddr || 3958 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 3959 SFMMU_HASH_UNLOCK(hmebp); 3960 break; 3961 } 3962 if (ttesz < HBLK_MIN_TTESZ) { 3963 panic("hat_unlock_region: addr not found " 3964 "addr %p hat %p", (void *)va, (void *)sfmmup); 3965 } 3966 } 3967 sfmmu_hblks_list_purge(&list, 0); 3968 } 3969 3970 /* 3971 * Function to unlock a range of addresses in an hmeblk. It returns the 3972 * next address that needs to be unlocked. 3973 * Should be called with the hash lock held. 3974 */ 3975 static caddr_t 3976 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 3977 { 3978 struct sf_hment *sfhme; 3979 tte_t tteold, ttemod; 3980 int ttesz, ret; 3981 3982 ASSERT(in_hblk_range(hmeblkp, addr)); 3983 ASSERT(hmeblkp->hblk_shw_bit == 0); 3984 3985 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 3986 ttesz = get_hblk_ttesz(hmeblkp); 3987 3988 HBLKTOHME(sfhme, hmeblkp, addr); 3989 while (addr < endaddr) { 3990 readtte: 3991 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3992 if (TTE_IS_VALID(&tteold)) { 3993 3994 ttemod = tteold; 3995 3996 ret = sfmmu_modifytte_try(&tteold, &ttemod, 3997 &sfhme->hme_tte); 3998 3999 if (ret < 0) 4000 goto readtte; 4001 4002 if (hmeblkp->hblk_lckcnt == 0) 4003 panic("zero hblk lckcnt"); 4004 4005 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4006 (uintptr_t)endaddr) 4007 panic("can't unlock large tte"); 4008 4009 ASSERT(hmeblkp->hblk_lckcnt > 0); 4010 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 4011 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4012 } else { 4013 panic("sfmmu_hblk_unlock: invalid tte"); 4014 } 4015 addr += TTEBYTES(ttesz); 4016 sfhme++; 4017 } 4018 return (addr); 4019 } 4020 4021 /* 4022 * Physical Address Mapping Framework 4023 * 4024 * General rules: 4025 * 4026 * (1) Applies only to seg_kmem memory pages. To make things easier, 4027 * seg_kpm addresses are also accepted by the routines, but nothing 4028 * is done with them since by definition their PA mappings are static. 4029 * (2) hat_add_callback() may only be called while holding the page lock 4030 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4031 * or passing HAC_PAGELOCK flag. 4032 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4033 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4034 * callbacks may not sleep or acquire adaptive mutex locks. 4035 * (4) Either prehandler() or posthandler() (but not both) may be specified 4036 * as being NULL. Specifying an errhandler() is optional. 4037 * 4038 * Details of using the framework: 4039 * 4040 * registering a callback (hat_register_callback()) 4041 * 4042 * Pass prehandler, posthandler, errhandler addresses 4043 * as described below. If capture_cpus argument is nonzero, 4044 * suspend callback to the prehandler will occur with CPUs 4045 * captured and executing xc_loop() and CPUs will remain 4046 * captured until after the posthandler suspend callback 4047 * occurs. 4048 * 4049 * adding a callback (hat_add_callback()) 4050 * 4051 * as_pagelock(); 4052 * hat_add_callback(); 4053 * save returned pfn in private data structures or program registers; 4054 * as_pageunlock(); 4055 * 4056 * prehandler() 4057 * 4058 * Stop all accesses by physical address to this memory page. 4059 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4060 * adaptive locks. The second, SUSPEND, is called at high PIL with 4061 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4062 * locks must be XCALL_PIL or higher locks). 4063 * 4064 * May return the following errors: 4065 * EIO: A fatal error has occurred. This will result in panic. 4066 * EAGAIN: The page cannot be suspended. This will fail the 4067 * relocation. 4068 * 0: Success. 4069 * 4070 * posthandler() 4071 * 4072 * Save new pfn in private data structures or program registers; 4073 * not allowed to fail (non-zero return values will result in panic). 4074 * 4075 * errhandler() 4076 * 4077 * called when an error occurs related to the callback. Currently 4078 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4079 * a page is being freed, but there are still outstanding callback(s) 4080 * registered on the page. 4081 * 4082 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4083 * 4084 * stop using physical address 4085 * hat_delete_callback(); 4086 * 4087 */ 4088 4089 /* 4090 * Register a callback class. Each subsystem should do this once and 4091 * cache the id_t returned for use in setting up and tearing down callbacks. 4092 * 4093 * There is no facility for removing callback IDs once they are created; 4094 * the "key" should be unique for each module, so in case a module is unloaded 4095 * and subsequently re-loaded, we can recycle the module's previous entry. 4096 */ 4097 id_t 4098 hat_register_callback(int key, 4099 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4100 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4101 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4102 int capture_cpus) 4103 { 4104 id_t id; 4105 4106 /* 4107 * Search the table for a pre-existing callback associated with 4108 * the identifier "key". If one exists, we re-use that entry in 4109 * the table for this instance, otherwise we assign the next 4110 * available table slot. 4111 */ 4112 for (id = 0; id < sfmmu_max_cb_id; id++) { 4113 if (sfmmu_cb_table[id].key == key) 4114 break; 4115 } 4116 4117 if (id == sfmmu_max_cb_id) { 4118 id = sfmmu_cb_nextid++; 4119 if (id >= sfmmu_max_cb_id) 4120 panic("hat_register_callback: out of callback IDs"); 4121 } 4122 4123 ASSERT(prehandler != NULL || posthandler != NULL); 4124 4125 sfmmu_cb_table[id].key = key; 4126 sfmmu_cb_table[id].prehandler = prehandler; 4127 sfmmu_cb_table[id].posthandler = posthandler; 4128 sfmmu_cb_table[id].errhandler = errhandler; 4129 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4130 4131 return (id); 4132 } 4133 4134 #define HAC_COOKIE_NONE (void *)-1 4135 4136 /* 4137 * Add relocation callbacks to the specified addr/len which will be called 4138 * when relocating the associated page. See the description of pre and 4139 * posthandler above for more details. 4140 * 4141 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4142 * locked internally so the caller must be able to deal with the callback 4143 * running even before this function has returned. If HAC_PAGELOCK is not 4144 * set, it is assumed that the underlying memory pages are locked. 4145 * 4146 * Since the caller must track the individual page boundaries anyway, 4147 * we only allow a callback to be added to a single page (large 4148 * or small). Thus [addr, addr + len) MUST be contained within a single 4149 * page. 4150 * 4151 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4152 * _provided_that_ a unique parameter is specified for each callback. 4153 * If multiple callbacks are registered on the same range the callback will 4154 * be invoked with each unique parameter. Registering the same callback with 4155 * the same argument more than once will result in corrupted kernel state. 4156 * 4157 * Returns the pfn of the underlying kernel page in *rpfn 4158 * on success, or PFN_INVALID on failure. 4159 * 4160 * cookiep (if passed) provides storage space for an opaque cookie 4161 * to return later to hat_delete_callback(). This cookie makes the callback 4162 * deletion significantly quicker by avoiding a potentially lengthy hash 4163 * search. 4164 * 4165 * Returns values: 4166 * 0: success 4167 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4168 * EINVAL: callback ID is not valid 4169 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4170 * space 4171 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4172 */ 4173 int 4174 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4175 void *pvt, pfn_t *rpfn, void **cookiep) 4176 { 4177 struct hmehash_bucket *hmebp; 4178 hmeblk_tag hblktag; 4179 struct hme_blk *hmeblkp; 4180 int hmeshift, hashno; 4181 caddr_t saddr, eaddr, baseaddr; 4182 struct pa_hment *pahmep; 4183 struct sf_hment *sfhmep, *osfhmep; 4184 kmutex_t *pml; 4185 tte_t tte; 4186 page_t *pp; 4187 vnode_t *vp; 4188 u_offset_t off; 4189 pfn_t pfn; 4190 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4191 int locked = 0; 4192 4193 /* 4194 * For KPM mappings, just return the physical address since we 4195 * don't need to register any callbacks. 4196 */ 4197 if (IS_KPM_ADDR(vaddr)) { 4198 uint64_t paddr; 4199 SFMMU_KPM_VTOP(vaddr, paddr); 4200 *rpfn = btop(paddr); 4201 if (cookiep != NULL) 4202 *cookiep = HAC_COOKIE_NONE; 4203 return (0); 4204 } 4205 4206 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4207 *rpfn = PFN_INVALID; 4208 return (EINVAL); 4209 } 4210 4211 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4212 *rpfn = PFN_INVALID; 4213 return (ENOMEM); 4214 } 4215 4216 sfhmep = &pahmep->sfment; 4217 4218 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4219 eaddr = saddr + len; 4220 4221 rehash: 4222 /* Find the mapping(s) for this page */ 4223 for (hashno = TTE64K, hmeblkp = NULL; 4224 hmeblkp == NULL && hashno <= mmu_hashcnt; 4225 hashno++) { 4226 hmeshift = HME_HASH_SHIFT(hashno); 4227 hblktag.htag_id = ksfmmup; 4228 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4229 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4230 hblktag.htag_rehash = hashno; 4231 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4232 4233 SFMMU_HASH_LOCK(hmebp); 4234 4235 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4236 4237 if (hmeblkp == NULL) 4238 SFMMU_HASH_UNLOCK(hmebp); 4239 } 4240 4241 if (hmeblkp == NULL) { 4242 kmem_cache_free(pa_hment_cache, pahmep); 4243 *rpfn = PFN_INVALID; 4244 return (ENXIO); 4245 } 4246 4247 ASSERT(!hmeblkp->hblk_shared); 4248 4249 HBLKTOHME(osfhmep, hmeblkp, saddr); 4250 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4251 4252 if (!TTE_IS_VALID(&tte)) { 4253 SFMMU_HASH_UNLOCK(hmebp); 4254 kmem_cache_free(pa_hment_cache, pahmep); 4255 *rpfn = PFN_INVALID; 4256 return (ENXIO); 4257 } 4258 4259 /* 4260 * Make sure the boundaries for the callback fall within this 4261 * single mapping. 4262 */ 4263 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4264 ASSERT(saddr >= baseaddr); 4265 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4266 SFMMU_HASH_UNLOCK(hmebp); 4267 kmem_cache_free(pa_hment_cache, pahmep); 4268 *rpfn = PFN_INVALID; 4269 return (ERANGE); 4270 } 4271 4272 pfn = sfmmu_ttetopfn(&tte, vaddr); 4273 4274 /* 4275 * The pfn may not have a page_t underneath in which case we 4276 * just return it. This can happen if we are doing I/O to a 4277 * static portion of the kernel's address space, for instance. 4278 */ 4279 pp = osfhmep->hme_page; 4280 if (pp == NULL) { 4281 SFMMU_HASH_UNLOCK(hmebp); 4282 kmem_cache_free(pa_hment_cache, pahmep); 4283 *rpfn = pfn; 4284 if (cookiep) 4285 *cookiep = HAC_COOKIE_NONE; 4286 return (0); 4287 } 4288 ASSERT(pp == PP_PAGEROOT(pp)); 4289 4290 vp = pp->p_vnode; 4291 off = pp->p_offset; 4292 4293 pml = sfmmu_mlist_enter(pp); 4294 4295 if (flags & HAC_PAGELOCK) { 4296 if (!page_trylock(pp, SE_SHARED)) { 4297 /* 4298 * Somebody is holding SE_EXCL lock. Might 4299 * even be hat_page_relocate(). Drop all 4300 * our locks, lookup the page in &kvp, and 4301 * retry. If it doesn't exist in &kvp and &zvp, 4302 * then we must be dealing with a kernel mapped 4303 * page which doesn't actually belong to 4304 * segkmem so we punt. 4305 */ 4306 sfmmu_mlist_exit(pml); 4307 SFMMU_HASH_UNLOCK(hmebp); 4308 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4309 4310 /* check zvp before giving up */ 4311 if (pp == NULL) 4312 pp = page_lookup(&zvp, (u_offset_t)saddr, 4313 SE_SHARED); 4314 4315 /* Okay, we didn't find it, give up */ 4316 if (pp == NULL) { 4317 kmem_cache_free(pa_hment_cache, pahmep); 4318 *rpfn = pfn; 4319 if (cookiep) 4320 *cookiep = HAC_COOKIE_NONE; 4321 return (0); 4322 } 4323 page_unlock(pp); 4324 goto rehash; 4325 } 4326 locked = 1; 4327 } 4328 4329 if (!PAGE_LOCKED(pp) && !panicstr) 4330 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4331 4332 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4333 pp->p_offset != off) { 4334 /* 4335 * The page moved before we got our hands on it. Drop 4336 * all the locks and try again. 4337 */ 4338 ASSERT((flags & HAC_PAGELOCK) != 0); 4339 sfmmu_mlist_exit(pml); 4340 SFMMU_HASH_UNLOCK(hmebp); 4341 page_unlock(pp); 4342 locked = 0; 4343 goto rehash; 4344 } 4345 4346 if (!VN_ISKAS(vp)) { 4347 /* 4348 * This is not a segkmem page but another page which 4349 * has been kernel mapped. It had better have at least 4350 * a share lock on it. Return the pfn. 4351 */ 4352 sfmmu_mlist_exit(pml); 4353 SFMMU_HASH_UNLOCK(hmebp); 4354 if (locked) 4355 page_unlock(pp); 4356 kmem_cache_free(pa_hment_cache, pahmep); 4357 ASSERT(PAGE_LOCKED(pp)); 4358 *rpfn = pfn; 4359 if (cookiep) 4360 *cookiep = HAC_COOKIE_NONE; 4361 return (0); 4362 } 4363 4364 /* 4365 * Setup this pa_hment and link its embedded dummy sf_hment into 4366 * the mapping list. 4367 */ 4368 pp->p_share++; 4369 pahmep->cb_id = callback_id; 4370 pahmep->addr = vaddr; 4371 pahmep->len = len; 4372 pahmep->refcnt = 1; 4373 pahmep->flags = 0; 4374 pahmep->pvt = pvt; 4375 4376 sfhmep->hme_tte.ll = 0; 4377 sfhmep->hme_data = pahmep; 4378 sfhmep->hme_prev = osfhmep; 4379 sfhmep->hme_next = osfhmep->hme_next; 4380 4381 if (osfhmep->hme_next) 4382 osfhmep->hme_next->hme_prev = sfhmep; 4383 4384 osfhmep->hme_next = sfhmep; 4385 4386 sfmmu_mlist_exit(pml); 4387 SFMMU_HASH_UNLOCK(hmebp); 4388 4389 if (locked) 4390 page_unlock(pp); 4391 4392 *rpfn = pfn; 4393 if (cookiep) 4394 *cookiep = (void *)pahmep; 4395 4396 return (0); 4397 } 4398 4399 /* 4400 * Remove the relocation callbacks from the specified addr/len. 4401 */ 4402 void 4403 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4404 void *cookie) 4405 { 4406 struct hmehash_bucket *hmebp; 4407 hmeblk_tag hblktag; 4408 struct hme_blk *hmeblkp; 4409 int hmeshift, hashno; 4410 caddr_t saddr; 4411 struct pa_hment *pahmep; 4412 struct sf_hment *sfhmep, *osfhmep; 4413 kmutex_t *pml; 4414 tte_t tte; 4415 page_t *pp; 4416 vnode_t *vp; 4417 u_offset_t off; 4418 int locked = 0; 4419 4420 /* 4421 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4422 * remove so just return. 4423 */ 4424 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4425 return; 4426 4427 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4428 4429 rehash: 4430 /* Find the mapping(s) for this page */ 4431 for (hashno = TTE64K, hmeblkp = NULL; 4432 hmeblkp == NULL && hashno <= mmu_hashcnt; 4433 hashno++) { 4434 hmeshift = HME_HASH_SHIFT(hashno); 4435 hblktag.htag_id = ksfmmup; 4436 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4437 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4438 hblktag.htag_rehash = hashno; 4439 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4440 4441 SFMMU_HASH_LOCK(hmebp); 4442 4443 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4444 4445 if (hmeblkp == NULL) 4446 SFMMU_HASH_UNLOCK(hmebp); 4447 } 4448 4449 if (hmeblkp == NULL) 4450 return; 4451 4452 ASSERT(!hmeblkp->hblk_shared); 4453 4454 HBLKTOHME(osfhmep, hmeblkp, saddr); 4455 4456 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4457 if (!TTE_IS_VALID(&tte)) { 4458 SFMMU_HASH_UNLOCK(hmebp); 4459 return; 4460 } 4461 4462 pp = osfhmep->hme_page; 4463 if (pp == NULL) { 4464 SFMMU_HASH_UNLOCK(hmebp); 4465 ASSERT(cookie == NULL); 4466 return; 4467 } 4468 4469 vp = pp->p_vnode; 4470 off = pp->p_offset; 4471 4472 pml = sfmmu_mlist_enter(pp); 4473 4474 if (flags & HAC_PAGELOCK) { 4475 if (!page_trylock(pp, SE_SHARED)) { 4476 /* 4477 * Somebody is holding SE_EXCL lock. Might 4478 * even be hat_page_relocate(). Drop all 4479 * our locks, lookup the page in &kvp, and 4480 * retry. If it doesn't exist in &kvp and &zvp, 4481 * then we must be dealing with a kernel mapped 4482 * page which doesn't actually belong to 4483 * segkmem so we punt. 4484 */ 4485 sfmmu_mlist_exit(pml); 4486 SFMMU_HASH_UNLOCK(hmebp); 4487 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4488 /* check zvp before giving up */ 4489 if (pp == NULL) 4490 pp = page_lookup(&zvp, (u_offset_t)saddr, 4491 SE_SHARED); 4492 4493 if (pp == NULL) { 4494 ASSERT(cookie == NULL); 4495 return; 4496 } 4497 page_unlock(pp); 4498 goto rehash; 4499 } 4500 locked = 1; 4501 } 4502 4503 ASSERT(PAGE_LOCKED(pp)); 4504 4505 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4506 pp->p_offset != off) { 4507 /* 4508 * The page moved before we got our hands on it. Drop 4509 * all the locks and try again. 4510 */ 4511 ASSERT((flags & HAC_PAGELOCK) != 0); 4512 sfmmu_mlist_exit(pml); 4513 SFMMU_HASH_UNLOCK(hmebp); 4514 page_unlock(pp); 4515 locked = 0; 4516 goto rehash; 4517 } 4518 4519 if (!VN_ISKAS(vp)) { 4520 /* 4521 * This is not a segkmem page but another page which 4522 * has been kernel mapped. 4523 */ 4524 sfmmu_mlist_exit(pml); 4525 SFMMU_HASH_UNLOCK(hmebp); 4526 if (locked) 4527 page_unlock(pp); 4528 ASSERT(cookie == NULL); 4529 return; 4530 } 4531 4532 if (cookie != NULL) { 4533 pahmep = (struct pa_hment *)cookie; 4534 sfhmep = &pahmep->sfment; 4535 } else { 4536 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4537 sfhmep = sfhmep->hme_next) { 4538 4539 /* 4540 * skip va<->pa mappings 4541 */ 4542 if (!IS_PAHME(sfhmep)) 4543 continue; 4544 4545 pahmep = sfhmep->hme_data; 4546 ASSERT(pahmep != NULL); 4547 4548 /* 4549 * if pa_hment matches, remove it 4550 */ 4551 if ((pahmep->pvt == pvt) && 4552 (pahmep->addr == vaddr) && 4553 (pahmep->len == len)) { 4554 break; 4555 } 4556 } 4557 } 4558 4559 if (sfhmep == NULL) { 4560 if (!panicstr) { 4561 panic("hat_delete_callback: pa_hment not found, pp %p", 4562 (void *)pp); 4563 } 4564 return; 4565 } 4566 4567 /* 4568 * Note: at this point a valid kernel mapping must still be 4569 * present on this page. 4570 */ 4571 pp->p_share--; 4572 if (pp->p_share <= 0) 4573 panic("hat_delete_callback: zero p_share"); 4574 4575 if (--pahmep->refcnt == 0) { 4576 if (pahmep->flags != 0) 4577 panic("hat_delete_callback: pa_hment is busy"); 4578 4579 /* 4580 * Remove sfhmep from the mapping list for the page. 4581 */ 4582 if (sfhmep->hme_prev) { 4583 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4584 } else { 4585 pp->p_mapping = sfhmep->hme_next; 4586 } 4587 4588 if (sfhmep->hme_next) 4589 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4590 4591 sfmmu_mlist_exit(pml); 4592 SFMMU_HASH_UNLOCK(hmebp); 4593 4594 if (locked) 4595 page_unlock(pp); 4596 4597 kmem_cache_free(pa_hment_cache, pahmep); 4598 return; 4599 } 4600 4601 sfmmu_mlist_exit(pml); 4602 SFMMU_HASH_UNLOCK(hmebp); 4603 if (locked) 4604 page_unlock(pp); 4605 } 4606 4607 /* 4608 * hat_probe returns 1 if the translation for the address 'addr' is 4609 * loaded, zero otherwise. 4610 * 4611 * hat_probe should be used only for advisorary purposes because it may 4612 * occasionally return the wrong value. The implementation must guarantee that 4613 * returning the wrong value is a very rare event. hat_probe is used 4614 * to implement optimizations in the segment drivers. 4615 * 4616 */ 4617 int 4618 hat_probe(struct hat *sfmmup, caddr_t addr) 4619 { 4620 pfn_t pfn; 4621 tte_t tte; 4622 4623 ASSERT(sfmmup != NULL); 4624 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4625 4626 ASSERT((sfmmup == ksfmmup) || 4627 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4628 4629 if (sfmmup == ksfmmup) { 4630 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4631 == PFN_SUSPENDED) { 4632 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4633 } 4634 } else { 4635 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4636 } 4637 4638 if (pfn != PFN_INVALID) 4639 return (1); 4640 else 4641 return (0); 4642 } 4643 4644 ssize_t 4645 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4646 { 4647 tte_t tte; 4648 4649 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4650 4651 if (sfmmup == ksfmmup) { 4652 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4653 return (-1); 4654 } 4655 } else { 4656 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4657 return (-1); 4658 } 4659 } 4660 4661 ASSERT(TTE_IS_VALID(&tte)); 4662 return (TTEBYTES(TTE_CSZ(&tte))); 4663 } 4664 4665 uint_t 4666 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4667 { 4668 tte_t tte; 4669 4670 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 4671 4672 if (sfmmup == ksfmmup) { 4673 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4674 tte.ll = 0; 4675 } 4676 } else { 4677 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4678 tte.ll = 0; 4679 } 4680 } 4681 if (TTE_IS_VALID(&tte)) { 4682 *attr = sfmmu_ptov_attr(&tte); 4683 return (0); 4684 } 4685 *attr = 0; 4686 return ((uint_t)0xffffffff); 4687 } 4688 4689 /* 4690 * Enables more attributes on specified address range (ie. logical OR) 4691 */ 4692 void 4693 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4694 { 4695 if (hat->sfmmu_xhat_provider) { 4696 XHAT_SETATTR(hat, addr, len, attr); 4697 return; 4698 } else { 4699 /* 4700 * This must be a CPU HAT. If the address space has 4701 * XHATs attached, change attributes for all of them, 4702 * just in case 4703 */ 4704 ASSERT(hat->sfmmu_as != NULL); 4705 if (hat->sfmmu_as->a_xhat != NULL) 4706 xhat_setattr_all(hat->sfmmu_as, addr, len, attr); 4707 } 4708 4709 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4710 } 4711 4712 /* 4713 * Assigns attributes to the specified address range. All the attributes 4714 * are specified. 4715 */ 4716 void 4717 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4718 { 4719 if (hat->sfmmu_xhat_provider) { 4720 XHAT_CHGATTR(hat, addr, len, attr); 4721 return; 4722 } else { 4723 /* 4724 * This must be a CPU HAT. If the address space has 4725 * XHATs attached, change attributes for all of them, 4726 * just in case 4727 */ 4728 ASSERT(hat->sfmmu_as != NULL); 4729 if (hat->sfmmu_as->a_xhat != NULL) 4730 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr); 4731 } 4732 4733 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4734 } 4735 4736 /* 4737 * Remove attributes on the specified address range (ie. loginal NAND) 4738 */ 4739 void 4740 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4741 { 4742 if (hat->sfmmu_xhat_provider) { 4743 XHAT_CLRATTR(hat, addr, len, attr); 4744 return; 4745 } else { 4746 /* 4747 * This must be a CPU HAT. If the address space has 4748 * XHATs attached, change attributes for all of them, 4749 * just in case 4750 */ 4751 ASSERT(hat->sfmmu_as != NULL); 4752 if (hat->sfmmu_as->a_xhat != NULL) 4753 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr); 4754 } 4755 4756 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4757 } 4758 4759 /* 4760 * Change attributes on an address range to that specified by attr and mode. 4761 */ 4762 static void 4763 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4764 int mode) 4765 { 4766 struct hmehash_bucket *hmebp; 4767 hmeblk_tag hblktag; 4768 int hmeshift, hashno = 1; 4769 struct hme_blk *hmeblkp, *list = NULL; 4770 caddr_t endaddr; 4771 cpuset_t cpuset; 4772 demap_range_t dmr; 4773 4774 CPUSET_ZERO(cpuset); 4775 4776 ASSERT((sfmmup == ksfmmup) || 4777 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4778 ASSERT((len & MMU_PAGEOFFSET) == 0); 4779 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4780 4781 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4782 ((addr + len) > (caddr_t)USERLIMIT)) { 4783 panic("user addr %p in kernel space", 4784 (void *)addr); 4785 } 4786 4787 endaddr = addr + len; 4788 hblktag.htag_id = sfmmup; 4789 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4790 DEMAP_RANGE_INIT(sfmmup, &dmr); 4791 4792 while (addr < endaddr) { 4793 hmeshift = HME_HASH_SHIFT(hashno); 4794 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4795 hblktag.htag_rehash = hashno; 4796 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4797 4798 SFMMU_HASH_LOCK(hmebp); 4799 4800 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4801 if (hmeblkp != NULL) { 4802 ASSERT(!hmeblkp->hblk_shared); 4803 /* 4804 * We've encountered a shadow hmeblk so skip the range 4805 * of the next smaller mapping size. 4806 */ 4807 if (hmeblkp->hblk_shw_bit) { 4808 ASSERT(sfmmup != ksfmmup); 4809 ASSERT(hashno > 1); 4810 addr = (caddr_t)P2END((uintptr_t)addr, 4811 TTEBYTES(hashno - 1)); 4812 } else { 4813 addr = sfmmu_hblk_chgattr(sfmmup, 4814 hmeblkp, addr, endaddr, &dmr, attr, mode); 4815 } 4816 SFMMU_HASH_UNLOCK(hmebp); 4817 hashno = 1; 4818 continue; 4819 } 4820 SFMMU_HASH_UNLOCK(hmebp); 4821 4822 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4823 /* 4824 * We have traversed the whole list and rehashed 4825 * if necessary without finding the address to chgattr. 4826 * This is ok, so we increment the address by the 4827 * smallest hmeblk range for kernel mappings or for 4828 * user mappings with no large pages, and the largest 4829 * hmeblk range, to account for shadow hmeblks, for 4830 * user mappings with large pages and continue. 4831 */ 4832 if (sfmmup == ksfmmup) 4833 addr = (caddr_t)P2END((uintptr_t)addr, 4834 TTEBYTES(1)); 4835 else 4836 addr = (caddr_t)P2END((uintptr_t)addr, 4837 TTEBYTES(hashno)); 4838 hashno = 1; 4839 } else { 4840 hashno++; 4841 } 4842 } 4843 4844 sfmmu_hblks_list_purge(&list, 0); 4845 DEMAP_RANGE_FLUSH(&dmr); 4846 cpuset = sfmmup->sfmmu_cpusran; 4847 xt_sync(cpuset); 4848 } 4849 4850 /* 4851 * This function chgattr on a range of addresses in an hmeblk. It returns the 4852 * next addres that needs to be chgattr. 4853 * It should be called with the hash lock held. 4854 * XXX It should be possible to optimize chgattr by not flushing every time but 4855 * on the other hand: 4856 * 1. do one flush crosscall. 4857 * 2. only flush if we are increasing permissions (make sure this will work) 4858 */ 4859 static caddr_t 4860 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4861 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4862 { 4863 tte_t tte, tteattr, tteflags, ttemod; 4864 struct sf_hment *sfhmep; 4865 int ttesz; 4866 struct page *pp = NULL; 4867 kmutex_t *pml, *pmtx; 4868 int ret; 4869 int use_demap_range; 4870 #if defined(SF_ERRATA_57) 4871 int check_exec; 4872 #endif 4873 4874 ASSERT(in_hblk_range(hmeblkp, addr)); 4875 ASSERT(hmeblkp->hblk_shw_bit == 0); 4876 ASSERT(!hmeblkp->hblk_shared); 4877 4878 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4879 ttesz = get_hblk_ttesz(hmeblkp); 4880 4881 /* 4882 * Flush the current demap region if addresses have been 4883 * skipped or the page size doesn't match. 4884 */ 4885 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4886 if (use_demap_range) { 4887 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4888 } else if (dmrp != NULL) { 4889 DEMAP_RANGE_FLUSH(dmrp); 4890 } 4891 4892 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4893 #if defined(SF_ERRATA_57) 4894 check_exec = (sfmmup != ksfmmup) && 4895 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4896 TTE_IS_EXECUTABLE(&tteattr); 4897 #endif 4898 HBLKTOHME(sfhmep, hmeblkp, addr); 4899 while (addr < endaddr) { 4900 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4901 if (TTE_IS_VALID(&tte)) { 4902 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4903 /* 4904 * if the new attr is the same as old 4905 * continue 4906 */ 4907 goto next_addr; 4908 } 4909 if (!TTE_IS_WRITABLE(&tteattr)) { 4910 /* 4911 * make sure we clear hw modify bit if we 4912 * removing write protections 4913 */ 4914 tteflags.tte_intlo |= TTE_HWWR_INT; 4915 } 4916 4917 pml = NULL; 4918 pp = sfhmep->hme_page; 4919 if (pp) { 4920 pml = sfmmu_mlist_enter(pp); 4921 } 4922 4923 if (pp != sfhmep->hme_page) { 4924 /* 4925 * tte must have been unloaded. 4926 */ 4927 ASSERT(pml); 4928 sfmmu_mlist_exit(pml); 4929 continue; 4930 } 4931 4932 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 4933 4934 ttemod = tte; 4935 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 4936 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 4937 4938 #if defined(SF_ERRATA_57) 4939 if (check_exec && addr < errata57_limit) 4940 ttemod.tte_exec_perm = 0; 4941 #endif 4942 ret = sfmmu_modifytte_try(&tte, &ttemod, 4943 &sfhmep->hme_tte); 4944 4945 if (ret < 0) { 4946 /* tte changed underneath us */ 4947 if (pml) { 4948 sfmmu_mlist_exit(pml); 4949 } 4950 continue; 4951 } 4952 4953 if (tteflags.tte_intlo & TTE_HWWR_INT) { 4954 /* 4955 * need to sync if we are clearing modify bit. 4956 */ 4957 sfmmu_ttesync(sfmmup, addr, &tte, pp); 4958 } 4959 4960 if (pp && PP_ISRO(pp)) { 4961 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 4962 pmtx = sfmmu_page_enter(pp); 4963 PP_CLRRO(pp); 4964 sfmmu_page_exit(pmtx); 4965 } 4966 } 4967 4968 if (ret > 0 && use_demap_range) { 4969 DEMAP_RANGE_MARKPG(dmrp, addr); 4970 } else if (ret > 0) { 4971 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 4972 } 4973 4974 if (pml) { 4975 sfmmu_mlist_exit(pml); 4976 } 4977 } 4978 next_addr: 4979 addr += TTEBYTES(ttesz); 4980 sfhmep++; 4981 DEMAP_RANGE_NEXTPG(dmrp); 4982 } 4983 return (addr); 4984 } 4985 4986 /* 4987 * This routine converts virtual attributes to physical ones. It will 4988 * update the tteflags field with the tte mask corresponding to the attributes 4989 * affected and it returns the new attributes. It will also clear the modify 4990 * bit if we are taking away write permission. This is necessary since the 4991 * modify bit is the hardware permission bit and we need to clear it in order 4992 * to detect write faults. 4993 */ 4994 static uint64_t 4995 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 4996 { 4997 tte_t ttevalue; 4998 4999 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5000 5001 switch (mode) { 5002 case SFMMU_CHGATTR: 5003 /* all attributes specified */ 5004 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5005 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5006 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5007 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5008 break; 5009 case SFMMU_SETATTR: 5010 ASSERT(!(attr & ~HAT_PROT_MASK)); 5011 ttemaskp->ll = 0; 5012 ttevalue.ll = 0; 5013 /* 5014 * a valid tte implies exec and read for sfmmu 5015 * so no need to do anything about them. 5016 * since priviledged access implies user access 5017 * PROT_USER doesn't make sense either. 5018 */ 5019 if (attr & PROT_WRITE) { 5020 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5021 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5022 } 5023 break; 5024 case SFMMU_CLRATTR: 5025 /* attributes will be nand with current ones */ 5026 if (attr & ~(PROT_WRITE | PROT_USER)) { 5027 panic("sfmmu: attr %x not supported", attr); 5028 } 5029 ttemaskp->ll = 0; 5030 ttevalue.ll = 0; 5031 if (attr & PROT_WRITE) { 5032 /* clear both writable and modify bit */ 5033 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5034 } 5035 if (attr & PROT_USER) { 5036 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5037 ttevalue.tte_intlo |= TTE_PRIV_INT; 5038 } 5039 break; 5040 default: 5041 panic("sfmmu_vtop_attr: bad mode %x", mode); 5042 } 5043 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5044 return (ttevalue.ll); 5045 } 5046 5047 static uint_t 5048 sfmmu_ptov_attr(tte_t *ttep) 5049 { 5050 uint_t attr; 5051 5052 ASSERT(TTE_IS_VALID(ttep)); 5053 5054 attr = PROT_READ; 5055 5056 if (TTE_IS_WRITABLE(ttep)) { 5057 attr |= PROT_WRITE; 5058 } 5059 if (TTE_IS_EXECUTABLE(ttep)) { 5060 attr |= PROT_EXEC; 5061 } 5062 if (!TTE_IS_PRIVILEGED(ttep)) { 5063 attr |= PROT_USER; 5064 } 5065 if (TTE_IS_NFO(ttep)) { 5066 attr |= HAT_NOFAULT; 5067 } 5068 if (TTE_IS_NOSYNC(ttep)) { 5069 attr |= HAT_NOSYNC; 5070 } 5071 if (TTE_IS_SIDEFFECT(ttep)) { 5072 attr |= SFMMU_SIDEFFECT; 5073 } 5074 if (!TTE_IS_VCACHEABLE(ttep)) { 5075 attr |= SFMMU_UNCACHEVTTE; 5076 } 5077 if (!TTE_IS_PCACHEABLE(ttep)) { 5078 attr |= SFMMU_UNCACHEPTTE; 5079 } 5080 return (attr); 5081 } 5082 5083 /* 5084 * hat_chgprot is a deprecated hat call. New segment drivers 5085 * should store all attributes and use hat_*attr calls. 5086 * 5087 * Change the protections in the virtual address range 5088 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5089 * then remove write permission, leaving the other 5090 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 5091 * 5092 */ 5093 void 5094 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5095 { 5096 struct hmehash_bucket *hmebp; 5097 hmeblk_tag hblktag; 5098 int hmeshift, hashno = 1; 5099 struct hme_blk *hmeblkp, *list = NULL; 5100 caddr_t endaddr; 5101 cpuset_t cpuset; 5102 demap_range_t dmr; 5103 5104 ASSERT((len & MMU_PAGEOFFSET) == 0); 5105 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5106 5107 if (sfmmup->sfmmu_xhat_provider) { 5108 XHAT_CHGPROT(sfmmup, addr, len, vprot); 5109 return; 5110 } else { 5111 /* 5112 * This must be a CPU HAT. If the address space has 5113 * XHATs attached, change attributes for all of them, 5114 * just in case 5115 */ 5116 ASSERT(sfmmup->sfmmu_as != NULL); 5117 if (sfmmup->sfmmu_as->a_xhat != NULL) 5118 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot); 5119 } 5120 5121 CPUSET_ZERO(cpuset); 5122 5123 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5124 ((addr + len) > (caddr_t)USERLIMIT)) { 5125 panic("user addr %p vprot %x in kernel space", 5126 (void *)addr, vprot); 5127 } 5128 endaddr = addr + len; 5129 hblktag.htag_id = sfmmup; 5130 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5131 DEMAP_RANGE_INIT(sfmmup, &dmr); 5132 5133 while (addr < endaddr) { 5134 hmeshift = HME_HASH_SHIFT(hashno); 5135 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5136 hblktag.htag_rehash = hashno; 5137 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5138 5139 SFMMU_HASH_LOCK(hmebp); 5140 5141 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5142 if (hmeblkp != NULL) { 5143 ASSERT(!hmeblkp->hblk_shared); 5144 /* 5145 * We've encountered a shadow hmeblk so skip the range 5146 * of the next smaller mapping size. 5147 */ 5148 if (hmeblkp->hblk_shw_bit) { 5149 ASSERT(sfmmup != ksfmmup); 5150 ASSERT(hashno > 1); 5151 addr = (caddr_t)P2END((uintptr_t)addr, 5152 TTEBYTES(hashno - 1)); 5153 } else { 5154 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5155 addr, endaddr, &dmr, vprot); 5156 } 5157 SFMMU_HASH_UNLOCK(hmebp); 5158 hashno = 1; 5159 continue; 5160 } 5161 SFMMU_HASH_UNLOCK(hmebp); 5162 5163 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5164 /* 5165 * We have traversed the whole list and rehashed 5166 * if necessary without finding the address to chgprot. 5167 * This is ok so we increment the address by the 5168 * smallest hmeblk range for kernel mappings and the 5169 * largest hmeblk range, to account for shadow hmeblks, 5170 * for user mappings and continue. 5171 */ 5172 if (sfmmup == ksfmmup) 5173 addr = (caddr_t)P2END((uintptr_t)addr, 5174 TTEBYTES(1)); 5175 else 5176 addr = (caddr_t)P2END((uintptr_t)addr, 5177 TTEBYTES(hashno)); 5178 hashno = 1; 5179 } else { 5180 hashno++; 5181 } 5182 } 5183 5184 sfmmu_hblks_list_purge(&list, 0); 5185 DEMAP_RANGE_FLUSH(&dmr); 5186 cpuset = sfmmup->sfmmu_cpusran; 5187 xt_sync(cpuset); 5188 } 5189 5190 /* 5191 * This function chgprots a range of addresses in an hmeblk. It returns the 5192 * next addres that needs to be chgprot. 5193 * It should be called with the hash lock held. 5194 * XXX It shold be possible to optimize chgprot by not flushing every time but 5195 * on the other hand: 5196 * 1. do one flush crosscall. 5197 * 2. only flush if we are increasing permissions (make sure this will work) 5198 */ 5199 static caddr_t 5200 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5201 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5202 { 5203 uint_t pprot; 5204 tte_t tte, ttemod; 5205 struct sf_hment *sfhmep; 5206 uint_t tteflags; 5207 int ttesz; 5208 struct page *pp = NULL; 5209 kmutex_t *pml, *pmtx; 5210 int ret; 5211 int use_demap_range; 5212 #if defined(SF_ERRATA_57) 5213 int check_exec; 5214 #endif 5215 5216 ASSERT(in_hblk_range(hmeblkp, addr)); 5217 ASSERT(hmeblkp->hblk_shw_bit == 0); 5218 ASSERT(!hmeblkp->hblk_shared); 5219 5220 #ifdef DEBUG 5221 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5222 (endaddr < get_hblk_endaddr(hmeblkp))) { 5223 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5224 } 5225 #endif /* DEBUG */ 5226 5227 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5228 ttesz = get_hblk_ttesz(hmeblkp); 5229 5230 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5231 #if defined(SF_ERRATA_57) 5232 check_exec = (sfmmup != ksfmmup) && 5233 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5234 ((vprot & PROT_EXEC) == PROT_EXEC); 5235 #endif 5236 HBLKTOHME(sfhmep, hmeblkp, addr); 5237 5238 /* 5239 * Flush the current demap region if addresses have been 5240 * skipped or the page size doesn't match. 5241 */ 5242 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5243 if (use_demap_range) { 5244 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5245 } else if (dmrp != NULL) { 5246 DEMAP_RANGE_FLUSH(dmrp); 5247 } 5248 5249 while (addr < endaddr) { 5250 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5251 if (TTE_IS_VALID(&tte)) { 5252 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5253 /* 5254 * if the new protection is the same as old 5255 * continue 5256 */ 5257 goto next_addr; 5258 } 5259 pml = NULL; 5260 pp = sfhmep->hme_page; 5261 if (pp) { 5262 pml = sfmmu_mlist_enter(pp); 5263 } 5264 if (pp != sfhmep->hme_page) { 5265 /* 5266 * tte most have been unloaded 5267 * underneath us. Recheck 5268 */ 5269 ASSERT(pml); 5270 sfmmu_mlist_exit(pml); 5271 continue; 5272 } 5273 5274 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5275 5276 ttemod = tte; 5277 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5278 #if defined(SF_ERRATA_57) 5279 if (check_exec && addr < errata57_limit) 5280 ttemod.tte_exec_perm = 0; 5281 #endif 5282 ret = sfmmu_modifytte_try(&tte, &ttemod, 5283 &sfhmep->hme_tte); 5284 5285 if (ret < 0) { 5286 /* tte changed underneath us */ 5287 if (pml) { 5288 sfmmu_mlist_exit(pml); 5289 } 5290 continue; 5291 } 5292 5293 if (tteflags & TTE_HWWR_INT) { 5294 /* 5295 * need to sync if we are clearing modify bit. 5296 */ 5297 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5298 } 5299 5300 if (pp && PP_ISRO(pp)) { 5301 if (pprot & TTE_WRPRM_INT) { 5302 pmtx = sfmmu_page_enter(pp); 5303 PP_CLRRO(pp); 5304 sfmmu_page_exit(pmtx); 5305 } 5306 } 5307 5308 if (ret > 0 && use_demap_range) { 5309 DEMAP_RANGE_MARKPG(dmrp, addr); 5310 } else if (ret > 0) { 5311 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5312 } 5313 5314 if (pml) { 5315 sfmmu_mlist_exit(pml); 5316 } 5317 } 5318 next_addr: 5319 addr += TTEBYTES(ttesz); 5320 sfhmep++; 5321 DEMAP_RANGE_NEXTPG(dmrp); 5322 } 5323 return (addr); 5324 } 5325 5326 /* 5327 * This routine is deprecated and should only be used by hat_chgprot. 5328 * The correct routine is sfmmu_vtop_attr. 5329 * This routine converts virtual page protections to physical ones. It will 5330 * update the tteflags field with the tte mask corresponding to the protections 5331 * affected and it returns the new protections. It will also clear the modify 5332 * bit if we are taking away write permission. This is necessary since the 5333 * modify bit is the hardware permission bit and we need to clear it in order 5334 * to detect write faults. 5335 * It accepts the following special protections: 5336 * ~PROT_WRITE = remove write permissions. 5337 * ~PROT_USER = remove user permissions. 5338 */ 5339 static uint_t 5340 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5341 { 5342 if (vprot == (uint_t)~PROT_WRITE) { 5343 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5344 return (0); /* will cause wrprm to be cleared */ 5345 } 5346 if (vprot == (uint_t)~PROT_USER) { 5347 *tteflagsp = TTE_PRIV_INT; 5348 return (0); /* will cause privprm to be cleared */ 5349 } 5350 if ((vprot == 0) || (vprot == PROT_USER) || 5351 ((vprot & PROT_ALL) != vprot)) { 5352 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5353 } 5354 5355 switch (vprot) { 5356 case (PROT_READ): 5357 case (PROT_EXEC): 5358 case (PROT_EXEC | PROT_READ): 5359 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5360 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5361 case (PROT_WRITE): 5362 case (PROT_WRITE | PROT_READ): 5363 case (PROT_EXEC | PROT_WRITE): 5364 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5365 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5366 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5367 case (PROT_USER | PROT_READ): 5368 case (PROT_USER | PROT_EXEC): 5369 case (PROT_USER | PROT_EXEC | PROT_READ): 5370 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5371 return (0); /* clr prv and wrt */ 5372 case (PROT_USER | PROT_WRITE): 5373 case (PROT_USER | PROT_WRITE | PROT_READ): 5374 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5375 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5376 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5377 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5378 default: 5379 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5380 } 5381 return (0); 5382 } 5383 5384 /* 5385 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5386 * the normal algorithm would take too long for a very large VA range with 5387 * few real mappings. This routine just walks thru all HMEs in the global 5388 * hash table to find and remove mappings. 5389 */ 5390 static void 5391 hat_unload_large_virtual( 5392 struct hat *sfmmup, 5393 caddr_t startaddr, 5394 size_t len, 5395 uint_t flags, 5396 hat_callback_t *callback) 5397 { 5398 struct hmehash_bucket *hmebp; 5399 struct hme_blk *hmeblkp; 5400 struct hme_blk *pr_hblk = NULL; 5401 struct hme_blk *nx_hblk; 5402 struct hme_blk *list = NULL; 5403 int i; 5404 demap_range_t dmr, *dmrp; 5405 cpuset_t cpuset; 5406 caddr_t endaddr = startaddr + len; 5407 caddr_t sa; 5408 caddr_t ea; 5409 caddr_t cb_sa[MAX_CB_ADDR]; 5410 caddr_t cb_ea[MAX_CB_ADDR]; 5411 int addr_cnt = 0; 5412 int a = 0; 5413 5414 if (sfmmup->sfmmu_free) { 5415 dmrp = NULL; 5416 } else { 5417 dmrp = &dmr; 5418 DEMAP_RANGE_INIT(sfmmup, dmrp); 5419 } 5420 5421 /* 5422 * Loop through all the hash buckets of HME blocks looking for matches. 5423 */ 5424 for (i = 0; i <= UHMEHASH_SZ; i++) { 5425 hmebp = &uhme_hash[i]; 5426 SFMMU_HASH_LOCK(hmebp); 5427 hmeblkp = hmebp->hmeblkp; 5428 pr_hblk = NULL; 5429 while (hmeblkp) { 5430 nx_hblk = hmeblkp->hblk_next; 5431 5432 /* 5433 * skip if not this context, if a shadow block or 5434 * if the mapping is not in the requested range 5435 */ 5436 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5437 hmeblkp->hblk_shw_bit || 5438 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5439 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5440 pr_hblk = hmeblkp; 5441 goto next_block; 5442 } 5443 5444 ASSERT(!hmeblkp->hblk_shared); 5445 /* 5446 * unload if there are any current valid mappings 5447 */ 5448 if (hmeblkp->hblk_vcnt != 0 || 5449 hmeblkp->hblk_hmecnt != 0) 5450 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5451 sa, ea, dmrp, flags); 5452 5453 /* 5454 * on unmap we also release the HME block itself, once 5455 * all mappings are gone. 5456 */ 5457 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5458 !hmeblkp->hblk_vcnt && 5459 !hmeblkp->hblk_hmecnt) { 5460 ASSERT(!hmeblkp->hblk_lckcnt); 5461 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5462 &list, 0); 5463 } else { 5464 pr_hblk = hmeblkp; 5465 } 5466 5467 if (callback == NULL) 5468 goto next_block; 5469 5470 /* 5471 * HME blocks may span more than one page, but we may be 5472 * unmapping only one page, so check for a smaller range 5473 * for the callback 5474 */ 5475 if (sa < startaddr) 5476 sa = startaddr; 5477 if (--ea > endaddr) 5478 ea = endaddr - 1; 5479 5480 cb_sa[addr_cnt] = sa; 5481 cb_ea[addr_cnt] = ea; 5482 if (++addr_cnt == MAX_CB_ADDR) { 5483 if (dmrp != NULL) { 5484 DEMAP_RANGE_FLUSH(dmrp); 5485 cpuset = sfmmup->sfmmu_cpusran; 5486 xt_sync(cpuset); 5487 } 5488 5489 for (a = 0; a < MAX_CB_ADDR; ++a) { 5490 callback->hcb_start_addr = cb_sa[a]; 5491 callback->hcb_end_addr = cb_ea[a]; 5492 callback->hcb_function(callback); 5493 } 5494 addr_cnt = 0; 5495 } 5496 5497 next_block: 5498 hmeblkp = nx_hblk; 5499 } 5500 SFMMU_HASH_UNLOCK(hmebp); 5501 } 5502 5503 sfmmu_hblks_list_purge(&list, 0); 5504 if (dmrp != NULL) { 5505 DEMAP_RANGE_FLUSH(dmrp); 5506 cpuset = sfmmup->sfmmu_cpusran; 5507 xt_sync(cpuset); 5508 } 5509 5510 for (a = 0; a < addr_cnt; ++a) { 5511 callback->hcb_start_addr = cb_sa[a]; 5512 callback->hcb_end_addr = cb_ea[a]; 5513 callback->hcb_function(callback); 5514 } 5515 5516 /* 5517 * Check TSB and TLB page sizes if the process isn't exiting. 5518 */ 5519 if (!sfmmup->sfmmu_free) 5520 sfmmu_check_page_sizes(sfmmup, 0); 5521 } 5522 5523 /* 5524 * Unload all the mappings in the range [addr..addr+len). addr and len must 5525 * be MMU_PAGESIZE aligned. 5526 */ 5527 5528 extern struct seg *segkmap; 5529 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5530 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5531 5532 5533 void 5534 hat_unload_callback( 5535 struct hat *sfmmup, 5536 caddr_t addr, 5537 size_t len, 5538 uint_t flags, 5539 hat_callback_t *callback) 5540 { 5541 struct hmehash_bucket *hmebp; 5542 hmeblk_tag hblktag; 5543 int hmeshift, hashno, iskernel; 5544 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5545 caddr_t endaddr; 5546 cpuset_t cpuset; 5547 int addr_count = 0; 5548 int a; 5549 caddr_t cb_start_addr[MAX_CB_ADDR]; 5550 caddr_t cb_end_addr[MAX_CB_ADDR]; 5551 int issegkmap = ISSEGKMAP(sfmmup, addr); 5552 demap_range_t dmr, *dmrp; 5553 5554 if (sfmmup->sfmmu_xhat_provider) { 5555 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback); 5556 return; 5557 } else { 5558 /* 5559 * This must be a CPU HAT. If the address space has 5560 * XHATs attached, unload the mappings for all of them, 5561 * just in case 5562 */ 5563 ASSERT(sfmmup->sfmmu_as != NULL); 5564 if (sfmmup->sfmmu_as->a_xhat != NULL) 5565 xhat_unload_callback_all(sfmmup->sfmmu_as, addr, 5566 len, flags, callback); 5567 } 5568 5569 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5570 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5571 5572 ASSERT(sfmmup != NULL); 5573 ASSERT((len & MMU_PAGEOFFSET) == 0); 5574 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5575 5576 /* 5577 * Probing through a large VA range (say 63 bits) will be slow, even 5578 * at 4 Meg steps between the probes. So, when the virtual address range 5579 * is very large, search the HME entries for what to unload. 5580 * 5581 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5582 * 5583 * UHMEHASH_SZ is number of hash buckets to examine 5584 * 5585 */ 5586 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5587 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5588 return; 5589 } 5590 5591 CPUSET_ZERO(cpuset); 5592 5593 /* 5594 * If the process is exiting, we can save a lot of fuss since 5595 * we'll flush the TLB when we free the ctx anyway. 5596 */ 5597 if (sfmmup->sfmmu_free) { 5598 dmrp = NULL; 5599 } else { 5600 dmrp = &dmr; 5601 DEMAP_RANGE_INIT(sfmmup, dmrp); 5602 } 5603 5604 endaddr = addr + len; 5605 hblktag.htag_id = sfmmup; 5606 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5607 5608 /* 5609 * It is likely for the vm to call unload over a wide range of 5610 * addresses that are actually very sparsely populated by 5611 * translations. In order to speed this up the sfmmu hat supports 5612 * the concept of shadow hmeblks. Dummy large page hmeblks that 5613 * correspond to actual small translations are allocated at tteload 5614 * time and are referred to as shadow hmeblks. Now, during unload 5615 * time, we first check if we have a shadow hmeblk for that 5616 * translation. The absence of one means the corresponding address 5617 * range is empty and can be skipped. 5618 * 5619 * The kernel is an exception to above statement and that is why 5620 * we don't use shadow hmeblks and hash starting from the smallest 5621 * page size. 5622 */ 5623 if (sfmmup == KHATID) { 5624 iskernel = 1; 5625 hashno = TTE64K; 5626 } else { 5627 iskernel = 0; 5628 if (mmu_page_sizes == max_mmu_page_sizes) { 5629 hashno = TTE256M; 5630 } else { 5631 hashno = TTE4M; 5632 } 5633 } 5634 while (addr < endaddr) { 5635 hmeshift = HME_HASH_SHIFT(hashno); 5636 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5637 hblktag.htag_rehash = hashno; 5638 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5639 5640 SFMMU_HASH_LOCK(hmebp); 5641 5642 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5643 if (hmeblkp == NULL) { 5644 /* 5645 * didn't find an hmeblk. skip the appropiate 5646 * address range. 5647 */ 5648 SFMMU_HASH_UNLOCK(hmebp); 5649 if (iskernel) { 5650 if (hashno < mmu_hashcnt) { 5651 hashno++; 5652 continue; 5653 } else { 5654 hashno = TTE64K; 5655 addr = (caddr_t)roundup((uintptr_t)addr 5656 + 1, MMU_PAGESIZE64K); 5657 continue; 5658 } 5659 } 5660 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5661 (1 << hmeshift)); 5662 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5663 ASSERT(hashno == TTE64K); 5664 continue; 5665 } 5666 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5667 hashno = TTE512K; 5668 continue; 5669 } 5670 if (mmu_page_sizes == max_mmu_page_sizes) { 5671 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5672 hashno = TTE4M; 5673 continue; 5674 } 5675 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5676 hashno = TTE32M; 5677 continue; 5678 } 5679 hashno = TTE256M; 5680 continue; 5681 } else { 5682 hashno = TTE4M; 5683 continue; 5684 } 5685 } 5686 ASSERT(hmeblkp); 5687 ASSERT(!hmeblkp->hblk_shared); 5688 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5689 /* 5690 * If the valid count is zero we can skip the range 5691 * mapped by this hmeblk. 5692 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5693 * is used by segment drivers as a hint 5694 * that the mapping resource won't be used any longer. 5695 * The best example of this is during exit(). 5696 */ 5697 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5698 get_hblk_span(hmeblkp)); 5699 if ((flags & HAT_UNLOAD_UNMAP) || 5700 (iskernel && !issegkmap)) { 5701 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5702 &list, 0); 5703 } 5704 SFMMU_HASH_UNLOCK(hmebp); 5705 5706 if (iskernel) { 5707 hashno = TTE64K; 5708 continue; 5709 } 5710 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5711 ASSERT(hashno == TTE64K); 5712 continue; 5713 } 5714 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5715 hashno = TTE512K; 5716 continue; 5717 } 5718 if (mmu_page_sizes == max_mmu_page_sizes) { 5719 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5720 hashno = TTE4M; 5721 continue; 5722 } 5723 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5724 hashno = TTE32M; 5725 continue; 5726 } 5727 hashno = TTE256M; 5728 continue; 5729 } else { 5730 hashno = TTE4M; 5731 continue; 5732 } 5733 } 5734 if (hmeblkp->hblk_shw_bit) { 5735 /* 5736 * If we encounter a shadow hmeblk we know there is 5737 * smaller sized hmeblks mapping the same address space. 5738 * Decrement the hash size and rehash. 5739 */ 5740 ASSERT(sfmmup != KHATID); 5741 hashno--; 5742 SFMMU_HASH_UNLOCK(hmebp); 5743 continue; 5744 } 5745 5746 /* 5747 * track callback address ranges. 5748 * only start a new range when it's not contiguous 5749 */ 5750 if (callback != NULL) { 5751 if (addr_count > 0 && 5752 addr == cb_end_addr[addr_count - 1]) 5753 --addr_count; 5754 else 5755 cb_start_addr[addr_count] = addr; 5756 } 5757 5758 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5759 dmrp, flags); 5760 5761 if (callback != NULL) 5762 cb_end_addr[addr_count++] = addr; 5763 5764 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5765 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5766 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5767 } 5768 SFMMU_HASH_UNLOCK(hmebp); 5769 5770 /* 5771 * Notify our caller as to exactly which pages 5772 * have been unloaded. We do these in clumps, 5773 * to minimize the number of xt_sync()s that need to occur. 5774 */ 5775 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5776 if (dmrp != NULL) { 5777 DEMAP_RANGE_FLUSH(dmrp); 5778 cpuset = sfmmup->sfmmu_cpusran; 5779 xt_sync(cpuset); 5780 } 5781 5782 for (a = 0; a < MAX_CB_ADDR; ++a) { 5783 callback->hcb_start_addr = cb_start_addr[a]; 5784 callback->hcb_end_addr = cb_end_addr[a]; 5785 callback->hcb_function(callback); 5786 } 5787 addr_count = 0; 5788 } 5789 if (iskernel) { 5790 hashno = TTE64K; 5791 continue; 5792 } 5793 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5794 ASSERT(hashno == TTE64K); 5795 continue; 5796 } 5797 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5798 hashno = TTE512K; 5799 continue; 5800 } 5801 if (mmu_page_sizes == max_mmu_page_sizes) { 5802 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5803 hashno = TTE4M; 5804 continue; 5805 } 5806 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5807 hashno = TTE32M; 5808 continue; 5809 } 5810 hashno = TTE256M; 5811 } else { 5812 hashno = TTE4M; 5813 } 5814 } 5815 5816 sfmmu_hblks_list_purge(&list, 0); 5817 if (dmrp != NULL) { 5818 DEMAP_RANGE_FLUSH(dmrp); 5819 cpuset = sfmmup->sfmmu_cpusran; 5820 xt_sync(cpuset); 5821 } 5822 if (callback && addr_count != 0) { 5823 for (a = 0; a < addr_count; ++a) { 5824 callback->hcb_start_addr = cb_start_addr[a]; 5825 callback->hcb_end_addr = cb_end_addr[a]; 5826 callback->hcb_function(callback); 5827 } 5828 } 5829 5830 /* 5831 * Check TSB and TLB page sizes if the process isn't exiting. 5832 */ 5833 if (!sfmmup->sfmmu_free) 5834 sfmmu_check_page_sizes(sfmmup, 0); 5835 } 5836 5837 /* 5838 * Unload all the mappings in the range [addr..addr+len). addr and len must 5839 * be MMU_PAGESIZE aligned. 5840 */ 5841 void 5842 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5843 { 5844 if (sfmmup->sfmmu_xhat_provider) { 5845 XHAT_UNLOAD(sfmmup, addr, len, flags); 5846 return; 5847 } 5848 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5849 } 5850 5851 5852 /* 5853 * Find the largest mapping size for this page. 5854 */ 5855 int 5856 fnd_mapping_sz(page_t *pp) 5857 { 5858 int sz; 5859 int p_index; 5860 5861 p_index = PP_MAPINDEX(pp); 5862 5863 sz = 0; 5864 p_index >>= 1; /* don't care about 8K bit */ 5865 for (; p_index; p_index >>= 1) { 5866 sz++; 5867 } 5868 5869 return (sz); 5870 } 5871 5872 /* 5873 * This function unloads a range of addresses for an hmeblk. 5874 * It returns the next address to be unloaded. 5875 * It should be called with the hash lock held. 5876 */ 5877 static caddr_t 5878 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5879 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5880 { 5881 tte_t tte, ttemod; 5882 struct sf_hment *sfhmep; 5883 int ttesz; 5884 long ttecnt; 5885 page_t *pp; 5886 kmutex_t *pml; 5887 int ret; 5888 int use_demap_range; 5889 5890 ASSERT(in_hblk_range(hmeblkp, addr)); 5891 ASSERT(!hmeblkp->hblk_shw_bit); 5892 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5893 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5894 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5895 5896 #ifdef DEBUG 5897 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5898 (endaddr < get_hblk_endaddr(hmeblkp))) { 5899 panic("sfmmu_hblk_unload: partial unload of large page"); 5900 } 5901 #endif /* DEBUG */ 5902 5903 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5904 ttesz = get_hblk_ttesz(hmeblkp); 5905 5906 use_demap_range = ((dmrp == NULL) || 5907 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5908 5909 if (use_demap_range) { 5910 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5911 } else if (dmrp != NULL) { 5912 DEMAP_RANGE_FLUSH(dmrp); 5913 } 5914 ttecnt = 0; 5915 HBLKTOHME(sfhmep, hmeblkp, addr); 5916 5917 while (addr < endaddr) { 5918 pml = NULL; 5919 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5920 if (TTE_IS_VALID(&tte)) { 5921 pp = sfhmep->hme_page; 5922 if (pp != NULL) { 5923 pml = sfmmu_mlist_enter(pp); 5924 } 5925 5926 /* 5927 * Verify if hme still points to 'pp' now that 5928 * we have p_mapping lock. 5929 */ 5930 if (sfhmep->hme_page != pp) { 5931 if (pp != NULL && sfhmep->hme_page != NULL) { 5932 ASSERT(pml != NULL); 5933 sfmmu_mlist_exit(pml); 5934 /* Re-start this iteration. */ 5935 continue; 5936 } 5937 ASSERT((pp != NULL) && 5938 (sfhmep->hme_page == NULL)); 5939 goto tte_unloaded; 5940 } 5941 5942 /* 5943 * This point on we have both HASH and p_mapping 5944 * lock. 5945 */ 5946 ASSERT(pp == sfhmep->hme_page); 5947 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5948 5949 /* 5950 * We need to loop on modify tte because it is 5951 * possible for pagesync to come along and 5952 * change the software bits beneath us. 5953 * 5954 * Page_unload can also invalidate the tte after 5955 * we read tte outside of p_mapping lock. 5956 */ 5957 again: 5958 ttemod = tte; 5959 5960 TTE_SET_INVALID(&ttemod); 5961 ret = sfmmu_modifytte_try(&tte, &ttemod, 5962 &sfhmep->hme_tte); 5963 5964 if (ret <= 0) { 5965 if (TTE_IS_VALID(&tte)) { 5966 ASSERT(ret < 0); 5967 goto again; 5968 } 5969 if (pp != NULL) { 5970 panic("sfmmu_hblk_unload: pp = 0x%p " 5971 "tte became invalid under mlist" 5972 " lock = 0x%p", (void *)pp, 5973 (void *)pml); 5974 } 5975 continue; 5976 } 5977 5978 if (!(flags & HAT_UNLOAD_NOSYNC)) { 5979 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5980 } 5981 5982 /* 5983 * Ok- we invalidated the tte. Do the rest of the job. 5984 */ 5985 ttecnt++; 5986 5987 if (flags & HAT_UNLOAD_UNLOCK) { 5988 ASSERT(hmeblkp->hblk_lckcnt > 0); 5989 atomic_add_32(&hmeblkp->hblk_lckcnt, -1); 5990 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 5991 } 5992 5993 /* 5994 * Normally we would need to flush the page 5995 * from the virtual cache at this point in 5996 * order to prevent a potential cache alias 5997 * inconsistency. 5998 * The particular scenario we need to worry 5999 * about is: 6000 * Given: va1 and va2 are two virtual address 6001 * that alias and map the same physical 6002 * address. 6003 * 1. mapping exists from va1 to pa and data 6004 * has been read into the cache. 6005 * 2. unload va1. 6006 * 3. load va2 and modify data using va2. 6007 * 4 unload va2. 6008 * 5. load va1 and reference data. Unless we 6009 * flush the data cache when we unload we will 6010 * get stale data. 6011 * Fortunately, page coloring eliminates the 6012 * above scenario by remembering the color a 6013 * physical page was last or is currently 6014 * mapped to. Now, we delay the flush until 6015 * the loading of translations. Only when the 6016 * new translation is of a different color 6017 * are we forced to flush. 6018 */ 6019 if (use_demap_range) { 6020 /* 6021 * Mark this page as needing a demap. 6022 */ 6023 DEMAP_RANGE_MARKPG(dmrp, addr); 6024 } else { 6025 ASSERT(sfmmup != NULL); 6026 ASSERT(!hmeblkp->hblk_shared); 6027 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6028 sfmmup->sfmmu_free, 0); 6029 } 6030 6031 if (pp) { 6032 /* 6033 * Remove the hment from the mapping list 6034 */ 6035 ASSERT(hmeblkp->hblk_hmecnt > 0); 6036 6037 /* 6038 * Again, we cannot 6039 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6040 */ 6041 HME_SUB(sfhmep, pp); 6042 membar_stst(); 6043 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 6044 } 6045 6046 ASSERT(hmeblkp->hblk_vcnt > 0); 6047 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 6048 6049 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6050 !hmeblkp->hblk_lckcnt); 6051 6052 #ifdef VAC 6053 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6054 if (PP_ISTNC(pp)) { 6055 /* 6056 * If page was temporary 6057 * uncached, try to recache 6058 * it. Note that HME_SUB() was 6059 * called above so p_index and 6060 * mlist had been updated. 6061 */ 6062 conv_tnc(pp, ttesz); 6063 } else if (pp->p_mapping == NULL) { 6064 ASSERT(kpm_enable); 6065 /* 6066 * Page is marked to be in VAC conflict 6067 * to an existing kpm mapping and/or is 6068 * kpm mapped using only the regular 6069 * pagesize. 6070 */ 6071 sfmmu_kpm_hme_unload(pp); 6072 } 6073 } 6074 #endif /* VAC */ 6075 } else if ((pp = sfhmep->hme_page) != NULL) { 6076 /* 6077 * TTE is invalid but the hme 6078 * still exists. let pageunload 6079 * complete its job. 6080 */ 6081 ASSERT(pml == NULL); 6082 pml = sfmmu_mlist_enter(pp); 6083 if (sfhmep->hme_page != NULL) { 6084 sfmmu_mlist_exit(pml); 6085 continue; 6086 } 6087 ASSERT(sfhmep->hme_page == NULL); 6088 } else if (hmeblkp->hblk_hmecnt != 0) { 6089 /* 6090 * pageunload may have not finished decrementing 6091 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6092 * wait for pageunload to finish. Rely on pageunload 6093 * to decrement hblk_hmecnt after hblk_vcnt. 6094 */ 6095 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6096 ASSERT(pml == NULL); 6097 if (pf_is_memory(pfn)) { 6098 pp = page_numtopp_nolock(pfn); 6099 if (pp != NULL) { 6100 pml = sfmmu_mlist_enter(pp); 6101 sfmmu_mlist_exit(pml); 6102 pml = NULL; 6103 } 6104 } 6105 } 6106 6107 tte_unloaded: 6108 /* 6109 * At this point, the tte we are looking at 6110 * should be unloaded, and hme has been unlinked 6111 * from page too. This is important because in 6112 * pageunload, it does ttesync() then HME_SUB. 6113 * We need to make sure HME_SUB has been completed 6114 * so we know ttesync() has been completed. Otherwise, 6115 * at exit time, after return from hat layer, VM will 6116 * release as structure which hat_setstat() (called 6117 * by ttesync()) needs. 6118 */ 6119 #ifdef DEBUG 6120 { 6121 tte_t dtte; 6122 6123 ASSERT(sfhmep->hme_page == NULL); 6124 6125 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6126 ASSERT(!TTE_IS_VALID(&dtte)); 6127 } 6128 #endif 6129 6130 if (pml) { 6131 sfmmu_mlist_exit(pml); 6132 } 6133 6134 addr += TTEBYTES(ttesz); 6135 sfhmep++; 6136 DEMAP_RANGE_NEXTPG(dmrp); 6137 } 6138 /* 6139 * For shared hmeblks this routine is only called when region is freed 6140 * and no longer referenced. So no need to decrement ttecnt 6141 * in the region structure here. 6142 */ 6143 if (ttecnt > 0 && sfmmup != NULL) { 6144 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6145 } 6146 return (addr); 6147 } 6148 6149 /* 6150 * Invalidate a virtual address range for the local CPU. 6151 * For best performance ensure that the va range is completely 6152 * mapped, otherwise the entire TLB will be flushed. 6153 */ 6154 void 6155 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size) 6156 { 6157 ssize_t sz; 6158 caddr_t endva = va + size; 6159 6160 while (va < endva) { 6161 sz = hat_getpagesize(sfmmup, va); 6162 if (sz < 0) { 6163 vtag_flushall(); 6164 break; 6165 } 6166 vtag_flushpage(va, (uint64_t)sfmmup); 6167 va += sz; 6168 } 6169 } 6170 6171 /* 6172 * Synchronize all the mappings in the range [addr..addr+len). 6173 * Can be called with clearflag having two states: 6174 * HAT_SYNC_DONTZERO means just return the rm stats 6175 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6176 */ 6177 void 6178 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6179 { 6180 struct hmehash_bucket *hmebp; 6181 hmeblk_tag hblktag; 6182 int hmeshift, hashno = 1; 6183 struct hme_blk *hmeblkp, *list = NULL; 6184 caddr_t endaddr; 6185 cpuset_t cpuset; 6186 6187 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 6188 ASSERT((sfmmup == ksfmmup) || 6189 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6190 ASSERT((len & MMU_PAGEOFFSET) == 0); 6191 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6192 (clearflag == HAT_SYNC_ZERORM)); 6193 6194 CPUSET_ZERO(cpuset); 6195 6196 endaddr = addr + len; 6197 hblktag.htag_id = sfmmup; 6198 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6199 6200 /* 6201 * Spitfire supports 4 page sizes. 6202 * Most pages are expected to be of the smallest page 6203 * size (8K) and these will not need to be rehashed. 64K 6204 * pages also don't need to be rehashed because the an hmeblk 6205 * spans 64K of address space. 512K pages might need 1 rehash and 6206 * and 4M pages 2 rehashes. 6207 */ 6208 while (addr < endaddr) { 6209 hmeshift = HME_HASH_SHIFT(hashno); 6210 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6211 hblktag.htag_rehash = hashno; 6212 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6213 6214 SFMMU_HASH_LOCK(hmebp); 6215 6216 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6217 if (hmeblkp != NULL) { 6218 ASSERT(!hmeblkp->hblk_shared); 6219 /* 6220 * We've encountered a shadow hmeblk so skip the range 6221 * of the next smaller mapping size. 6222 */ 6223 if (hmeblkp->hblk_shw_bit) { 6224 ASSERT(sfmmup != ksfmmup); 6225 ASSERT(hashno > 1); 6226 addr = (caddr_t)P2END((uintptr_t)addr, 6227 TTEBYTES(hashno - 1)); 6228 } else { 6229 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6230 addr, endaddr, clearflag); 6231 } 6232 SFMMU_HASH_UNLOCK(hmebp); 6233 hashno = 1; 6234 continue; 6235 } 6236 SFMMU_HASH_UNLOCK(hmebp); 6237 6238 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6239 /* 6240 * We have traversed the whole list and rehashed 6241 * if necessary without finding the address to sync. 6242 * This is ok so we increment the address by the 6243 * smallest hmeblk range for kernel mappings and the 6244 * largest hmeblk range, to account for shadow hmeblks, 6245 * for user mappings and continue. 6246 */ 6247 if (sfmmup == ksfmmup) 6248 addr = (caddr_t)P2END((uintptr_t)addr, 6249 TTEBYTES(1)); 6250 else 6251 addr = (caddr_t)P2END((uintptr_t)addr, 6252 TTEBYTES(hashno)); 6253 hashno = 1; 6254 } else { 6255 hashno++; 6256 } 6257 } 6258 sfmmu_hblks_list_purge(&list, 0); 6259 cpuset = sfmmup->sfmmu_cpusran; 6260 xt_sync(cpuset); 6261 } 6262 6263 static caddr_t 6264 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6265 caddr_t endaddr, int clearflag) 6266 { 6267 tte_t tte, ttemod; 6268 struct sf_hment *sfhmep; 6269 int ttesz; 6270 struct page *pp; 6271 kmutex_t *pml; 6272 int ret; 6273 6274 ASSERT(hmeblkp->hblk_shw_bit == 0); 6275 ASSERT(!hmeblkp->hblk_shared); 6276 6277 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6278 6279 ttesz = get_hblk_ttesz(hmeblkp); 6280 HBLKTOHME(sfhmep, hmeblkp, addr); 6281 6282 while (addr < endaddr) { 6283 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6284 if (TTE_IS_VALID(&tte)) { 6285 pml = NULL; 6286 pp = sfhmep->hme_page; 6287 if (pp) { 6288 pml = sfmmu_mlist_enter(pp); 6289 } 6290 if (pp != sfhmep->hme_page) { 6291 /* 6292 * tte most have been unloaded 6293 * underneath us. Recheck 6294 */ 6295 ASSERT(pml); 6296 sfmmu_mlist_exit(pml); 6297 continue; 6298 } 6299 6300 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6301 6302 if (clearflag == HAT_SYNC_ZERORM) { 6303 ttemod = tte; 6304 TTE_CLR_RM(&ttemod); 6305 ret = sfmmu_modifytte_try(&tte, &ttemod, 6306 &sfhmep->hme_tte); 6307 if (ret < 0) { 6308 if (pml) { 6309 sfmmu_mlist_exit(pml); 6310 } 6311 continue; 6312 } 6313 6314 if (ret > 0) { 6315 sfmmu_tlb_demap(addr, sfmmup, 6316 hmeblkp, 0, 0); 6317 } 6318 } 6319 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6320 if (pml) { 6321 sfmmu_mlist_exit(pml); 6322 } 6323 } 6324 addr += TTEBYTES(ttesz); 6325 sfhmep++; 6326 } 6327 return (addr); 6328 } 6329 6330 /* 6331 * This function will sync a tte to the page struct and it will 6332 * update the hat stats. Currently it allows us to pass a NULL pp 6333 * and we will simply update the stats. We may want to change this 6334 * so we only keep stats for pages backed by pp's. 6335 */ 6336 static void 6337 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6338 { 6339 uint_t rm = 0; 6340 int sz; 6341 pgcnt_t npgs; 6342 6343 ASSERT(TTE_IS_VALID(ttep)); 6344 6345 if (TTE_IS_NOSYNC(ttep)) { 6346 return; 6347 } 6348 6349 if (TTE_IS_REF(ttep)) { 6350 rm = P_REF; 6351 } 6352 if (TTE_IS_MOD(ttep)) { 6353 rm |= P_MOD; 6354 } 6355 6356 if (rm == 0) { 6357 return; 6358 } 6359 6360 sz = TTE_CSZ(ttep); 6361 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6362 int i; 6363 caddr_t vaddr = addr; 6364 6365 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6366 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6367 } 6368 6369 } 6370 6371 /* 6372 * XXX I want to use cas to update nrm bits but they 6373 * currently belong in common/vm and not in hat where 6374 * they should be. 6375 * The nrm bits are protected by the same mutex as 6376 * the one that protects the page's mapping list. 6377 */ 6378 if (!pp) 6379 return; 6380 ASSERT(sfmmu_mlist_held(pp)); 6381 /* 6382 * If the tte is for a large page, we need to sync all the 6383 * pages covered by the tte. 6384 */ 6385 if (sz != TTE8K) { 6386 ASSERT(pp->p_szc != 0); 6387 pp = PP_GROUPLEADER(pp, sz); 6388 ASSERT(sfmmu_mlist_held(pp)); 6389 } 6390 6391 /* Get number of pages from tte size. */ 6392 npgs = TTEPAGES(sz); 6393 6394 do { 6395 ASSERT(pp); 6396 ASSERT(sfmmu_mlist_held(pp)); 6397 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6398 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6399 hat_page_setattr(pp, rm); 6400 6401 /* 6402 * Are we done? If not, we must have a large mapping. 6403 * For large mappings we need to sync the rest of the pages 6404 * covered by this tte; goto the next page. 6405 */ 6406 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6407 } 6408 6409 /* 6410 * Execute pre-callback handler of each pa_hment linked to pp 6411 * 6412 * Inputs: 6413 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6414 * capture_cpus: pointer to return value (below) 6415 * 6416 * Returns: 6417 * Propagates the subsystem callback return values back to the caller; 6418 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6419 * is zero if all of the pa_hments are of a type that do not require 6420 * capturing CPUs prior to suspending the mapping, else it is 1. 6421 */ 6422 static int 6423 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6424 { 6425 struct sf_hment *sfhmep; 6426 struct pa_hment *pahmep; 6427 int (*f)(caddr_t, uint_t, uint_t, void *); 6428 int ret; 6429 id_t id; 6430 int locked = 0; 6431 kmutex_t *pml; 6432 6433 ASSERT(PAGE_EXCL(pp)); 6434 if (!sfmmu_mlist_held(pp)) { 6435 pml = sfmmu_mlist_enter(pp); 6436 locked = 1; 6437 } 6438 6439 if (capture_cpus) 6440 *capture_cpus = 0; 6441 6442 top: 6443 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6444 /* 6445 * skip sf_hments corresponding to VA<->PA mappings; 6446 * for pa_hment's, hme_tte.ll is zero 6447 */ 6448 if (!IS_PAHME(sfhmep)) 6449 continue; 6450 6451 pahmep = sfhmep->hme_data; 6452 ASSERT(pahmep != NULL); 6453 6454 /* 6455 * skip if pre-handler has been called earlier in this loop 6456 */ 6457 if (pahmep->flags & flag) 6458 continue; 6459 6460 id = pahmep->cb_id; 6461 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6462 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6463 *capture_cpus = 1; 6464 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6465 pahmep->flags |= flag; 6466 continue; 6467 } 6468 6469 /* 6470 * Drop the mapping list lock to avoid locking order issues. 6471 */ 6472 if (locked) 6473 sfmmu_mlist_exit(pml); 6474 6475 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6476 if (ret != 0) 6477 return (ret); /* caller must do the cleanup */ 6478 6479 if (locked) { 6480 pml = sfmmu_mlist_enter(pp); 6481 pahmep->flags |= flag; 6482 goto top; 6483 } 6484 6485 pahmep->flags |= flag; 6486 } 6487 6488 if (locked) 6489 sfmmu_mlist_exit(pml); 6490 6491 return (0); 6492 } 6493 6494 /* 6495 * Execute post-callback handler of each pa_hment linked to pp 6496 * 6497 * Same overall assumptions and restrictions apply as for 6498 * hat_pageprocess_precallbacks(). 6499 */ 6500 static void 6501 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6502 { 6503 pfn_t pgpfn = pp->p_pagenum; 6504 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6505 pfn_t newpfn; 6506 struct sf_hment *sfhmep; 6507 struct pa_hment *pahmep; 6508 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6509 id_t id; 6510 int locked = 0; 6511 kmutex_t *pml; 6512 6513 ASSERT(PAGE_EXCL(pp)); 6514 if (!sfmmu_mlist_held(pp)) { 6515 pml = sfmmu_mlist_enter(pp); 6516 locked = 1; 6517 } 6518 6519 top: 6520 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6521 /* 6522 * skip sf_hments corresponding to VA<->PA mappings; 6523 * for pa_hment's, hme_tte.ll is zero 6524 */ 6525 if (!IS_PAHME(sfhmep)) 6526 continue; 6527 6528 pahmep = sfhmep->hme_data; 6529 ASSERT(pahmep != NULL); 6530 6531 if ((pahmep->flags & flag) == 0) 6532 continue; 6533 6534 pahmep->flags &= ~flag; 6535 6536 id = pahmep->cb_id; 6537 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6538 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6539 continue; 6540 6541 /* 6542 * Convert the base page PFN into the constituent PFN 6543 * which is needed by the callback handler. 6544 */ 6545 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6546 6547 /* 6548 * Drop the mapping list lock to avoid locking order issues. 6549 */ 6550 if (locked) 6551 sfmmu_mlist_exit(pml); 6552 6553 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6554 != 0) 6555 panic("sfmmu: posthandler failed"); 6556 6557 if (locked) { 6558 pml = sfmmu_mlist_enter(pp); 6559 goto top; 6560 } 6561 } 6562 6563 if (locked) 6564 sfmmu_mlist_exit(pml); 6565 } 6566 6567 /* 6568 * Suspend locked kernel mapping 6569 */ 6570 void 6571 hat_pagesuspend(struct page *pp) 6572 { 6573 struct sf_hment *sfhmep; 6574 sfmmu_t *sfmmup; 6575 tte_t tte, ttemod; 6576 struct hme_blk *hmeblkp; 6577 caddr_t addr; 6578 int index, cons; 6579 cpuset_t cpuset; 6580 6581 ASSERT(PAGE_EXCL(pp)); 6582 ASSERT(sfmmu_mlist_held(pp)); 6583 6584 mutex_enter(&kpr_suspendlock); 6585 6586 /* 6587 * We're about to suspend a kernel mapping so mark this thread as 6588 * non-traceable by DTrace. This prevents us from running into issues 6589 * with probe context trying to touch a suspended page 6590 * in the relocation codepath itself. 6591 */ 6592 curthread->t_flag |= T_DONTDTRACE; 6593 6594 index = PP_MAPINDEX(pp); 6595 cons = TTE8K; 6596 6597 retry: 6598 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6599 6600 if (IS_PAHME(sfhmep)) 6601 continue; 6602 6603 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6604 continue; 6605 6606 /* 6607 * Loop until we successfully set the suspend bit in 6608 * the TTE. 6609 */ 6610 again: 6611 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6612 ASSERT(TTE_IS_VALID(&tte)); 6613 6614 ttemod = tte; 6615 TTE_SET_SUSPEND(&ttemod); 6616 if (sfmmu_modifytte_try(&tte, &ttemod, 6617 &sfhmep->hme_tte) < 0) 6618 goto again; 6619 6620 /* 6621 * Invalidate TSB entry 6622 */ 6623 hmeblkp = sfmmu_hmetohblk(sfhmep); 6624 6625 sfmmup = hblktosfmmu(hmeblkp); 6626 ASSERT(sfmmup == ksfmmup); 6627 ASSERT(!hmeblkp->hblk_shared); 6628 6629 addr = tte_to_vaddr(hmeblkp, tte); 6630 6631 /* 6632 * No need to make sure that the TSB for this sfmmu is 6633 * not being relocated since it is ksfmmup and thus it 6634 * will never be relocated. 6635 */ 6636 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6637 6638 /* 6639 * Update xcall stats 6640 */ 6641 cpuset = cpu_ready_set; 6642 CPUSET_DEL(cpuset, CPU->cpu_id); 6643 6644 /* LINTED: constant in conditional context */ 6645 SFMMU_XCALL_STATS(ksfmmup); 6646 6647 /* 6648 * Flush TLB entry on remote CPU's 6649 */ 6650 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6651 (uint64_t)ksfmmup); 6652 xt_sync(cpuset); 6653 6654 /* 6655 * Flush TLB entry on local CPU 6656 */ 6657 vtag_flushpage(addr, (uint64_t)ksfmmup); 6658 } 6659 6660 while (index != 0) { 6661 index = index >> 1; 6662 if (index != 0) 6663 cons++; 6664 if (index & 0x1) { 6665 pp = PP_GROUPLEADER(pp, cons); 6666 goto retry; 6667 } 6668 } 6669 } 6670 6671 #ifdef DEBUG 6672 6673 #define N_PRLE 1024 6674 struct prle { 6675 page_t *targ; 6676 page_t *repl; 6677 int status; 6678 int pausecpus; 6679 hrtime_t whence; 6680 }; 6681 6682 static struct prle page_relocate_log[N_PRLE]; 6683 static int prl_entry; 6684 static kmutex_t prl_mutex; 6685 6686 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6687 mutex_enter(&prl_mutex); \ 6688 page_relocate_log[prl_entry].targ = *(t); \ 6689 page_relocate_log[prl_entry].repl = *(r); \ 6690 page_relocate_log[prl_entry].status = (s); \ 6691 page_relocate_log[prl_entry].pausecpus = (p); \ 6692 page_relocate_log[prl_entry].whence = gethrtime(); \ 6693 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6694 mutex_exit(&prl_mutex); 6695 6696 #else /* !DEBUG */ 6697 #define PAGE_RELOCATE_LOG(t, r, s, p) 6698 #endif 6699 6700 /* 6701 * Core Kernel Page Relocation Algorithm 6702 * 6703 * Input: 6704 * 6705 * target : constituent pages are SE_EXCL locked. 6706 * replacement: constituent pages are SE_EXCL locked. 6707 * 6708 * Output: 6709 * 6710 * nrelocp: number of pages relocated 6711 */ 6712 int 6713 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6714 { 6715 page_t *targ, *repl; 6716 page_t *tpp, *rpp; 6717 kmutex_t *low, *high; 6718 spgcnt_t npages, i; 6719 page_t *pl = NULL; 6720 int old_pil; 6721 cpuset_t cpuset; 6722 int cap_cpus; 6723 int ret; 6724 #ifdef VAC 6725 int cflags = 0; 6726 #endif 6727 6728 if (!kcage_on || PP_ISNORELOC(*target)) { 6729 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6730 return (EAGAIN); 6731 } 6732 6733 mutex_enter(&kpr_mutex); 6734 kreloc_thread = curthread; 6735 6736 targ = *target; 6737 repl = *replacement; 6738 ASSERT(repl != NULL); 6739 ASSERT(targ->p_szc == repl->p_szc); 6740 6741 npages = page_get_pagecnt(targ->p_szc); 6742 6743 /* 6744 * unload VA<->PA mappings that are not locked 6745 */ 6746 tpp = targ; 6747 for (i = 0; i < npages; i++) { 6748 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6749 tpp++; 6750 } 6751 6752 /* 6753 * Do "presuspend" callbacks, in a context from which we can still 6754 * block as needed. Note that we don't hold the mapping list lock 6755 * of "targ" at this point due to potential locking order issues; 6756 * we assume that between the hat_pageunload() above and holding 6757 * the SE_EXCL lock that the mapping list *cannot* change at this 6758 * point. 6759 */ 6760 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6761 if (ret != 0) { 6762 /* 6763 * EIO translates to fatal error, for all others cleanup 6764 * and return EAGAIN. 6765 */ 6766 ASSERT(ret != EIO); 6767 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6768 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6769 kreloc_thread = NULL; 6770 mutex_exit(&kpr_mutex); 6771 return (EAGAIN); 6772 } 6773 6774 /* 6775 * acquire p_mapping list lock for both the target and replacement 6776 * root pages. 6777 * 6778 * low and high refer to the need to grab the mlist locks in a 6779 * specific order in order to prevent race conditions. Thus the 6780 * lower lock must be grabbed before the higher lock. 6781 * 6782 * This will block hat_unload's accessing p_mapping list. Since 6783 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6784 * blocked. Thus, no one else will be accessing the p_mapping list 6785 * while we suspend and reload the locked mapping below. 6786 */ 6787 tpp = targ; 6788 rpp = repl; 6789 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6790 6791 kpreempt_disable(); 6792 6793 /* 6794 * We raise our PIL to 13 so that we don't get captured by 6795 * another CPU or pinned by an interrupt thread. We can't go to 6796 * PIL 14 since the nexus driver(s) may need to interrupt at 6797 * that level in the case of IOMMU pseudo mappings. 6798 */ 6799 cpuset = cpu_ready_set; 6800 CPUSET_DEL(cpuset, CPU->cpu_id); 6801 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6802 old_pil = splr(XCALL_PIL); 6803 } else { 6804 old_pil = -1; 6805 xc_attention(cpuset); 6806 } 6807 ASSERT(getpil() == XCALL_PIL); 6808 6809 /* 6810 * Now do suspend callbacks. In the case of an IOMMU mapping 6811 * this will suspend all DMA activity to the page while it is 6812 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6813 * may be captured at this point we should have acquired any needed 6814 * locks in the presuspend callback. 6815 */ 6816 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6817 if (ret != 0) { 6818 repl = targ; 6819 goto suspend_fail; 6820 } 6821 6822 /* 6823 * Raise the PIL yet again, this time to block all high-level 6824 * interrupts on this CPU. This is necessary to prevent an 6825 * interrupt routine from pinning the thread which holds the 6826 * mapping suspended and then touching the suspended page. 6827 * 6828 * Once the page is suspended we also need to be careful to 6829 * avoid calling any functions which touch any seg_kmem memory 6830 * since that memory may be backed by the very page we are 6831 * relocating in here! 6832 */ 6833 hat_pagesuspend(targ); 6834 6835 /* 6836 * Now that we are confident everybody has stopped using this page, 6837 * copy the page contents. Note we use a physical copy to prevent 6838 * locking issues and to avoid fpRAS because we can't handle it in 6839 * this context. 6840 */ 6841 for (i = 0; i < npages; i++, tpp++, rpp++) { 6842 #ifdef VAC 6843 /* 6844 * If the replacement has a different vcolor than 6845 * the one being replacd, we need to handle VAC 6846 * consistency for it just as we were setting up 6847 * a new mapping to it. 6848 */ 6849 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6850 (tpp->p_vcolor != rpp->p_vcolor) && 6851 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6852 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6853 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6854 rpp->p_pagenum); 6855 } 6856 #endif 6857 /* 6858 * Copy the contents of the page. 6859 */ 6860 ppcopy_kernel(tpp, rpp); 6861 } 6862 6863 tpp = targ; 6864 rpp = repl; 6865 for (i = 0; i < npages; i++, tpp++, rpp++) { 6866 /* 6867 * Copy attributes. VAC consistency was handled above, 6868 * if required. 6869 */ 6870 rpp->p_nrm = tpp->p_nrm; 6871 tpp->p_nrm = 0; 6872 rpp->p_index = tpp->p_index; 6873 tpp->p_index = 0; 6874 #ifdef VAC 6875 rpp->p_vcolor = tpp->p_vcolor; 6876 #endif 6877 } 6878 6879 /* 6880 * First, unsuspend the page, if we set the suspend bit, and transfer 6881 * the mapping list from the target page to the replacement page. 6882 * Next process postcallbacks; since pa_hment's are linked only to the 6883 * p_mapping list of root page, we don't iterate over the constituent 6884 * pages. 6885 */ 6886 hat_pagereload(targ, repl); 6887 6888 suspend_fail: 6889 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6890 6891 /* 6892 * Now lower our PIL and release any captured CPUs since we 6893 * are out of the "danger zone". After this it will again be 6894 * safe to acquire adaptive mutex locks, or to drop them... 6895 */ 6896 if (old_pil != -1) { 6897 splx(old_pil); 6898 } else { 6899 xc_dismissed(cpuset); 6900 } 6901 6902 kpreempt_enable(); 6903 6904 sfmmu_mlist_reloc_exit(low, high); 6905 6906 /* 6907 * Postsuspend callbacks should drop any locks held across 6908 * the suspend callbacks. As before, we don't hold the mapping 6909 * list lock at this point.. our assumption is that the mapping 6910 * list still can't change due to our holding SE_EXCL lock and 6911 * there being no unlocked mappings left. Hence the restriction 6912 * on calling context to hat_delete_callback() 6913 */ 6914 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6915 if (ret != 0) { 6916 /* 6917 * The second presuspend call failed: we got here through 6918 * the suspend_fail label above. 6919 */ 6920 ASSERT(ret != EIO); 6921 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6922 kreloc_thread = NULL; 6923 mutex_exit(&kpr_mutex); 6924 return (EAGAIN); 6925 } 6926 6927 /* 6928 * Now that we're out of the performance critical section we can 6929 * take care of updating the hash table, since we still 6930 * hold all the pages locked SE_EXCL at this point we 6931 * needn't worry about things changing out from under us. 6932 */ 6933 tpp = targ; 6934 rpp = repl; 6935 for (i = 0; i < npages; i++, tpp++, rpp++) { 6936 6937 /* 6938 * replace targ with replacement in page_hash table 6939 */ 6940 targ = tpp; 6941 page_relocate_hash(rpp, targ); 6942 6943 /* 6944 * concatenate target; caller of platform_page_relocate() 6945 * expects target to be concatenated after returning. 6946 */ 6947 ASSERT(targ->p_next == targ); 6948 ASSERT(targ->p_prev == targ); 6949 page_list_concat(&pl, &targ); 6950 } 6951 6952 ASSERT(*target == pl); 6953 *nrelocp = npages; 6954 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6955 kreloc_thread = NULL; 6956 mutex_exit(&kpr_mutex); 6957 return (0); 6958 } 6959 6960 /* 6961 * Called when stray pa_hments are found attached to a page which is 6962 * being freed. Notify the subsystem which attached the pa_hment of 6963 * the error if it registered a suitable handler, else panic. 6964 */ 6965 static void 6966 sfmmu_pahment_leaked(struct pa_hment *pahmep) 6967 { 6968 id_t cb_id = pahmep->cb_id; 6969 6970 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 6971 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 6972 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 6973 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 6974 return; /* non-fatal */ 6975 } 6976 panic("pa_hment leaked: 0x%p", (void *)pahmep); 6977 } 6978 6979 /* 6980 * Remove all mappings to page 'pp'. 6981 */ 6982 int 6983 hat_pageunload(struct page *pp, uint_t forceflag) 6984 { 6985 struct page *origpp = pp; 6986 struct sf_hment *sfhme, *tmphme; 6987 struct hme_blk *hmeblkp; 6988 kmutex_t *pml; 6989 #ifdef VAC 6990 kmutex_t *pmtx; 6991 #endif 6992 cpuset_t cpuset, tset; 6993 int index, cons; 6994 int xhme_blks; 6995 int pa_hments; 6996 6997 ASSERT(PAGE_EXCL(pp)); 6998 6999 retry_xhat: 7000 tmphme = NULL; 7001 xhme_blks = 0; 7002 pa_hments = 0; 7003 CPUSET_ZERO(cpuset); 7004 7005 pml = sfmmu_mlist_enter(pp); 7006 7007 #ifdef VAC 7008 if (pp->p_kpmref) 7009 sfmmu_kpm_pageunload(pp); 7010 ASSERT(!PP_ISMAPPED_KPM(pp)); 7011 #endif 7012 /* 7013 * Clear vpm reference. Since the page is exclusively locked 7014 * vpm cannot be referencing it. 7015 */ 7016 if (vpm_enable) { 7017 pp->p_vpmref = 0; 7018 } 7019 7020 index = PP_MAPINDEX(pp); 7021 cons = TTE8K; 7022 retry: 7023 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7024 tmphme = sfhme->hme_next; 7025 7026 if (IS_PAHME(sfhme)) { 7027 ASSERT(sfhme->hme_data != NULL); 7028 pa_hments++; 7029 continue; 7030 } 7031 7032 hmeblkp = sfmmu_hmetohblk(sfhme); 7033 if (hmeblkp->hblk_xhat_bit) { 7034 struct xhat_hme_blk *xblk = 7035 (struct xhat_hme_blk *)hmeblkp; 7036 7037 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat, 7038 pp, forceflag, XBLK2PROVBLK(xblk)); 7039 7040 xhme_blks = 1; 7041 continue; 7042 } 7043 7044 /* 7045 * If there are kernel mappings don't unload them, they will 7046 * be suspended. 7047 */ 7048 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7049 hmeblkp->hblk_tag.htag_id == ksfmmup) 7050 continue; 7051 7052 tset = sfmmu_pageunload(pp, sfhme, cons); 7053 CPUSET_OR(cpuset, tset); 7054 } 7055 7056 while (index != 0) { 7057 index = index >> 1; 7058 if (index != 0) 7059 cons++; 7060 if (index & 0x1) { 7061 /* Go to leading page */ 7062 pp = PP_GROUPLEADER(pp, cons); 7063 ASSERT(sfmmu_mlist_held(pp)); 7064 goto retry; 7065 } 7066 } 7067 7068 /* 7069 * cpuset may be empty if the page was only mapped by segkpm, 7070 * in which case we won't actually cross-trap. 7071 */ 7072 xt_sync(cpuset); 7073 7074 /* 7075 * The page should have no mappings at this point, unless 7076 * we were called from hat_page_relocate() in which case we 7077 * leave the locked mappings which will be suspended later. 7078 */ 7079 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments || 7080 (forceflag == SFMMU_KERNEL_RELOC)); 7081 7082 #ifdef VAC 7083 if (PP_ISTNC(pp)) { 7084 if (cons == TTE8K) { 7085 pmtx = sfmmu_page_enter(pp); 7086 PP_CLRTNC(pp); 7087 sfmmu_page_exit(pmtx); 7088 } else { 7089 conv_tnc(pp, cons); 7090 } 7091 } 7092 #endif /* VAC */ 7093 7094 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7095 /* 7096 * Unlink any pa_hments and free them, calling back 7097 * the responsible subsystem to notify it of the error. 7098 * This can occur in situations such as drivers leaking 7099 * DMA handles: naughty, but common enough that we'd like 7100 * to keep the system running rather than bringing it 7101 * down with an obscure error like "pa_hment leaked" 7102 * which doesn't aid the user in debugging their driver. 7103 */ 7104 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7105 tmphme = sfhme->hme_next; 7106 if (IS_PAHME(sfhme)) { 7107 struct pa_hment *pahmep = sfhme->hme_data; 7108 sfmmu_pahment_leaked(pahmep); 7109 HME_SUB(sfhme, pp); 7110 kmem_cache_free(pa_hment_cache, pahmep); 7111 } 7112 } 7113 7114 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks); 7115 } 7116 7117 sfmmu_mlist_exit(pml); 7118 7119 /* 7120 * XHAT may not have finished unloading pages 7121 * because some other thread was waiting for 7122 * mlist lock and XHAT_PAGEUNLOAD let it do 7123 * the job. 7124 */ 7125 if (xhme_blks) { 7126 pp = origpp; 7127 goto retry_xhat; 7128 } 7129 7130 return (0); 7131 } 7132 7133 cpuset_t 7134 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7135 { 7136 struct hme_blk *hmeblkp; 7137 sfmmu_t *sfmmup; 7138 tte_t tte, ttemod; 7139 #ifdef DEBUG 7140 tte_t orig_old; 7141 #endif /* DEBUG */ 7142 caddr_t addr; 7143 int ttesz; 7144 int ret; 7145 cpuset_t cpuset; 7146 7147 ASSERT(pp != NULL); 7148 ASSERT(sfmmu_mlist_held(pp)); 7149 ASSERT(!PP_ISKAS(pp)); 7150 7151 CPUSET_ZERO(cpuset); 7152 7153 hmeblkp = sfmmu_hmetohblk(sfhme); 7154 7155 readtte: 7156 sfmmu_copytte(&sfhme->hme_tte, &tte); 7157 if (TTE_IS_VALID(&tte)) { 7158 sfmmup = hblktosfmmu(hmeblkp); 7159 ttesz = get_hblk_ttesz(hmeblkp); 7160 /* 7161 * Only unload mappings of 'cons' size. 7162 */ 7163 if (ttesz != cons) 7164 return (cpuset); 7165 7166 /* 7167 * Note that we have p_mapping lock, but no hash lock here. 7168 * hblk_unload() has to have both hash lock AND p_mapping 7169 * lock before it tries to modify tte. So, the tte could 7170 * not become invalid in the sfmmu_modifytte_try() below. 7171 */ 7172 ttemod = tte; 7173 #ifdef DEBUG 7174 orig_old = tte; 7175 #endif /* DEBUG */ 7176 7177 TTE_SET_INVALID(&ttemod); 7178 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7179 if (ret < 0) { 7180 #ifdef DEBUG 7181 /* only R/M bits can change. */ 7182 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7183 #endif /* DEBUG */ 7184 goto readtte; 7185 } 7186 7187 if (ret == 0) { 7188 panic("pageunload: cas failed?"); 7189 } 7190 7191 addr = tte_to_vaddr(hmeblkp, tte); 7192 7193 if (hmeblkp->hblk_shared) { 7194 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7195 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7196 sf_region_t *rgnp; 7197 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7198 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7199 ASSERT(srdp != NULL); 7200 rgnp = srdp->srd_hmergnp[rid]; 7201 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7202 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7203 sfmmu_ttesync(NULL, addr, &tte, pp); 7204 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7205 atomic_add_long(&rgnp->rgn_ttecnt[ttesz], -1); 7206 } else { 7207 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7208 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -1); 7209 7210 /* 7211 * We need to flush the page from the virtual cache 7212 * in order to prevent a virtual cache alias 7213 * inconsistency. The particular scenario we need 7214 * to worry about is: 7215 * Given: va1 and va2 are two virtual address that 7216 * alias and will map the same physical address. 7217 * 1. mapping exists from va1 to pa and data has 7218 * been read into the cache. 7219 * 2. unload va1. 7220 * 3. load va2 and modify data using va2. 7221 * 4 unload va2. 7222 * 5. load va1 and reference data. Unless we flush 7223 * the data cache when we unload we will get 7224 * stale data. 7225 * This scenario is taken care of by using virtual 7226 * page coloring. 7227 */ 7228 if (sfmmup->sfmmu_ismhat) { 7229 /* 7230 * Flush TSBs, TLBs and caches 7231 * of every process 7232 * sharing this ism segment. 7233 */ 7234 sfmmu_hat_lock_all(); 7235 mutex_enter(&ism_mlist_lock); 7236 kpreempt_disable(); 7237 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7238 pp->p_pagenum, CACHE_NO_FLUSH); 7239 kpreempt_enable(); 7240 mutex_exit(&ism_mlist_lock); 7241 sfmmu_hat_unlock_all(); 7242 cpuset = cpu_ready_set; 7243 } else { 7244 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7245 cpuset = sfmmup->sfmmu_cpusran; 7246 } 7247 } 7248 7249 /* 7250 * Hme_sub has to run after ttesync() and a_rss update. 7251 * See hblk_unload(). 7252 */ 7253 HME_SUB(sfhme, pp); 7254 membar_stst(); 7255 7256 /* 7257 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7258 * since pteload may have done a HME_ADD() right after 7259 * we did the HME_SUB() above. Hmecnt is now maintained 7260 * by cas only. no lock guranteed its value. The only 7261 * gurantee we have is the hmecnt should not be less than 7262 * what it should be so the hblk will not be taken away. 7263 * It's also important that we decremented the hmecnt after 7264 * we are done with hmeblkp so that this hmeblk won't be 7265 * stolen. 7266 */ 7267 ASSERT(hmeblkp->hblk_hmecnt > 0); 7268 ASSERT(hmeblkp->hblk_vcnt > 0); 7269 atomic_add_16(&hmeblkp->hblk_vcnt, -1); 7270 atomic_add_16(&hmeblkp->hblk_hmecnt, -1); 7271 /* 7272 * This is bug 4063182. 7273 * XXX: fixme 7274 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7275 * !hmeblkp->hblk_lckcnt); 7276 */ 7277 } else { 7278 panic("invalid tte? pp %p &tte %p", 7279 (void *)pp, (void *)&tte); 7280 } 7281 7282 return (cpuset); 7283 } 7284 7285 /* 7286 * While relocating a kernel page, this function will move the mappings 7287 * from tpp to dpp and modify any associated data with these mappings. 7288 * It also unsuspends the suspended kernel mapping. 7289 */ 7290 static void 7291 hat_pagereload(struct page *tpp, struct page *dpp) 7292 { 7293 struct sf_hment *sfhme; 7294 tte_t tte, ttemod; 7295 int index, cons; 7296 7297 ASSERT(getpil() == PIL_MAX); 7298 ASSERT(sfmmu_mlist_held(tpp)); 7299 ASSERT(sfmmu_mlist_held(dpp)); 7300 7301 index = PP_MAPINDEX(tpp); 7302 cons = TTE8K; 7303 7304 /* Update real mappings to the page */ 7305 retry: 7306 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7307 if (IS_PAHME(sfhme)) 7308 continue; 7309 sfmmu_copytte(&sfhme->hme_tte, &tte); 7310 ttemod = tte; 7311 7312 /* 7313 * replace old pfn with new pfn in TTE 7314 */ 7315 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7316 7317 /* 7318 * clear suspend bit 7319 */ 7320 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7321 TTE_CLR_SUSPEND(&ttemod); 7322 7323 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7324 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7325 7326 /* 7327 * set hme_page point to new page 7328 */ 7329 sfhme->hme_page = dpp; 7330 } 7331 7332 /* 7333 * move p_mapping list from old page to new page 7334 */ 7335 dpp->p_mapping = tpp->p_mapping; 7336 tpp->p_mapping = NULL; 7337 dpp->p_share = tpp->p_share; 7338 tpp->p_share = 0; 7339 7340 while (index != 0) { 7341 index = index >> 1; 7342 if (index != 0) 7343 cons++; 7344 if (index & 0x1) { 7345 tpp = PP_GROUPLEADER(tpp, cons); 7346 dpp = PP_GROUPLEADER(dpp, cons); 7347 goto retry; 7348 } 7349 } 7350 7351 curthread->t_flag &= ~T_DONTDTRACE; 7352 mutex_exit(&kpr_suspendlock); 7353 } 7354 7355 uint_t 7356 hat_pagesync(struct page *pp, uint_t clearflag) 7357 { 7358 struct sf_hment *sfhme, *tmphme = NULL; 7359 struct hme_blk *hmeblkp; 7360 kmutex_t *pml; 7361 cpuset_t cpuset, tset; 7362 int index, cons; 7363 extern ulong_t po_share; 7364 page_t *save_pp = pp; 7365 int stop_on_sh = 0; 7366 uint_t shcnt; 7367 7368 CPUSET_ZERO(cpuset); 7369 7370 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7371 return (PP_GENERIC_ATTR(pp)); 7372 } 7373 7374 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7375 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7376 return (PP_GENERIC_ATTR(pp)); 7377 } 7378 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7379 return (PP_GENERIC_ATTR(pp)); 7380 } 7381 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7382 if (pp->p_share > po_share) { 7383 hat_page_setattr(pp, P_REF); 7384 return (PP_GENERIC_ATTR(pp)); 7385 } 7386 stop_on_sh = 1; 7387 shcnt = 0; 7388 } 7389 } 7390 7391 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7392 pml = sfmmu_mlist_enter(pp); 7393 index = PP_MAPINDEX(pp); 7394 cons = TTE8K; 7395 retry: 7396 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7397 /* 7398 * We need to save the next hment on the list since 7399 * it is possible for pagesync to remove an invalid hment 7400 * from the list. 7401 */ 7402 tmphme = sfhme->hme_next; 7403 if (IS_PAHME(sfhme)) 7404 continue; 7405 /* 7406 * If we are looking for large mappings and this hme doesn't 7407 * reach the range we are seeking, just ignore it. 7408 */ 7409 hmeblkp = sfmmu_hmetohblk(sfhme); 7410 if (hmeblkp->hblk_xhat_bit) 7411 continue; 7412 7413 if (hme_size(sfhme) < cons) 7414 continue; 7415 7416 if (stop_on_sh) { 7417 if (hmeblkp->hblk_shared) { 7418 sf_srd_t *srdp = hblktosrd(hmeblkp); 7419 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7420 sf_region_t *rgnp; 7421 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7422 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7423 ASSERT(srdp != NULL); 7424 rgnp = srdp->srd_hmergnp[rid]; 7425 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7426 rgnp, rid); 7427 shcnt += rgnp->rgn_refcnt; 7428 } else { 7429 shcnt++; 7430 } 7431 if (shcnt > po_share) { 7432 /* 7433 * tell the pager to spare the page this time 7434 * around. 7435 */ 7436 hat_page_setattr(save_pp, P_REF); 7437 index = 0; 7438 break; 7439 } 7440 } 7441 tset = sfmmu_pagesync(pp, sfhme, 7442 clearflag & ~HAT_SYNC_STOPON_RM); 7443 CPUSET_OR(cpuset, tset); 7444 7445 /* 7446 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7447 * as the "ref" or "mod" is set or share cnt exceeds po_share. 7448 */ 7449 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7450 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7451 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7452 index = 0; 7453 break; 7454 } 7455 } 7456 7457 while (index) { 7458 index = index >> 1; 7459 cons++; 7460 if (index & 0x1) { 7461 /* Go to leading page */ 7462 pp = PP_GROUPLEADER(pp, cons); 7463 goto retry; 7464 } 7465 } 7466 7467 xt_sync(cpuset); 7468 sfmmu_mlist_exit(pml); 7469 return (PP_GENERIC_ATTR(save_pp)); 7470 } 7471 7472 /* 7473 * Get all the hardware dependent attributes for a page struct 7474 */ 7475 static cpuset_t 7476 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7477 uint_t clearflag) 7478 { 7479 caddr_t addr; 7480 tte_t tte, ttemod; 7481 struct hme_blk *hmeblkp; 7482 int ret; 7483 sfmmu_t *sfmmup; 7484 cpuset_t cpuset; 7485 7486 ASSERT(pp != NULL); 7487 ASSERT(sfmmu_mlist_held(pp)); 7488 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7489 (clearflag == HAT_SYNC_ZERORM)); 7490 7491 SFMMU_STAT(sf_pagesync); 7492 7493 CPUSET_ZERO(cpuset); 7494 7495 sfmmu_pagesync_retry: 7496 7497 sfmmu_copytte(&sfhme->hme_tte, &tte); 7498 if (TTE_IS_VALID(&tte)) { 7499 hmeblkp = sfmmu_hmetohblk(sfhme); 7500 sfmmup = hblktosfmmu(hmeblkp); 7501 addr = tte_to_vaddr(hmeblkp, tte); 7502 if (clearflag == HAT_SYNC_ZERORM) { 7503 ttemod = tte; 7504 TTE_CLR_RM(&ttemod); 7505 ret = sfmmu_modifytte_try(&tte, &ttemod, 7506 &sfhme->hme_tte); 7507 if (ret < 0) { 7508 /* 7509 * cas failed and the new value is not what 7510 * we want. 7511 */ 7512 goto sfmmu_pagesync_retry; 7513 } 7514 7515 if (ret > 0) { 7516 /* we win the cas */ 7517 if (hmeblkp->hblk_shared) { 7518 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7519 uint_t rid = 7520 hmeblkp->hblk_tag.htag_rid; 7521 sf_region_t *rgnp; 7522 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7523 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7524 ASSERT(srdp != NULL); 7525 rgnp = srdp->srd_hmergnp[rid]; 7526 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7527 srdp, rgnp, rid); 7528 cpuset = sfmmu_rgntlb_demap(addr, 7529 rgnp, hmeblkp, 1); 7530 } else { 7531 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7532 0, 0); 7533 cpuset = sfmmup->sfmmu_cpusran; 7534 } 7535 } 7536 } 7537 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7538 &tte, pp); 7539 } 7540 return (cpuset); 7541 } 7542 7543 /* 7544 * Remove write permission from a mappings to a page, so that 7545 * we can detect the next modification of it. This requires modifying 7546 * the TTE then invalidating (demap) any TLB entry using that TTE. 7547 * This code is similar to sfmmu_pagesync(). 7548 */ 7549 static cpuset_t 7550 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7551 { 7552 caddr_t addr; 7553 tte_t tte; 7554 tte_t ttemod; 7555 struct hme_blk *hmeblkp; 7556 int ret; 7557 sfmmu_t *sfmmup; 7558 cpuset_t cpuset; 7559 7560 ASSERT(pp != NULL); 7561 ASSERT(sfmmu_mlist_held(pp)); 7562 7563 CPUSET_ZERO(cpuset); 7564 SFMMU_STAT(sf_clrwrt); 7565 7566 retry: 7567 7568 sfmmu_copytte(&sfhme->hme_tte, &tte); 7569 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7570 hmeblkp = sfmmu_hmetohblk(sfhme); 7571 7572 /* 7573 * xhat mappings should never be to a VMODSORT page. 7574 */ 7575 ASSERT(hmeblkp->hblk_xhat_bit == 0); 7576 7577 sfmmup = hblktosfmmu(hmeblkp); 7578 addr = tte_to_vaddr(hmeblkp, tte); 7579 7580 ttemod = tte; 7581 TTE_CLR_WRT(&ttemod); 7582 TTE_CLR_MOD(&ttemod); 7583 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7584 7585 /* 7586 * if cas failed and the new value is not what 7587 * we want retry 7588 */ 7589 if (ret < 0) 7590 goto retry; 7591 7592 /* we win the cas */ 7593 if (ret > 0) { 7594 if (hmeblkp->hblk_shared) { 7595 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7596 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7597 sf_region_t *rgnp; 7598 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7599 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7600 ASSERT(srdp != NULL); 7601 rgnp = srdp->srd_hmergnp[rid]; 7602 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7603 srdp, rgnp, rid); 7604 cpuset = sfmmu_rgntlb_demap(addr, 7605 rgnp, hmeblkp, 1); 7606 } else { 7607 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7608 cpuset = sfmmup->sfmmu_cpusran; 7609 } 7610 } 7611 } 7612 7613 return (cpuset); 7614 } 7615 7616 /* 7617 * Walk all mappings of a page, removing write permission and clearing the 7618 * ref/mod bits. This code is similar to hat_pagesync() 7619 */ 7620 static void 7621 hat_page_clrwrt(page_t *pp) 7622 { 7623 struct sf_hment *sfhme; 7624 struct sf_hment *tmphme = NULL; 7625 kmutex_t *pml; 7626 cpuset_t cpuset; 7627 cpuset_t tset; 7628 int index; 7629 int cons; 7630 7631 CPUSET_ZERO(cpuset); 7632 7633 pml = sfmmu_mlist_enter(pp); 7634 index = PP_MAPINDEX(pp); 7635 cons = TTE8K; 7636 retry: 7637 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7638 tmphme = sfhme->hme_next; 7639 7640 /* 7641 * If we are looking for large mappings and this hme doesn't 7642 * reach the range we are seeking, just ignore its. 7643 */ 7644 7645 if (hme_size(sfhme) < cons) 7646 continue; 7647 7648 tset = sfmmu_pageclrwrt(pp, sfhme); 7649 CPUSET_OR(cpuset, tset); 7650 } 7651 7652 while (index) { 7653 index = index >> 1; 7654 cons++; 7655 if (index & 0x1) { 7656 /* Go to leading page */ 7657 pp = PP_GROUPLEADER(pp, cons); 7658 goto retry; 7659 } 7660 } 7661 7662 xt_sync(cpuset); 7663 sfmmu_mlist_exit(pml); 7664 } 7665 7666 /* 7667 * Set the given REF/MOD/RO bits for the given page. 7668 * For a vnode with a sorted v_pages list, we need to change 7669 * the attributes and the v_pages list together under page_vnode_mutex. 7670 */ 7671 void 7672 hat_page_setattr(page_t *pp, uint_t flag) 7673 { 7674 vnode_t *vp = pp->p_vnode; 7675 page_t **listp; 7676 kmutex_t *pmtx; 7677 kmutex_t *vphm = NULL; 7678 int noshuffle; 7679 7680 noshuffle = flag & P_NSH; 7681 flag &= ~P_NSH; 7682 7683 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7684 7685 /* 7686 * nothing to do if attribute already set 7687 */ 7688 if ((pp->p_nrm & flag) == flag) 7689 return; 7690 7691 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7692 !noshuffle) { 7693 vphm = page_vnode_mutex(vp); 7694 mutex_enter(vphm); 7695 } 7696 7697 pmtx = sfmmu_page_enter(pp); 7698 pp->p_nrm |= flag; 7699 sfmmu_page_exit(pmtx); 7700 7701 if (vphm != NULL) { 7702 /* 7703 * Some File Systems examine v_pages for NULL w/o 7704 * grabbing the vphm mutex. Must not let it become NULL when 7705 * pp is the only page on the list. 7706 */ 7707 if (pp->p_vpnext != pp) { 7708 page_vpsub(&vp->v_pages, pp); 7709 if (vp->v_pages != NULL) 7710 listp = &vp->v_pages->p_vpprev->p_vpnext; 7711 else 7712 listp = &vp->v_pages; 7713 page_vpadd(listp, pp); 7714 } 7715 mutex_exit(vphm); 7716 } 7717 } 7718 7719 void 7720 hat_page_clrattr(page_t *pp, uint_t flag) 7721 { 7722 vnode_t *vp = pp->p_vnode; 7723 kmutex_t *pmtx; 7724 7725 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7726 7727 pmtx = sfmmu_page_enter(pp); 7728 7729 /* 7730 * Caller is expected to hold page's io lock for VMODSORT to work 7731 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7732 * bit is cleared. 7733 * We don't have assert to avoid tripping some existing third party 7734 * code. The dirty page is moved back to top of the v_page list 7735 * after IO is done in pvn_write_done(). 7736 */ 7737 pp->p_nrm &= ~flag; 7738 sfmmu_page_exit(pmtx); 7739 7740 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7741 7742 /* 7743 * VMODSORT works by removing write permissions and getting 7744 * a fault when a page is made dirty. At this point 7745 * we need to remove write permission from all mappings 7746 * to this page. 7747 */ 7748 hat_page_clrwrt(pp); 7749 } 7750 } 7751 7752 uint_t 7753 hat_page_getattr(page_t *pp, uint_t flag) 7754 { 7755 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7756 return ((uint_t)(pp->p_nrm & flag)); 7757 } 7758 7759 /* 7760 * DEBUG kernels: verify that a kernel va<->pa translation 7761 * is safe by checking the underlying page_t is in a page 7762 * relocation-safe state. 7763 */ 7764 #ifdef DEBUG 7765 void 7766 sfmmu_check_kpfn(pfn_t pfn) 7767 { 7768 page_t *pp; 7769 int index, cons; 7770 7771 if (hat_check_vtop == 0) 7772 return; 7773 7774 if (kvseg.s_base == NULL || panicstr) 7775 return; 7776 7777 pp = page_numtopp_nolock(pfn); 7778 if (!pp) 7779 return; 7780 7781 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7782 return; 7783 7784 /* 7785 * Handed a large kernel page, we dig up the root page since we 7786 * know the root page might have the lock also. 7787 */ 7788 if (pp->p_szc != 0) { 7789 index = PP_MAPINDEX(pp); 7790 cons = TTE8K; 7791 again: 7792 while (index != 0) { 7793 index >>= 1; 7794 if (index != 0) 7795 cons++; 7796 if (index & 0x1) { 7797 pp = PP_GROUPLEADER(pp, cons); 7798 goto again; 7799 } 7800 } 7801 } 7802 7803 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7804 return; 7805 7806 /* 7807 * Pages need to be locked or allocated "permanent" (either from 7808 * static_arena arena or explicitly setting PG_NORELOC when calling 7809 * page_create_va()) for VA->PA translations to be valid. 7810 */ 7811 if (!PP_ISNORELOC(pp)) 7812 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7813 (void *)pp); 7814 else 7815 panic("Illegal VA->PA translation, pp 0x%p not locked", 7816 (void *)pp); 7817 } 7818 #endif /* DEBUG */ 7819 7820 /* 7821 * Returns a page frame number for a given virtual address. 7822 * Returns PFN_INVALID to indicate an invalid mapping 7823 */ 7824 pfn_t 7825 hat_getpfnum(struct hat *hat, caddr_t addr) 7826 { 7827 pfn_t pfn; 7828 tte_t tte; 7829 7830 /* 7831 * We would like to 7832 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7833 * but we can't because the iommu driver will call this 7834 * routine at interrupt time and it can't grab the as lock 7835 * or it will deadlock: A thread could have the as lock 7836 * and be waiting for io. The io can't complete 7837 * because the interrupt thread is blocked trying to grab 7838 * the as lock. 7839 */ 7840 7841 ASSERT(hat->sfmmu_xhat_provider == NULL); 7842 7843 if (hat == ksfmmup) { 7844 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7845 ASSERT(segkmem_lpszc > 0); 7846 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7847 if (pfn != PFN_INVALID) { 7848 sfmmu_check_kpfn(pfn); 7849 return (pfn); 7850 } 7851 } else if (segkpm && IS_KPM_ADDR(addr)) { 7852 return (sfmmu_kpm_vatopfn(addr)); 7853 } 7854 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7855 == PFN_SUSPENDED) { 7856 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7857 } 7858 sfmmu_check_kpfn(pfn); 7859 return (pfn); 7860 } else { 7861 return (sfmmu_uvatopfn(addr, hat, NULL)); 7862 } 7863 } 7864 7865 /* 7866 * This routine will return both pfn and tte for the vaddr. 7867 */ 7868 static pfn_t 7869 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7870 { 7871 struct hmehash_bucket *hmebp; 7872 hmeblk_tag hblktag; 7873 int hmeshift, hashno = 1; 7874 struct hme_blk *hmeblkp = NULL; 7875 tte_t tte; 7876 7877 struct sf_hment *sfhmep; 7878 pfn_t pfn; 7879 7880 /* support for ISM */ 7881 ism_map_t *ism_map; 7882 ism_blk_t *ism_blkp; 7883 int i; 7884 sfmmu_t *ism_hatid = NULL; 7885 sfmmu_t *locked_hatid = NULL; 7886 sfmmu_t *sv_sfmmup = sfmmup; 7887 caddr_t sv_vaddr = vaddr; 7888 sf_srd_t *srdp; 7889 7890 if (ttep == NULL) { 7891 ttep = &tte; 7892 } else { 7893 ttep->ll = 0; 7894 } 7895 7896 ASSERT(sfmmup != ksfmmup); 7897 SFMMU_STAT(sf_user_vtop); 7898 /* 7899 * Set ism_hatid if vaddr falls in a ISM segment. 7900 */ 7901 ism_blkp = sfmmup->sfmmu_iblk; 7902 if (ism_blkp != NULL) { 7903 sfmmu_ismhat_enter(sfmmup, 0); 7904 locked_hatid = sfmmup; 7905 } 7906 while (ism_blkp != NULL && ism_hatid == NULL) { 7907 ism_map = ism_blkp->iblk_maps; 7908 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7909 if (vaddr >= ism_start(ism_map[i]) && 7910 vaddr < ism_end(ism_map[i])) { 7911 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7912 vaddr = (caddr_t)(vaddr - 7913 ism_start(ism_map[i])); 7914 break; 7915 } 7916 } 7917 ism_blkp = ism_blkp->iblk_next; 7918 } 7919 if (locked_hatid) { 7920 sfmmu_ismhat_exit(locked_hatid, 0); 7921 } 7922 7923 hblktag.htag_id = sfmmup; 7924 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7925 do { 7926 hmeshift = HME_HASH_SHIFT(hashno); 7927 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7928 hblktag.htag_rehash = hashno; 7929 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7930 7931 SFMMU_HASH_LOCK(hmebp); 7932 7933 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7934 if (hmeblkp != NULL) { 7935 ASSERT(!hmeblkp->hblk_shared); 7936 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7937 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7938 SFMMU_HASH_UNLOCK(hmebp); 7939 if (TTE_IS_VALID(ttep)) { 7940 pfn = TTE_TO_PFN(vaddr, ttep); 7941 return (pfn); 7942 } 7943 break; 7944 } 7945 SFMMU_HASH_UNLOCK(hmebp); 7946 hashno++; 7947 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7948 7949 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 7950 return (PFN_INVALID); 7951 } 7952 srdp = sv_sfmmup->sfmmu_srdp; 7953 ASSERT(srdp != NULL); 7954 ASSERT(srdp->srd_refcnt != 0); 7955 hblktag.htag_id = srdp; 7956 hashno = 1; 7957 do { 7958 hmeshift = HME_HASH_SHIFT(hashno); 7959 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 7960 hblktag.htag_rehash = hashno; 7961 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 7962 7963 SFMMU_HASH_LOCK(hmebp); 7964 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 7965 hmeblkp = hmeblkp->hblk_next) { 7966 uint_t rid; 7967 sf_region_t *rgnp; 7968 caddr_t rsaddr; 7969 caddr_t readdr; 7970 7971 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 7972 sv_sfmmup->sfmmu_hmeregion_map)) { 7973 continue; 7974 } 7975 ASSERT(hmeblkp->hblk_shared); 7976 rid = hmeblkp->hblk_tag.htag_rid; 7977 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7978 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7979 rgnp = srdp->srd_hmergnp[rid]; 7980 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7981 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 7982 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7983 rsaddr = rgnp->rgn_saddr; 7984 readdr = rsaddr + rgnp->rgn_size; 7985 #ifdef DEBUG 7986 if (TTE_IS_VALID(ttep) || 7987 get_hblk_ttesz(hmeblkp) > TTE8K) { 7988 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 7989 ASSERT(eva > sv_vaddr); 7990 ASSERT(sv_vaddr >= rsaddr); 7991 ASSERT(sv_vaddr < readdr); 7992 ASSERT(eva <= readdr); 7993 } 7994 #endif /* DEBUG */ 7995 /* 7996 * Continue the search if we 7997 * found an invalid 8K tte outside of the area 7998 * covered by this hmeblk's region. 7999 */ 8000 if (TTE_IS_VALID(ttep)) { 8001 SFMMU_HASH_UNLOCK(hmebp); 8002 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8003 return (pfn); 8004 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8005 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8006 SFMMU_HASH_UNLOCK(hmebp); 8007 pfn = PFN_INVALID; 8008 return (pfn); 8009 } 8010 } 8011 SFMMU_HASH_UNLOCK(hmebp); 8012 hashno++; 8013 } while (hashno <= mmu_hashcnt); 8014 return (PFN_INVALID); 8015 } 8016 8017 8018 /* 8019 * For compatability with AT&T and later optimizations 8020 */ 8021 /* ARGSUSED */ 8022 void 8023 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8024 { 8025 ASSERT(hat != NULL); 8026 ASSERT(hat->sfmmu_xhat_provider == NULL); 8027 } 8028 8029 /* 8030 * Return the number of mappings to a particular page. This number is an 8031 * approximation of the number of people sharing the page. 8032 * 8033 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8034 * hat_page_checkshare() can be used to compare threshold to share 8035 * count that reflects the number of region sharers albeit at higher cost. 8036 */ 8037 ulong_t 8038 hat_page_getshare(page_t *pp) 8039 { 8040 page_t *spp = pp; /* start page */ 8041 kmutex_t *pml; 8042 ulong_t cnt; 8043 int index, sz = TTE64K; 8044 8045 /* 8046 * We need to grab the mlist lock to make sure any outstanding 8047 * load/unloads complete. Otherwise we could return zero 8048 * even though the unload(s) hasn't finished yet. 8049 */ 8050 pml = sfmmu_mlist_enter(spp); 8051 cnt = spp->p_share; 8052 8053 #ifdef VAC 8054 if (kpm_enable) 8055 cnt += spp->p_kpmref; 8056 #endif 8057 if (vpm_enable && pp->p_vpmref) { 8058 cnt += 1; 8059 } 8060 8061 /* 8062 * If we have any large mappings, we count the number of 8063 * mappings that this large page is part of. 8064 */ 8065 index = PP_MAPINDEX(spp); 8066 index >>= 1; 8067 while (index) { 8068 pp = PP_GROUPLEADER(spp, sz); 8069 if ((index & 0x1) && pp != spp) { 8070 cnt += pp->p_share; 8071 spp = pp; 8072 } 8073 index >>= 1; 8074 sz++; 8075 } 8076 sfmmu_mlist_exit(pml); 8077 return (cnt); 8078 } 8079 8080 /* 8081 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8082 * otherwise. Count shared hmeblks by region's refcnt. 8083 */ 8084 int 8085 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8086 { 8087 kmutex_t *pml; 8088 ulong_t cnt = 0; 8089 int index, sz = TTE8K; 8090 struct sf_hment *sfhme, *tmphme = NULL; 8091 struct hme_blk *hmeblkp; 8092 8093 pml = sfmmu_mlist_enter(pp); 8094 8095 #ifdef VAC 8096 if (kpm_enable) 8097 cnt = pp->p_kpmref; 8098 #endif 8099 8100 if (vpm_enable && pp->p_vpmref) { 8101 cnt += 1; 8102 } 8103 8104 if (pp->p_share + cnt > sh_thresh) { 8105 sfmmu_mlist_exit(pml); 8106 return (1); 8107 } 8108 8109 index = PP_MAPINDEX(pp); 8110 8111 again: 8112 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8113 tmphme = sfhme->hme_next; 8114 if (IS_PAHME(sfhme)) { 8115 continue; 8116 } 8117 8118 hmeblkp = sfmmu_hmetohblk(sfhme); 8119 if (hmeblkp->hblk_xhat_bit) { 8120 cnt++; 8121 if (cnt > sh_thresh) { 8122 sfmmu_mlist_exit(pml); 8123 return (1); 8124 } 8125 continue; 8126 } 8127 if (hme_size(sfhme) != sz) { 8128 continue; 8129 } 8130 8131 if (hmeblkp->hblk_shared) { 8132 sf_srd_t *srdp = hblktosrd(hmeblkp); 8133 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8134 sf_region_t *rgnp; 8135 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8136 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8137 ASSERT(srdp != NULL); 8138 rgnp = srdp->srd_hmergnp[rid]; 8139 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8140 rgnp, rid); 8141 cnt += rgnp->rgn_refcnt; 8142 } else { 8143 cnt++; 8144 } 8145 if (cnt > sh_thresh) { 8146 sfmmu_mlist_exit(pml); 8147 return (1); 8148 } 8149 } 8150 8151 index >>= 1; 8152 sz++; 8153 while (index) { 8154 pp = PP_GROUPLEADER(pp, sz); 8155 ASSERT(sfmmu_mlist_held(pp)); 8156 if (index & 0x1) { 8157 goto again; 8158 } 8159 index >>= 1; 8160 sz++; 8161 } 8162 sfmmu_mlist_exit(pml); 8163 return (0); 8164 } 8165 8166 /* 8167 * Unload all large mappings to the pp and reset the p_szc field of every 8168 * constituent page according to the remaining mappings. 8169 * 8170 * pp must be locked SE_EXCL. Even though no other constituent pages are 8171 * locked it's legal to unload the large mappings to the pp because all 8172 * constituent pages of large locked mappings have to be locked SE_SHARED. 8173 * This means if we have SE_EXCL lock on one of constituent pages none of the 8174 * large mappings to pp are locked. 8175 * 8176 * Decrease p_szc field starting from the last constituent page and ending 8177 * with the root page. This method is used because other threads rely on the 8178 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8179 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8180 * ensures that p_szc changes of the constituent pages appears atomic for all 8181 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8182 * 8183 * This mechanism is only used for file system pages where it's not always 8184 * possible to get SE_EXCL locks on all constituent pages to demote the size 8185 * code (as is done for anonymous or kernel large pages). 8186 * 8187 * See more comments in front of sfmmu_mlspl_enter(). 8188 */ 8189 void 8190 hat_page_demote(page_t *pp) 8191 { 8192 int index; 8193 int sz; 8194 cpuset_t cpuset; 8195 int sync = 0; 8196 page_t *rootpp; 8197 struct sf_hment *sfhme; 8198 struct sf_hment *tmphme = NULL; 8199 struct hme_blk *hmeblkp; 8200 uint_t pszc; 8201 page_t *lastpp; 8202 cpuset_t tset; 8203 pgcnt_t npgs; 8204 kmutex_t *pml; 8205 kmutex_t *pmtx = NULL; 8206 8207 ASSERT(PAGE_EXCL(pp)); 8208 ASSERT(!PP_ISFREE(pp)); 8209 ASSERT(!PP_ISKAS(pp)); 8210 ASSERT(page_szc_lock_assert(pp)); 8211 pml = sfmmu_mlist_enter(pp); 8212 8213 pszc = pp->p_szc; 8214 if (pszc == 0) { 8215 goto out; 8216 } 8217 8218 index = PP_MAPINDEX(pp) >> 1; 8219 8220 if (index) { 8221 CPUSET_ZERO(cpuset); 8222 sz = TTE64K; 8223 sync = 1; 8224 } 8225 8226 while (index) { 8227 if (!(index & 0x1)) { 8228 index >>= 1; 8229 sz++; 8230 continue; 8231 } 8232 ASSERT(sz <= pszc); 8233 rootpp = PP_GROUPLEADER(pp, sz); 8234 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8235 tmphme = sfhme->hme_next; 8236 ASSERT(!IS_PAHME(sfhme)); 8237 hmeblkp = sfmmu_hmetohblk(sfhme); 8238 if (hme_size(sfhme) != sz) { 8239 continue; 8240 } 8241 if (hmeblkp->hblk_xhat_bit) { 8242 cmn_err(CE_PANIC, 8243 "hat_page_demote: xhat hmeblk"); 8244 } 8245 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8246 CPUSET_OR(cpuset, tset); 8247 } 8248 if (index >>= 1) { 8249 sz++; 8250 } 8251 } 8252 8253 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8254 8255 if (sync) { 8256 xt_sync(cpuset); 8257 #ifdef VAC 8258 if (PP_ISTNC(pp)) { 8259 conv_tnc(rootpp, sz); 8260 } 8261 #endif /* VAC */ 8262 } 8263 8264 pmtx = sfmmu_page_enter(pp); 8265 8266 ASSERT(pp->p_szc == pszc); 8267 rootpp = PP_PAGEROOT(pp); 8268 ASSERT(rootpp->p_szc == pszc); 8269 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8270 8271 while (lastpp != rootpp) { 8272 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8273 ASSERT(sz < pszc); 8274 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8275 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8276 while (--npgs > 0) { 8277 lastpp->p_szc = (uchar_t)sz; 8278 lastpp = PP_PAGEPREV(lastpp); 8279 } 8280 if (sz) { 8281 /* 8282 * make sure before current root's pszc 8283 * is updated all updates to constituent pages pszc 8284 * fields are globally visible. 8285 */ 8286 membar_producer(); 8287 } 8288 lastpp->p_szc = sz; 8289 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8290 if (lastpp != rootpp) { 8291 lastpp = PP_PAGEPREV(lastpp); 8292 } 8293 } 8294 if (sz == 0) { 8295 /* the loop above doesn't cover this case */ 8296 rootpp->p_szc = 0; 8297 } 8298 out: 8299 ASSERT(pp->p_szc == 0); 8300 if (pmtx != NULL) { 8301 sfmmu_page_exit(pmtx); 8302 } 8303 sfmmu_mlist_exit(pml); 8304 } 8305 8306 /* 8307 * Refresh the HAT ismttecnt[] element for size szc. 8308 * Caller must have set ISM busy flag to prevent mapping 8309 * lists from changing while we're traversing them. 8310 */ 8311 pgcnt_t 8312 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8313 { 8314 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8315 ism_map_t *ism_map; 8316 pgcnt_t npgs = 0; 8317 pgcnt_t npgs_scd = 0; 8318 int j; 8319 sf_scd_t *scdp; 8320 uchar_t rid; 8321 8322 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8323 scdp = sfmmup->sfmmu_scdp; 8324 8325 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8326 ism_map = ism_blkp->iblk_maps; 8327 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8328 rid = ism_map[j].imap_rid; 8329 ASSERT(rid == SFMMU_INVALID_ISMRID || 8330 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8331 8332 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8333 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8334 /* ISM is in sfmmup's SCD */ 8335 npgs_scd += 8336 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8337 } else { 8338 /* ISMs is not in SCD */ 8339 npgs += 8340 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8341 } 8342 } 8343 } 8344 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8345 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8346 return (npgs); 8347 } 8348 8349 /* 8350 * Yield the memory claim requirement for an address space. 8351 * 8352 * This is currently implemented as the number of bytes that have active 8353 * hardware translations that have page structures. Therefore, it can 8354 * underestimate the traditional resident set size, eg, if the 8355 * physical page is present and the hardware translation is missing; 8356 * and it can overestimate the rss, eg, if there are active 8357 * translations to a frame buffer with page structs. 8358 * Also, it does not take sharing into account. 8359 * 8360 * Note that we don't acquire locks here since this function is most often 8361 * called from the clock thread. 8362 */ 8363 size_t 8364 hat_get_mapped_size(struct hat *hat) 8365 { 8366 size_t assize = 0; 8367 int i; 8368 8369 if (hat == NULL) 8370 return (0); 8371 8372 ASSERT(hat->sfmmu_xhat_provider == NULL); 8373 8374 for (i = 0; i < mmu_page_sizes; i++) 8375 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8376 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8377 8378 if (hat->sfmmu_iblk == NULL) 8379 return (assize); 8380 8381 for (i = 0; i < mmu_page_sizes; i++) 8382 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8383 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8384 8385 return (assize); 8386 } 8387 8388 int 8389 hat_stats_enable(struct hat *hat) 8390 { 8391 hatlock_t *hatlockp; 8392 8393 ASSERT(hat->sfmmu_xhat_provider == NULL); 8394 8395 hatlockp = sfmmu_hat_enter(hat); 8396 hat->sfmmu_rmstat++; 8397 sfmmu_hat_exit(hatlockp); 8398 return (1); 8399 } 8400 8401 void 8402 hat_stats_disable(struct hat *hat) 8403 { 8404 hatlock_t *hatlockp; 8405 8406 ASSERT(hat->sfmmu_xhat_provider == NULL); 8407 8408 hatlockp = sfmmu_hat_enter(hat); 8409 hat->sfmmu_rmstat--; 8410 sfmmu_hat_exit(hatlockp); 8411 } 8412 8413 /* 8414 * Routines for entering or removing ourselves from the 8415 * ism_hat's mapping list. This is used for both private and 8416 * SCD hats. 8417 */ 8418 static void 8419 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8420 { 8421 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8422 8423 iment->iment_prev = NULL; 8424 iment->iment_next = ism_hat->sfmmu_iment; 8425 if (ism_hat->sfmmu_iment) { 8426 ism_hat->sfmmu_iment->iment_prev = iment; 8427 } 8428 ism_hat->sfmmu_iment = iment; 8429 } 8430 8431 static void 8432 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8433 { 8434 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8435 8436 if (ism_hat->sfmmu_iment == NULL) { 8437 panic("ism map entry remove - no entries"); 8438 } 8439 8440 if (iment->iment_prev) { 8441 ASSERT(ism_hat->sfmmu_iment != iment); 8442 iment->iment_prev->iment_next = iment->iment_next; 8443 } else { 8444 ASSERT(ism_hat->sfmmu_iment == iment); 8445 ism_hat->sfmmu_iment = iment->iment_next; 8446 } 8447 8448 if (iment->iment_next) { 8449 iment->iment_next->iment_prev = iment->iment_prev; 8450 } 8451 8452 /* 8453 * zero out the entry 8454 */ 8455 iment->iment_next = NULL; 8456 iment->iment_prev = NULL; 8457 iment->iment_hat = NULL; 8458 iment->iment_base_va = 0; 8459 } 8460 8461 /* 8462 * Hat_share()/unshare() return an (non-zero) error 8463 * when saddr and daddr are not properly aligned. 8464 * 8465 * The top level mapping element determines the alignment 8466 * requirement for saddr and daddr, depending on different 8467 * architectures. 8468 * 8469 * When hat_share()/unshare() are not supported, 8470 * HATOP_SHARE()/UNSHARE() return 0 8471 */ 8472 int 8473 hat_share(struct hat *sfmmup, caddr_t addr, 8474 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8475 { 8476 ism_blk_t *ism_blkp; 8477 ism_blk_t *new_iblk; 8478 ism_map_t *ism_map; 8479 ism_ment_t *ism_ment; 8480 int i, added; 8481 hatlock_t *hatlockp; 8482 int reload_mmu = 0; 8483 uint_t ismshift = page_get_shift(ismszc); 8484 size_t ismpgsz = page_get_pagesize(ismszc); 8485 uint_t ismmask = (uint_t)ismpgsz - 1; 8486 size_t sh_size = ISM_SHIFT(ismshift, len); 8487 ushort_t ismhatflag; 8488 hat_region_cookie_t rcookie; 8489 sf_scd_t *old_scdp; 8490 8491 #ifdef DEBUG 8492 caddr_t eaddr = addr + len; 8493 #endif /* DEBUG */ 8494 8495 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8496 ASSERT(sptaddr == ISMID_STARTADDR); 8497 /* 8498 * Check the alignment. 8499 */ 8500 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8501 return (EINVAL); 8502 8503 /* 8504 * Check size alignment. 8505 */ 8506 if (!ISM_ALIGNED(ismshift, len)) 8507 return (EINVAL); 8508 8509 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 8510 8511 /* 8512 * Allocate ism_ment for the ism_hat's mapping list, and an 8513 * ism map blk in case we need one. We must do our 8514 * allocations before acquiring locks to prevent a deadlock 8515 * in the kmem allocator on the mapping list lock. 8516 */ 8517 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8518 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8519 8520 /* 8521 * Serialize ISM mappings with the ISM busy flag, and also the 8522 * trap handlers. 8523 */ 8524 sfmmu_ismhat_enter(sfmmup, 0); 8525 8526 /* 8527 * Allocate an ism map blk if necessary. 8528 */ 8529 if (sfmmup->sfmmu_iblk == NULL) { 8530 sfmmup->sfmmu_iblk = new_iblk; 8531 bzero(new_iblk, sizeof (*new_iblk)); 8532 new_iblk->iblk_nextpa = (uint64_t)-1; 8533 membar_stst(); /* make sure next ptr visible to all CPUs */ 8534 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8535 reload_mmu = 1; 8536 new_iblk = NULL; 8537 } 8538 8539 #ifdef DEBUG 8540 /* 8541 * Make sure mapping does not already exist. 8542 */ 8543 ism_blkp = sfmmup->sfmmu_iblk; 8544 while (ism_blkp != NULL) { 8545 ism_map = ism_blkp->iblk_maps; 8546 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8547 if ((addr >= ism_start(ism_map[i]) && 8548 addr < ism_end(ism_map[i])) || 8549 eaddr > ism_start(ism_map[i]) && 8550 eaddr <= ism_end(ism_map[i])) { 8551 panic("sfmmu_share: Already mapped!"); 8552 } 8553 } 8554 ism_blkp = ism_blkp->iblk_next; 8555 } 8556 #endif /* DEBUG */ 8557 8558 ASSERT(ismszc >= TTE4M); 8559 if (ismszc == TTE4M) { 8560 ismhatflag = HAT_4M_FLAG; 8561 } else if (ismszc == TTE32M) { 8562 ismhatflag = HAT_32M_FLAG; 8563 } else if (ismszc == TTE256M) { 8564 ismhatflag = HAT_256M_FLAG; 8565 } 8566 /* 8567 * Add mapping to first available mapping slot. 8568 */ 8569 ism_blkp = sfmmup->sfmmu_iblk; 8570 added = 0; 8571 while (!added) { 8572 ism_map = ism_blkp->iblk_maps; 8573 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8574 if (ism_map[i].imap_ismhat == NULL) { 8575 8576 ism_map[i].imap_ismhat = ism_hatid; 8577 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8578 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8579 ism_map[i].imap_hatflags = ismhatflag; 8580 ism_map[i].imap_sz_mask = ismmask; 8581 /* 8582 * imap_seg is checked in ISM_CHECK to see if 8583 * non-NULL, then other info assumed valid. 8584 */ 8585 membar_stst(); 8586 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8587 ism_map[i].imap_ment = ism_ment; 8588 8589 /* 8590 * Now add ourselves to the ism_hat's 8591 * mapping list. 8592 */ 8593 ism_ment->iment_hat = sfmmup; 8594 ism_ment->iment_base_va = addr; 8595 ism_hatid->sfmmu_ismhat = 1; 8596 mutex_enter(&ism_mlist_lock); 8597 iment_add(ism_ment, ism_hatid); 8598 mutex_exit(&ism_mlist_lock); 8599 added = 1; 8600 break; 8601 } 8602 } 8603 if (!added && ism_blkp->iblk_next == NULL) { 8604 ism_blkp->iblk_next = new_iblk; 8605 new_iblk = NULL; 8606 bzero(ism_blkp->iblk_next, 8607 sizeof (*ism_blkp->iblk_next)); 8608 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8609 membar_stst(); 8610 ism_blkp->iblk_nextpa = 8611 va_to_pa((caddr_t)ism_blkp->iblk_next); 8612 } 8613 ism_blkp = ism_blkp->iblk_next; 8614 } 8615 8616 /* 8617 * After calling hat_join_region, sfmmup may join a new SCD or 8618 * move from the old scd to a new scd, in which case, we want to 8619 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8620 * sfmmu_check_page_sizes at the end of this routine. 8621 */ 8622 old_scdp = sfmmup->sfmmu_scdp; 8623 8624 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8625 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8626 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8627 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8628 } 8629 /* 8630 * Update our counters for this sfmmup's ism mappings. 8631 */ 8632 for (i = 0; i <= ismszc; i++) { 8633 if (!(disable_ism_large_pages & (1 << i))) 8634 (void) ism_tsb_entries(sfmmup, i); 8635 } 8636 8637 /* 8638 * For ISM and DISM we do not support 512K pages, so we only only 8639 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8640 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8641 * 8642 * Need to set 32M/256M ISM flags to make sure 8643 * sfmmu_check_page_sizes() enables them on Panther. 8644 */ 8645 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8646 8647 switch (ismszc) { 8648 case TTE256M: 8649 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8650 hatlockp = sfmmu_hat_enter(sfmmup); 8651 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8652 sfmmu_hat_exit(hatlockp); 8653 } 8654 break; 8655 case TTE32M: 8656 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8657 hatlockp = sfmmu_hat_enter(sfmmup); 8658 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8659 sfmmu_hat_exit(hatlockp); 8660 } 8661 break; 8662 default: 8663 break; 8664 } 8665 8666 /* 8667 * If we updated the ismblkpa for this HAT we must make 8668 * sure all CPUs running this process reload their tsbmiss area. 8669 * Otherwise they will fail to load the mappings in the tsbmiss 8670 * handler and will loop calling pagefault(). 8671 */ 8672 if (reload_mmu) { 8673 hatlockp = sfmmu_hat_enter(sfmmup); 8674 sfmmu_sync_mmustate(sfmmup); 8675 sfmmu_hat_exit(hatlockp); 8676 } 8677 8678 sfmmu_ismhat_exit(sfmmup, 0); 8679 8680 /* 8681 * Free up ismblk if we didn't use it. 8682 */ 8683 if (new_iblk != NULL) 8684 kmem_cache_free(ism_blk_cache, new_iblk); 8685 8686 /* 8687 * Check TSB and TLB page sizes. 8688 */ 8689 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8690 sfmmu_check_page_sizes(sfmmup, 0); 8691 } else { 8692 sfmmu_check_page_sizes(sfmmup, 1); 8693 } 8694 return (0); 8695 } 8696 8697 /* 8698 * hat_unshare removes exactly one ism_map from 8699 * this process's as. It expects multiple calls 8700 * to hat_unshare for multiple shm segments. 8701 */ 8702 void 8703 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8704 { 8705 ism_map_t *ism_map; 8706 ism_ment_t *free_ment = NULL; 8707 ism_blk_t *ism_blkp; 8708 struct hat *ism_hatid; 8709 int found, i; 8710 hatlock_t *hatlockp; 8711 struct tsb_info *tsbinfo; 8712 uint_t ismshift = page_get_shift(ismszc); 8713 size_t sh_size = ISM_SHIFT(ismshift, len); 8714 uchar_t ism_rid; 8715 sf_scd_t *old_scdp; 8716 8717 ASSERT(ISM_ALIGNED(ismshift, addr)); 8718 ASSERT(ISM_ALIGNED(ismshift, len)); 8719 ASSERT(sfmmup != NULL); 8720 ASSERT(sfmmup != ksfmmup); 8721 8722 if (sfmmup->sfmmu_xhat_provider) { 8723 XHAT_UNSHARE(sfmmup, addr, len); 8724 return; 8725 } else { 8726 /* 8727 * This must be a CPU HAT. If the address space has 8728 * XHATs attached, inform all XHATs that ISM segment 8729 * is going away 8730 */ 8731 ASSERT(sfmmup->sfmmu_as != NULL); 8732 if (sfmmup->sfmmu_as->a_xhat != NULL) 8733 xhat_unshare_all(sfmmup->sfmmu_as, addr, len); 8734 } 8735 8736 /* 8737 * Make sure that during the entire time ISM mappings are removed, 8738 * the trap handlers serialize behind us, and that no one else 8739 * can be mucking with ISM mappings. This also lets us get away 8740 * with not doing expensive cross calls to flush the TLB -- we 8741 * just discard the context, flush the entire TSB, and call it 8742 * a day. 8743 */ 8744 sfmmu_ismhat_enter(sfmmup, 0); 8745 8746 /* 8747 * Remove the mapping. 8748 * 8749 * We can't have any holes in the ism map. 8750 * The tsb miss code while searching the ism map will 8751 * stop on an empty map slot. So we must move 8752 * everyone past the hole up 1 if any. 8753 * 8754 * Also empty ism map blks are not freed until the 8755 * process exits. This is to prevent a MT race condition 8756 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 8757 */ 8758 found = 0; 8759 ism_blkp = sfmmup->sfmmu_iblk; 8760 while (!found && ism_blkp != NULL) { 8761 ism_map = ism_blkp->iblk_maps; 8762 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8763 if (addr == ism_start(ism_map[i]) && 8764 sh_size == (size_t)(ism_size(ism_map[i]))) { 8765 found = 1; 8766 break; 8767 } 8768 } 8769 if (!found) 8770 ism_blkp = ism_blkp->iblk_next; 8771 } 8772 8773 if (found) { 8774 ism_hatid = ism_map[i].imap_ismhat; 8775 ism_rid = ism_map[i].imap_rid; 8776 ASSERT(ism_hatid != NULL); 8777 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8778 8779 /* 8780 * After hat_leave_region, the sfmmup may leave SCD, 8781 * in which case, we want to grow the private tsb size when 8782 * calling sfmmu_check_page_sizes at the end of the routine. 8783 */ 8784 old_scdp = sfmmup->sfmmu_scdp; 8785 /* 8786 * Then remove ourselves from the region. 8787 */ 8788 if (ism_rid != SFMMU_INVALID_ISMRID) { 8789 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8790 HAT_REGION_ISM); 8791 } 8792 8793 /* 8794 * And now guarantee that any other cpu 8795 * that tries to process an ISM miss 8796 * will go to tl=0. 8797 */ 8798 hatlockp = sfmmu_hat_enter(sfmmup); 8799 sfmmu_invalidate_ctx(sfmmup); 8800 sfmmu_hat_exit(hatlockp); 8801 8802 /* 8803 * Remove ourselves from the ism mapping list. 8804 */ 8805 mutex_enter(&ism_mlist_lock); 8806 iment_sub(ism_map[i].imap_ment, ism_hatid); 8807 mutex_exit(&ism_mlist_lock); 8808 free_ment = ism_map[i].imap_ment; 8809 8810 /* 8811 * We delete the ism map by copying 8812 * the next map over the current one. 8813 * We will take the next one in the maps 8814 * array or from the next ism_blk. 8815 */ 8816 while (ism_blkp != NULL) { 8817 ism_map = ism_blkp->iblk_maps; 8818 while (i < (ISM_MAP_SLOTS - 1)) { 8819 ism_map[i] = ism_map[i + 1]; 8820 i++; 8821 } 8822 /* i == (ISM_MAP_SLOTS - 1) */ 8823 ism_blkp = ism_blkp->iblk_next; 8824 if (ism_blkp != NULL) { 8825 ism_map[i] = ism_blkp->iblk_maps[0]; 8826 i = 0; 8827 } else { 8828 ism_map[i].imap_seg = 0; 8829 ism_map[i].imap_vb_shift = 0; 8830 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8831 ism_map[i].imap_hatflags = 0; 8832 ism_map[i].imap_sz_mask = 0; 8833 ism_map[i].imap_ismhat = NULL; 8834 ism_map[i].imap_ment = NULL; 8835 } 8836 } 8837 8838 /* 8839 * Now flush entire TSB for the process, since 8840 * demapping page by page can be too expensive. 8841 * We don't have to flush the TLB here anymore 8842 * since we switch to a new TLB ctx instead. 8843 * Also, there is no need to flush if the process 8844 * is exiting since the TSB will be freed later. 8845 */ 8846 if (!sfmmup->sfmmu_free) { 8847 hatlockp = sfmmu_hat_enter(sfmmup); 8848 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8849 tsbinfo = tsbinfo->tsb_next) { 8850 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8851 continue; 8852 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8853 tsbinfo->tsb_flags |= 8854 TSB_FLUSH_NEEDED; 8855 continue; 8856 } 8857 8858 sfmmu_inv_tsb(tsbinfo->tsb_va, 8859 TSB_BYTES(tsbinfo->tsb_szc)); 8860 } 8861 sfmmu_hat_exit(hatlockp); 8862 } 8863 } 8864 8865 /* 8866 * Update our counters for this sfmmup's ism mappings. 8867 */ 8868 for (i = 0; i <= ismszc; i++) { 8869 if (!(disable_ism_large_pages & (1 << i))) 8870 (void) ism_tsb_entries(sfmmup, i); 8871 } 8872 8873 sfmmu_ismhat_exit(sfmmup, 0); 8874 8875 /* 8876 * We must do our freeing here after dropping locks 8877 * to prevent a deadlock in the kmem allocator on the 8878 * mapping list lock. 8879 */ 8880 if (free_ment != NULL) 8881 kmem_cache_free(ism_ment_cache, free_ment); 8882 8883 /* 8884 * Check TSB and TLB page sizes if the process isn't exiting. 8885 */ 8886 if (!sfmmup->sfmmu_free) { 8887 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8888 sfmmu_check_page_sizes(sfmmup, 1); 8889 } else { 8890 sfmmu_check_page_sizes(sfmmup, 0); 8891 } 8892 } 8893 } 8894 8895 /* ARGSUSED */ 8896 static int 8897 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8898 { 8899 /* void *buf is sfmmu_t pointer */ 8900 bzero(buf, sizeof (sfmmu_t)); 8901 8902 return (0); 8903 } 8904 8905 /* ARGSUSED */ 8906 static void 8907 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8908 { 8909 /* void *buf is sfmmu_t pointer */ 8910 } 8911 8912 /* 8913 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8914 * field to be the pa of this hmeblk 8915 */ 8916 /* ARGSUSED */ 8917 static int 8918 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8919 { 8920 struct hme_blk *hmeblkp; 8921 8922 bzero(buf, (size_t)cdrarg); 8923 hmeblkp = (struct hme_blk *)buf; 8924 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8925 8926 #ifdef HBLK_TRACE 8927 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8928 #endif /* HBLK_TRACE */ 8929 8930 return (0); 8931 } 8932 8933 /* ARGSUSED */ 8934 static void 8935 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8936 { 8937 8938 #ifdef HBLK_TRACE 8939 8940 struct hme_blk *hmeblkp; 8941 8942 hmeblkp = (struct hme_blk *)buf; 8943 mutex_destroy(&hmeblkp->hblk_audit_lock); 8944 8945 #endif /* HBLK_TRACE */ 8946 } 8947 8948 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8949 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8950 /* 8951 * The kmem allocator will callback into our reclaim routine when the system 8952 * is running low in memory. We traverse the hash and free up all unused but 8953 * still cached hme_blks. We also traverse the free list and free them up 8954 * as well. 8955 */ 8956 /*ARGSUSED*/ 8957 static void 8958 sfmmu_hblkcache_reclaim(void *cdrarg) 8959 { 8960 int i; 8961 struct hmehash_bucket *hmebp; 8962 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8963 static struct hmehash_bucket *uhmehash_reclaim_hand; 8964 static struct hmehash_bucket *khmehash_reclaim_hand; 8965 struct hme_blk *list = NULL, *last_hmeblkp; 8966 cpuset_t cpuset = cpu_ready_set; 8967 cpu_hme_pend_t *cpuhp; 8968 8969 /* Free up hmeblks on the cpu pending lists */ 8970 for (i = 0; i < NCPU; i++) { 8971 cpuhp = &cpu_hme_pend[i]; 8972 if (cpuhp->chp_listp != NULL) { 8973 mutex_enter(&cpuhp->chp_mutex); 8974 if (cpuhp->chp_listp == NULL) { 8975 mutex_exit(&cpuhp->chp_mutex); 8976 continue; 8977 } 8978 for (last_hmeblkp = cpuhp->chp_listp; 8979 last_hmeblkp->hblk_next != NULL; 8980 last_hmeblkp = last_hmeblkp->hblk_next) 8981 ; 8982 last_hmeblkp->hblk_next = list; 8983 list = cpuhp->chp_listp; 8984 cpuhp->chp_listp = NULL; 8985 cpuhp->chp_count = 0; 8986 mutex_exit(&cpuhp->chp_mutex); 8987 } 8988 8989 } 8990 8991 if (list != NULL) { 8992 kpreempt_disable(); 8993 CPUSET_DEL(cpuset, CPU->cpu_id); 8994 xt_sync(cpuset); 8995 xt_sync(cpuset); 8996 kpreempt_enable(); 8997 sfmmu_hblk_free(&list); 8998 list = NULL; 8999 } 9000 9001 hmebp = uhmehash_reclaim_hand; 9002 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 9003 uhmehash_reclaim_hand = hmebp = uhme_hash; 9004 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9005 9006 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9007 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9008 hmeblkp = hmebp->hmeblkp; 9009 pr_hblk = NULL; 9010 while (hmeblkp) { 9011 nx_hblk = hmeblkp->hblk_next; 9012 if (!hmeblkp->hblk_vcnt && 9013 !hmeblkp->hblk_hmecnt) { 9014 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9015 pr_hblk, &list, 0); 9016 } else { 9017 pr_hblk = hmeblkp; 9018 } 9019 hmeblkp = nx_hblk; 9020 } 9021 SFMMU_HASH_UNLOCK(hmebp); 9022 } 9023 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9024 hmebp = uhme_hash; 9025 } 9026 9027 hmebp = khmehash_reclaim_hand; 9028 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9029 khmehash_reclaim_hand = hmebp = khme_hash; 9030 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9031 9032 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9033 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9034 hmeblkp = hmebp->hmeblkp; 9035 pr_hblk = NULL; 9036 while (hmeblkp) { 9037 nx_hblk = hmeblkp->hblk_next; 9038 if (!hmeblkp->hblk_vcnt && 9039 !hmeblkp->hblk_hmecnt) { 9040 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9041 pr_hblk, &list, 0); 9042 } else { 9043 pr_hblk = hmeblkp; 9044 } 9045 hmeblkp = nx_hblk; 9046 } 9047 SFMMU_HASH_UNLOCK(hmebp); 9048 } 9049 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9050 hmebp = khme_hash; 9051 } 9052 sfmmu_hblks_list_purge(&list, 0); 9053 } 9054 9055 /* 9056 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9057 * same goes for sfmmu_get_addrvcolor(). 9058 * 9059 * This function will return the virtual color for the specified page. The 9060 * virtual color corresponds to this page current mapping or its last mapping. 9061 * It is used by memory allocators to choose addresses with the correct 9062 * alignment so vac consistency is automatically maintained. If the page 9063 * has no color it returns -1. 9064 */ 9065 /*ARGSUSED*/ 9066 int 9067 sfmmu_get_ppvcolor(struct page *pp) 9068 { 9069 #ifdef VAC 9070 int color; 9071 9072 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9073 return (-1); 9074 } 9075 color = PP_GET_VCOLOR(pp); 9076 ASSERT(color < mmu_btop(shm_alignment)); 9077 return (color); 9078 #else 9079 return (-1); 9080 #endif /* VAC */ 9081 } 9082 9083 /* 9084 * This function will return the desired alignment for vac consistency 9085 * (vac color) given a virtual address. If no vac is present it returns -1. 9086 */ 9087 /*ARGSUSED*/ 9088 int 9089 sfmmu_get_addrvcolor(caddr_t vaddr) 9090 { 9091 #ifdef VAC 9092 if (cache & CACHE_VAC) { 9093 return (addr_to_vcolor(vaddr)); 9094 } else { 9095 return (-1); 9096 } 9097 #else 9098 return (-1); 9099 #endif /* VAC */ 9100 } 9101 9102 #ifdef VAC 9103 /* 9104 * Check for conflicts. 9105 * A conflict exists if the new and existent mappings do not match in 9106 * their "shm_alignment fields. If conflicts exist, the existant mappings 9107 * are flushed unless one of them is locked. If one of them is locked, then 9108 * the mappings are flushed and converted to non-cacheable mappings. 9109 */ 9110 static void 9111 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9112 { 9113 struct hat *tmphat; 9114 struct sf_hment *sfhmep, *tmphme = NULL; 9115 struct hme_blk *hmeblkp; 9116 int vcolor; 9117 tte_t tte; 9118 9119 ASSERT(sfmmu_mlist_held(pp)); 9120 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9121 9122 vcolor = addr_to_vcolor(addr); 9123 if (PP_NEWPAGE(pp)) { 9124 PP_SET_VCOLOR(pp, vcolor); 9125 return; 9126 } 9127 9128 if (PP_GET_VCOLOR(pp) == vcolor) { 9129 return; 9130 } 9131 9132 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9133 /* 9134 * Previous user of page had a different color 9135 * but since there are no current users 9136 * we just flush the cache and change the color. 9137 */ 9138 SFMMU_STAT(sf_pgcolor_conflict); 9139 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9140 PP_SET_VCOLOR(pp, vcolor); 9141 return; 9142 } 9143 9144 /* 9145 * If we get here we have a vac conflict with a current 9146 * mapping. VAC conflict policy is as follows. 9147 * - The default is to unload the other mappings unless: 9148 * - If we have a large mapping we uncache the page. 9149 * We need to uncache the rest of the large page too. 9150 * - If any of the mappings are locked we uncache the page. 9151 * - If the requested mapping is inconsistent 9152 * with another mapping and that mapping 9153 * is in the same address space we have to 9154 * make it non-cached. The default thing 9155 * to do is unload the inconsistent mapping 9156 * but if they are in the same address space 9157 * we run the risk of unmapping the pc or the 9158 * stack which we will use as we return to the user, 9159 * in which case we can then fault on the thing 9160 * we just unloaded and get into an infinite loop. 9161 */ 9162 if (PP_ISMAPPED_LARGE(pp)) { 9163 int sz; 9164 9165 /* 9166 * Existing mapping is for big pages. We don't unload 9167 * existing big mappings to satisfy new mappings. 9168 * Always convert all mappings to TNC. 9169 */ 9170 sz = fnd_mapping_sz(pp); 9171 pp = PP_GROUPLEADER(pp, sz); 9172 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9173 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9174 TTEPAGES(sz)); 9175 9176 return; 9177 } 9178 9179 /* 9180 * check if any mapping is in same as or if it is locked 9181 * since in that case we need to uncache. 9182 */ 9183 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9184 tmphme = sfhmep->hme_next; 9185 if (IS_PAHME(sfhmep)) 9186 continue; 9187 hmeblkp = sfmmu_hmetohblk(sfhmep); 9188 if (hmeblkp->hblk_xhat_bit) 9189 continue; 9190 tmphat = hblktosfmmu(hmeblkp); 9191 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9192 ASSERT(TTE_IS_VALID(&tte)); 9193 if (hmeblkp->hblk_shared || tmphat == hat || 9194 hmeblkp->hblk_lckcnt) { 9195 /* 9196 * We have an uncache conflict 9197 */ 9198 SFMMU_STAT(sf_uncache_conflict); 9199 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9200 return; 9201 } 9202 } 9203 9204 /* 9205 * We have an unload conflict 9206 * We have already checked for LARGE mappings, therefore 9207 * the remaining mapping(s) must be TTE8K. 9208 */ 9209 SFMMU_STAT(sf_unload_conflict); 9210 9211 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9212 tmphme = sfhmep->hme_next; 9213 if (IS_PAHME(sfhmep)) 9214 continue; 9215 hmeblkp = sfmmu_hmetohblk(sfhmep); 9216 if (hmeblkp->hblk_xhat_bit) 9217 continue; 9218 ASSERT(!hmeblkp->hblk_shared); 9219 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9220 } 9221 9222 if (PP_ISMAPPED_KPM(pp)) 9223 sfmmu_kpm_vac_unload(pp, addr); 9224 9225 /* 9226 * Unloads only do TLB flushes so we need to flush the 9227 * cache here. 9228 */ 9229 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9230 PP_SET_VCOLOR(pp, vcolor); 9231 } 9232 9233 /* 9234 * Whenever a mapping is unloaded and the page is in TNC state, 9235 * we see if the page can be made cacheable again. 'pp' is 9236 * the page that we just unloaded a mapping from, the size 9237 * of mapping that was unloaded is 'ottesz'. 9238 * Remark: 9239 * The recache policy for mpss pages can leave a performance problem 9240 * under the following circumstances: 9241 * . A large page in uncached mode has just been unmapped. 9242 * . All constituent pages are TNC due to a conflicting small mapping. 9243 * . There are many other, non conflicting, small mappings around for 9244 * a lot of the constituent pages. 9245 * . We're called w/ the "old" groupleader page and the old ottesz, 9246 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9247 * we end up w/ TTE8K or npages == 1. 9248 * . We call tst_tnc w/ the old groupleader only, and if there is no 9249 * conflict, we re-cache only this page. 9250 * . All other small mappings are not checked and will be left in TNC mode. 9251 * The problem is not very serious because: 9252 * . mpss is actually only defined for heap and stack, so the probability 9253 * is not very high that a large page mapping exists in parallel to a small 9254 * one (this is possible, but seems to be bad programming style in the 9255 * appl). 9256 * . The problem gets a little bit more serious, when those TNC pages 9257 * have to be mapped into kernel space, e.g. for networking. 9258 * . When VAC alias conflicts occur in applications, this is regarded 9259 * as an application bug. So if kstat's show them, the appl should 9260 * be changed anyway. 9261 */ 9262 void 9263 conv_tnc(page_t *pp, int ottesz) 9264 { 9265 int cursz, dosz; 9266 pgcnt_t curnpgs, dopgs; 9267 pgcnt_t pg64k; 9268 page_t *pp2; 9269 9270 /* 9271 * Determine how big a range we check for TNC and find 9272 * leader page. cursz is the size of the biggest 9273 * mapping that still exist on 'pp'. 9274 */ 9275 if (PP_ISMAPPED_LARGE(pp)) { 9276 cursz = fnd_mapping_sz(pp); 9277 } else { 9278 cursz = TTE8K; 9279 } 9280 9281 if (ottesz >= cursz) { 9282 dosz = ottesz; 9283 pp2 = pp; 9284 } else { 9285 dosz = cursz; 9286 pp2 = PP_GROUPLEADER(pp, dosz); 9287 } 9288 9289 pg64k = TTEPAGES(TTE64K); 9290 dopgs = TTEPAGES(dosz); 9291 9292 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9293 9294 while (dopgs != 0) { 9295 curnpgs = TTEPAGES(cursz); 9296 if (tst_tnc(pp2, curnpgs)) { 9297 SFMMU_STAT_ADD(sf_recache, curnpgs); 9298 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9299 curnpgs); 9300 } 9301 9302 ASSERT(dopgs >= curnpgs); 9303 dopgs -= curnpgs; 9304 9305 if (dopgs == 0) { 9306 break; 9307 } 9308 9309 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9310 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9311 cursz = fnd_mapping_sz(pp2); 9312 } else { 9313 cursz = TTE8K; 9314 } 9315 } 9316 } 9317 9318 /* 9319 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9320 * returns 0 otherwise. Note that oaddr argument is valid for only 9321 * 8k pages. 9322 */ 9323 int 9324 tst_tnc(page_t *pp, pgcnt_t npages) 9325 { 9326 struct sf_hment *sfhme; 9327 struct hme_blk *hmeblkp; 9328 tte_t tte; 9329 caddr_t vaddr; 9330 int clr_valid = 0; 9331 int color, color1, bcolor; 9332 int i, ncolors; 9333 9334 ASSERT(pp != NULL); 9335 ASSERT(!(cache & CACHE_WRITEBACK)); 9336 9337 if (npages > 1) { 9338 ncolors = CACHE_NUM_COLOR; 9339 } 9340 9341 for (i = 0; i < npages; i++) { 9342 ASSERT(sfmmu_mlist_held(pp)); 9343 ASSERT(PP_ISTNC(pp)); 9344 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9345 9346 if (PP_ISPNC(pp)) { 9347 return (0); 9348 } 9349 9350 clr_valid = 0; 9351 if (PP_ISMAPPED_KPM(pp)) { 9352 caddr_t kpmvaddr; 9353 9354 ASSERT(kpm_enable); 9355 kpmvaddr = hat_kpm_page2va(pp, 1); 9356 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9357 color1 = addr_to_vcolor(kpmvaddr); 9358 clr_valid = 1; 9359 } 9360 9361 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9362 if (IS_PAHME(sfhme)) 9363 continue; 9364 hmeblkp = sfmmu_hmetohblk(sfhme); 9365 if (hmeblkp->hblk_xhat_bit) 9366 continue; 9367 9368 sfmmu_copytte(&sfhme->hme_tte, &tte); 9369 ASSERT(TTE_IS_VALID(&tte)); 9370 9371 vaddr = tte_to_vaddr(hmeblkp, tte); 9372 color = addr_to_vcolor(vaddr); 9373 9374 if (npages > 1) { 9375 /* 9376 * If there is a big mapping, make sure 9377 * 8K mapping is consistent with the big 9378 * mapping. 9379 */ 9380 bcolor = i % ncolors; 9381 if (color != bcolor) { 9382 return (0); 9383 } 9384 } 9385 if (!clr_valid) { 9386 clr_valid = 1; 9387 color1 = color; 9388 } 9389 9390 if (color1 != color) { 9391 return (0); 9392 } 9393 } 9394 9395 pp = PP_PAGENEXT(pp); 9396 } 9397 9398 return (1); 9399 } 9400 9401 void 9402 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9403 pgcnt_t npages) 9404 { 9405 kmutex_t *pmtx; 9406 int i, ncolors, bcolor; 9407 kpm_hlk_t *kpmp; 9408 cpuset_t cpuset; 9409 9410 ASSERT(pp != NULL); 9411 ASSERT(!(cache & CACHE_WRITEBACK)); 9412 9413 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9414 pmtx = sfmmu_page_enter(pp); 9415 9416 /* 9417 * Fast path caching single unmapped page 9418 */ 9419 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9420 flags == HAT_CACHE) { 9421 PP_CLRTNC(pp); 9422 PP_CLRPNC(pp); 9423 sfmmu_page_exit(pmtx); 9424 sfmmu_kpm_kpmp_exit(kpmp); 9425 return; 9426 } 9427 9428 /* 9429 * We need to capture all cpus in order to change cacheability 9430 * because we can't allow one cpu to access the same physical 9431 * page using a cacheable and a non-cachebale mapping at the same 9432 * time. Since we may end up walking the ism mapping list 9433 * have to grab it's lock now since we can't after all the 9434 * cpus have been captured. 9435 */ 9436 sfmmu_hat_lock_all(); 9437 mutex_enter(&ism_mlist_lock); 9438 kpreempt_disable(); 9439 cpuset = cpu_ready_set; 9440 xc_attention(cpuset); 9441 9442 if (npages > 1) { 9443 /* 9444 * Make sure all colors are flushed since the 9445 * sfmmu_page_cache() only flushes one color- 9446 * it does not know big pages. 9447 */ 9448 ncolors = CACHE_NUM_COLOR; 9449 if (flags & HAT_TMPNC) { 9450 for (i = 0; i < ncolors; i++) { 9451 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9452 } 9453 cache_flush_flag = CACHE_NO_FLUSH; 9454 } 9455 } 9456 9457 for (i = 0; i < npages; i++) { 9458 9459 ASSERT(sfmmu_mlist_held(pp)); 9460 9461 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9462 9463 if (npages > 1) { 9464 bcolor = i % ncolors; 9465 } else { 9466 bcolor = NO_VCOLOR; 9467 } 9468 9469 sfmmu_page_cache(pp, flags, cache_flush_flag, 9470 bcolor); 9471 } 9472 9473 pp = PP_PAGENEXT(pp); 9474 } 9475 9476 xt_sync(cpuset); 9477 xc_dismissed(cpuset); 9478 mutex_exit(&ism_mlist_lock); 9479 sfmmu_hat_unlock_all(); 9480 sfmmu_page_exit(pmtx); 9481 sfmmu_kpm_kpmp_exit(kpmp); 9482 kpreempt_enable(); 9483 } 9484 9485 /* 9486 * This function changes the virtual cacheability of all mappings to a 9487 * particular page. When changing from uncache to cacheable the mappings will 9488 * only be changed if all of them have the same virtual color. 9489 * We need to flush the cache in all cpus. It is possible that 9490 * a process referenced a page as cacheable but has sinced exited 9491 * and cleared the mapping list. We still to flush it but have no 9492 * state so all cpus is the only alternative. 9493 */ 9494 static void 9495 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9496 { 9497 struct sf_hment *sfhme; 9498 struct hme_blk *hmeblkp; 9499 sfmmu_t *sfmmup; 9500 tte_t tte, ttemod; 9501 caddr_t vaddr; 9502 int ret, color; 9503 pfn_t pfn; 9504 9505 color = bcolor; 9506 pfn = pp->p_pagenum; 9507 9508 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9509 9510 if (IS_PAHME(sfhme)) 9511 continue; 9512 hmeblkp = sfmmu_hmetohblk(sfhme); 9513 9514 if (hmeblkp->hblk_xhat_bit) 9515 continue; 9516 9517 sfmmu_copytte(&sfhme->hme_tte, &tte); 9518 ASSERT(TTE_IS_VALID(&tte)); 9519 vaddr = tte_to_vaddr(hmeblkp, tte); 9520 color = addr_to_vcolor(vaddr); 9521 9522 #ifdef DEBUG 9523 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9524 ASSERT(color == bcolor); 9525 } 9526 #endif 9527 9528 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9529 9530 ttemod = tte; 9531 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9532 TTE_CLR_VCACHEABLE(&ttemod); 9533 } else { /* flags & HAT_CACHE */ 9534 TTE_SET_VCACHEABLE(&ttemod); 9535 } 9536 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9537 if (ret < 0) { 9538 /* 9539 * Since all cpus are captured modifytte should not 9540 * fail. 9541 */ 9542 panic("sfmmu_page_cache: write to tte failed"); 9543 } 9544 9545 sfmmup = hblktosfmmu(hmeblkp); 9546 if (cache_flush_flag == CACHE_FLUSH) { 9547 /* 9548 * Flush TSBs, TLBs and caches 9549 */ 9550 if (hmeblkp->hblk_shared) { 9551 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9552 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9553 sf_region_t *rgnp; 9554 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9555 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9556 ASSERT(srdp != NULL); 9557 rgnp = srdp->srd_hmergnp[rid]; 9558 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9559 srdp, rgnp, rid); 9560 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9561 hmeblkp, 0); 9562 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9563 } else if (sfmmup->sfmmu_ismhat) { 9564 if (flags & HAT_CACHE) { 9565 SFMMU_STAT(sf_ism_recache); 9566 } else { 9567 SFMMU_STAT(sf_ism_uncache); 9568 } 9569 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9570 pfn, CACHE_FLUSH); 9571 } else { 9572 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9573 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9574 } 9575 9576 /* 9577 * all cache entries belonging to this pfn are 9578 * now flushed. 9579 */ 9580 cache_flush_flag = CACHE_NO_FLUSH; 9581 } else { 9582 /* 9583 * Flush only TSBs and TLBs. 9584 */ 9585 if (hmeblkp->hblk_shared) { 9586 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9587 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9588 sf_region_t *rgnp; 9589 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9590 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9591 ASSERT(srdp != NULL); 9592 rgnp = srdp->srd_hmergnp[rid]; 9593 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9594 srdp, rgnp, rid); 9595 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9596 hmeblkp, 0); 9597 } else if (sfmmup->sfmmu_ismhat) { 9598 if (flags & HAT_CACHE) { 9599 SFMMU_STAT(sf_ism_recache); 9600 } else { 9601 SFMMU_STAT(sf_ism_uncache); 9602 } 9603 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9604 pfn, CACHE_NO_FLUSH); 9605 } else { 9606 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9607 } 9608 } 9609 } 9610 9611 if (PP_ISMAPPED_KPM(pp)) 9612 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9613 9614 switch (flags) { 9615 9616 default: 9617 panic("sfmmu_pagecache: unknown flags"); 9618 break; 9619 9620 case HAT_CACHE: 9621 PP_CLRTNC(pp); 9622 PP_CLRPNC(pp); 9623 PP_SET_VCOLOR(pp, color); 9624 break; 9625 9626 case HAT_TMPNC: 9627 PP_SETTNC(pp); 9628 PP_SET_VCOLOR(pp, NO_VCOLOR); 9629 break; 9630 9631 case HAT_UNCACHE: 9632 PP_SETPNC(pp); 9633 PP_CLRTNC(pp); 9634 PP_SET_VCOLOR(pp, NO_VCOLOR); 9635 break; 9636 } 9637 } 9638 #endif /* VAC */ 9639 9640 9641 /* 9642 * Wrapper routine used to return a context. 9643 * 9644 * It's the responsibility of the caller to guarantee that the 9645 * process serializes on calls here by taking the HAT lock for 9646 * the hat. 9647 * 9648 */ 9649 static void 9650 sfmmu_get_ctx(sfmmu_t *sfmmup) 9651 { 9652 mmu_ctx_t *mmu_ctxp; 9653 uint_t pstate_save; 9654 int ret; 9655 9656 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9657 ASSERT(sfmmup != ksfmmup); 9658 9659 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9660 sfmmu_setup_tsbinfo(sfmmup); 9661 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9662 } 9663 9664 kpreempt_disable(); 9665 9666 mmu_ctxp = CPU_MMU_CTXP(CPU); 9667 ASSERT(mmu_ctxp); 9668 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9669 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9670 9671 /* 9672 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9673 */ 9674 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9675 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE); 9676 9677 /* 9678 * Let the MMU set up the page sizes to use for 9679 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9680 */ 9681 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9682 mmu_set_ctx_page_sizes(sfmmup); 9683 } 9684 9685 /* 9686 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9687 * interrupts disabled to prevent race condition with wrap-around 9688 * ctx invalidatation. In sun4v, ctx invalidation also involves 9689 * a HV call to set the number of TSBs to 0. If interrupts are not 9690 * disabled until after sfmmu_load_mmustate is complete TSBs may 9691 * become assigned to INVALID_CONTEXT. This is not allowed. 9692 */ 9693 pstate_save = sfmmu_disable_intrs(); 9694 9695 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9696 sfmmup->sfmmu_scdp != NULL) { 9697 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9698 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9699 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9700 /* debug purpose only */ 9701 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9702 != INVALID_CONTEXT); 9703 } 9704 sfmmu_load_mmustate(sfmmup); 9705 9706 sfmmu_enable_intrs(pstate_save); 9707 9708 kpreempt_enable(); 9709 } 9710 9711 /* 9712 * When all cnums are used up in a MMU, cnum will wrap around to the 9713 * next generation and start from 2. 9714 */ 9715 static void 9716 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum) 9717 { 9718 9719 /* caller must have disabled the preemption */ 9720 ASSERT(curthread->t_preempt >= 1); 9721 ASSERT(mmu_ctxp != NULL); 9722 9723 /* acquire Per-MMU (PM) spin lock */ 9724 mutex_enter(&mmu_ctxp->mmu_lock); 9725 9726 /* re-check to see if wrap-around is needed */ 9727 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9728 goto done; 9729 9730 SFMMU_MMU_STAT(mmu_wrap_around); 9731 9732 /* update gnum */ 9733 ASSERT(mmu_ctxp->mmu_gnum != 0); 9734 mmu_ctxp->mmu_gnum++; 9735 if (mmu_ctxp->mmu_gnum == 0 || 9736 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9737 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9738 (void *)mmu_ctxp); 9739 } 9740 9741 if (mmu_ctxp->mmu_ncpus > 1) { 9742 cpuset_t cpuset; 9743 9744 membar_enter(); /* make sure updated gnum visible */ 9745 9746 SFMMU_XCALL_STATS(NULL); 9747 9748 /* xcall to others on the same MMU to invalidate ctx */ 9749 cpuset = mmu_ctxp->mmu_cpuset; 9750 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum); 9751 CPUSET_DEL(cpuset, CPU->cpu_id); 9752 CPUSET_AND(cpuset, cpu_ready_set); 9753 9754 /* 9755 * Pass in INVALID_CONTEXT as the first parameter to 9756 * sfmmu_raise_tsb_exception, which invalidates the context 9757 * of any process running on the CPUs in the MMU. 9758 */ 9759 xt_some(cpuset, sfmmu_raise_tsb_exception, 9760 INVALID_CONTEXT, INVALID_CONTEXT); 9761 xt_sync(cpuset); 9762 9763 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9764 } 9765 9766 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9767 sfmmu_setctx_sec(INVALID_CONTEXT); 9768 sfmmu_clear_utsbinfo(); 9769 } 9770 9771 /* 9772 * No xcall is needed here. For sun4u systems all CPUs in context 9773 * domain share a single physical MMU therefore it's enough to flush 9774 * TLB on local CPU. On sun4v systems we use 1 global context 9775 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9776 * handler. Note that vtag_flushall_uctxs() is called 9777 * for Ultra II machine, where the equivalent flushall functionality 9778 * is implemented in SW, and only user ctx TLB entries are flushed. 9779 */ 9780 if (&vtag_flushall_uctxs != NULL) { 9781 vtag_flushall_uctxs(); 9782 } else { 9783 vtag_flushall(); 9784 } 9785 9786 /* reset mmu cnum, skips cnum 0 and 1 */ 9787 if (reset_cnum == B_TRUE) 9788 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9789 9790 done: 9791 mutex_exit(&mmu_ctxp->mmu_lock); 9792 } 9793 9794 9795 /* 9796 * For multi-threaded process, set the process context to INVALID_CONTEXT 9797 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9798 * process, we can just load the MMU state directly without having to 9799 * set context invalid. Caller must hold the hat lock since we don't 9800 * acquire it here. 9801 */ 9802 static void 9803 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9804 { 9805 uint_t cnum; 9806 uint_t pstate_save; 9807 9808 ASSERT(sfmmup != ksfmmup); 9809 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9810 9811 kpreempt_disable(); 9812 9813 /* 9814 * We check whether the pass'ed-in sfmmup is the same as the 9815 * current running proc. This is to makes sure the current proc 9816 * stays single-threaded if it already is. 9817 */ 9818 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9819 (curthread->t_procp->p_lwpcnt == 1)) { 9820 /* single-thread */ 9821 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9822 if (cnum != INVALID_CONTEXT) { 9823 uint_t curcnum; 9824 /* 9825 * Disable interrupts to prevent race condition 9826 * with sfmmu_ctx_wrap_around ctx invalidation. 9827 * In sun4v, ctx invalidation involves setting 9828 * TSB to NULL, hence, interrupts should be disabled 9829 * untill after sfmmu_load_mmustate is completed. 9830 */ 9831 pstate_save = sfmmu_disable_intrs(); 9832 curcnum = sfmmu_getctx_sec(); 9833 if (curcnum == cnum) 9834 sfmmu_load_mmustate(sfmmup); 9835 sfmmu_enable_intrs(pstate_save); 9836 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 9837 } 9838 } else { 9839 /* 9840 * multi-thread 9841 * or when sfmmup is not the same as the curproc. 9842 */ 9843 sfmmu_invalidate_ctx(sfmmup); 9844 } 9845 9846 kpreempt_enable(); 9847 } 9848 9849 9850 /* 9851 * Replace the specified TSB with a new TSB. This function gets called when 9852 * we grow, or shrink a TSB. When swapping in a TSB (TSB_SWAPIN), the 9853 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 9854 * (8K). 9855 * 9856 * Caller must hold the HAT lock, but should assume any tsb_info 9857 * pointers it has are no longer valid after calling this function. 9858 * 9859 * Return values: 9860 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 9861 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 9862 * something to this tsbinfo/TSB 9863 * TSB_SUCCESS Operation succeeded 9864 */ 9865 static tsb_replace_rc_t 9866 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 9867 hatlock_t *hatlockp, uint_t flags) 9868 { 9869 struct tsb_info *new_tsbinfo = NULL; 9870 struct tsb_info *curtsb, *prevtsb; 9871 uint_t tte_sz_mask; 9872 int i; 9873 9874 ASSERT(sfmmup != ksfmmup); 9875 ASSERT(sfmmup->sfmmu_ismhat == 0); 9876 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9877 ASSERT(szc <= tsb_max_growsize); 9878 9879 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 9880 return (TSB_LOSTRACE); 9881 9882 /* 9883 * Find the tsb_info ahead of this one in the list, and 9884 * also make sure that the tsb_info passed in really 9885 * exists! 9886 */ 9887 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9888 curtsb != old_tsbinfo && curtsb != NULL; 9889 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9890 ; 9891 ASSERT(curtsb != NULL); 9892 9893 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9894 /* 9895 * The process is swapped out, so just set the new size 9896 * code. When it swaps back in, we'll allocate a new one 9897 * of the new chosen size. 9898 */ 9899 curtsb->tsb_szc = szc; 9900 return (TSB_SUCCESS); 9901 } 9902 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 9903 9904 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 9905 9906 /* 9907 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 9908 * If we fail to allocate a TSB, exit. 9909 * 9910 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 9911 * then try 4M slab after the initial alloc fails. 9912 * 9913 * If tsb swapin with tsb size > 4M, then try 4M after the 9914 * initial alloc fails. 9915 */ 9916 sfmmu_hat_exit(hatlockp); 9917 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9918 tte_sz_mask, flags, sfmmup) && 9919 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9920 (!(flags & TSB_SWAPIN) && 9921 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9922 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9923 tte_sz_mask, flags, sfmmup))) { 9924 (void) sfmmu_hat_enter(sfmmup); 9925 if (!(flags & TSB_SWAPIN)) 9926 SFMMU_STAT(sf_tsb_resize_failures); 9927 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9928 return (TSB_ALLOCFAIL); 9929 } 9930 (void) sfmmu_hat_enter(sfmmup); 9931 9932 /* 9933 * Re-check to make sure somebody else didn't muck with us while we 9934 * didn't hold the HAT lock. If the process swapped out, fine, just 9935 * exit; this can happen if we try to shrink the TSB from the context 9936 * of another process (such as on an ISM unmap), though it is rare. 9937 */ 9938 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9939 SFMMU_STAT(sf_tsb_resize_failures); 9940 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9941 sfmmu_hat_exit(hatlockp); 9942 sfmmu_tsbinfo_free(new_tsbinfo); 9943 (void) sfmmu_hat_enter(sfmmup); 9944 return (TSB_LOSTRACE); 9945 } 9946 9947 #ifdef DEBUG 9948 /* Reverify that the tsb_info still exists.. for debugging only */ 9949 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9950 curtsb != old_tsbinfo && curtsb != NULL; 9951 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9952 ; 9953 ASSERT(curtsb != NULL); 9954 #endif /* DEBUG */ 9955 9956 /* 9957 * Quiesce any CPUs running this process on their next TLB miss 9958 * so they atomically see the new tsb_info. We temporarily set the 9959 * context to invalid context so new threads that come on processor 9960 * after we do the xcall to cpusran will also serialize behind the 9961 * HAT lock on TLB miss and will see the new TSB. Since this short 9962 * race with a new thread coming on processor is relatively rare, 9963 * this synchronization mechanism should be cheaper than always 9964 * pausing all CPUs for the duration of the setup, which is what 9965 * the old implementation did. This is particuarly true if we are 9966 * copying a huge chunk of memory around during that window. 9967 * 9968 * The memory barriers are to make sure things stay consistent 9969 * with resume() since it does not hold the HAT lock while 9970 * walking the list of tsb_info structures. 9971 */ 9972 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9973 /* The TSB is either growing or shrinking. */ 9974 sfmmu_invalidate_ctx(sfmmup); 9975 } else { 9976 /* 9977 * It is illegal to swap in TSBs from a process other 9978 * than a process being swapped in. This in turn 9979 * implies we do not have a valid MMU context here 9980 * since a process needs one to resolve translation 9981 * misses. 9982 */ 9983 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9984 } 9985 9986 #ifdef DEBUG 9987 ASSERT(max_mmu_ctxdoms > 0); 9988 9989 /* 9990 * Process should have INVALID_CONTEXT on all MMUs 9991 */ 9992 for (i = 0; i < max_mmu_ctxdoms; i++) { 9993 9994 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9995 } 9996 #endif 9997 9998 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9999 membar_stst(); /* strict ordering required */ 10000 if (prevtsb) 10001 prevtsb->tsb_next = new_tsbinfo; 10002 else 10003 sfmmup->sfmmu_tsb = new_tsbinfo; 10004 membar_enter(); /* make sure new TSB globally visible */ 10005 10006 /* 10007 * We need to migrate TSB entries from the old TSB to the new TSB 10008 * if tsb_remap_ttes is set and the TSB is growing. 10009 */ 10010 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 10011 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 10012 10013 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 10014 10015 /* 10016 * Drop the HAT lock to free our old tsb_info. 10017 */ 10018 sfmmu_hat_exit(hatlockp); 10019 10020 if ((flags & TSB_GROW) == TSB_GROW) { 10021 SFMMU_STAT(sf_tsb_grow); 10022 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 10023 SFMMU_STAT(sf_tsb_shrink); 10024 } 10025 10026 sfmmu_tsbinfo_free(old_tsbinfo); 10027 10028 (void) sfmmu_hat_enter(sfmmup); 10029 return (TSB_SUCCESS); 10030 } 10031 10032 /* 10033 * This function will re-program hat pgsz array, and invalidate the 10034 * process' context, forcing the process to switch to another 10035 * context on the next TLB miss, and therefore start using the 10036 * TLB that is reprogrammed for the new page sizes. 10037 */ 10038 void 10039 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10040 { 10041 int i; 10042 hatlock_t *hatlockp = NULL; 10043 10044 hatlockp = sfmmu_hat_enter(sfmmup); 10045 /* USIII+-IV+ optimization, requires hat lock */ 10046 if (tmp_pgsz) { 10047 for (i = 0; i < mmu_page_sizes; i++) 10048 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10049 } 10050 SFMMU_STAT(sf_tlb_reprog_pgsz); 10051 10052 sfmmu_invalidate_ctx(sfmmup); 10053 10054 sfmmu_hat_exit(hatlockp); 10055 } 10056 10057 /* 10058 * The scd_rttecnt field in the SCD must be updated to take account of the 10059 * regions which it contains. 10060 */ 10061 static void 10062 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10063 { 10064 uint_t rid; 10065 uint_t i, j; 10066 ulong_t w; 10067 sf_region_t *rgnp; 10068 10069 ASSERT(srdp != NULL); 10070 10071 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10072 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10073 continue; 10074 } 10075 10076 j = 0; 10077 while (w) { 10078 if (!(w & 0x1)) { 10079 j++; 10080 w >>= 1; 10081 continue; 10082 } 10083 rid = (i << BT_ULSHIFT) | j; 10084 j++; 10085 w >>= 1; 10086 10087 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10088 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10089 rgnp = srdp->srd_hmergnp[rid]; 10090 ASSERT(rgnp->rgn_refcnt > 0); 10091 ASSERT(rgnp->rgn_id == rid); 10092 10093 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10094 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10095 10096 /* 10097 * Maintain the tsb0 inflation cnt for the regions 10098 * in the SCD. 10099 */ 10100 if (rgnp->rgn_pgszc >= TTE4M) { 10101 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10102 rgnp->rgn_size >> 10103 (TTE_PAGE_SHIFT(TTE8K) + 2); 10104 } 10105 } 10106 } 10107 } 10108 10109 /* 10110 * This function assumes that there are either four or six supported page 10111 * sizes and at most two programmable TLBs, so we need to decide which 10112 * page sizes are most important and then tell the MMU layer so it 10113 * can adjust the TLB page sizes accordingly (if supported). 10114 * 10115 * If these assumptions change, this function will need to be 10116 * updated to support whatever the new limits are. 10117 * 10118 * The growing flag is nonzero if we are growing the address space, 10119 * and zero if it is shrinking. This allows us to decide whether 10120 * to grow or shrink our TSB, depending upon available memory 10121 * conditions. 10122 */ 10123 static void 10124 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10125 { 10126 uint64_t ttecnt[MMU_PAGE_SIZES]; 10127 uint64_t tte8k_cnt, tte4m_cnt; 10128 uint8_t i; 10129 int sectsb_thresh; 10130 10131 /* 10132 * Kernel threads, processes with small address spaces not using 10133 * large pages, and dummy ISM HATs need not apply. 10134 */ 10135 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10136 return; 10137 10138 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10139 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10140 return; 10141 10142 for (i = 0; i < mmu_page_sizes; i++) { 10143 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10144 sfmmup->sfmmu_ismttecnt[i]; 10145 } 10146 10147 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10148 if (&mmu_check_page_sizes) 10149 mmu_check_page_sizes(sfmmup, ttecnt); 10150 10151 /* 10152 * Calculate the number of 8k ttes to represent the span of these 10153 * pages. 10154 */ 10155 tte8k_cnt = ttecnt[TTE8K] + 10156 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10157 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10158 if (mmu_page_sizes == max_mmu_page_sizes) { 10159 tte4m_cnt = ttecnt[TTE4M] + 10160 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10161 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10162 } else { 10163 tte4m_cnt = ttecnt[TTE4M]; 10164 } 10165 10166 /* 10167 * Inflate tte8k_cnt to allow for region large page allocation failure. 10168 */ 10169 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10170 10171 /* 10172 * Inflate TSB sizes by a factor of 2 if this process 10173 * uses 4M text pages to minimize extra conflict misses 10174 * in the first TSB since without counting text pages 10175 * 8K TSB may become too small. 10176 * 10177 * Also double the size of the second TSB to minimize 10178 * extra conflict misses due to competition between 4M text pages 10179 * and data pages. 10180 * 10181 * We need to adjust the second TSB allocation threshold by the 10182 * inflation factor, since there is no point in creating a second 10183 * TSB when we know all the mappings can fit in the I/D TLBs. 10184 */ 10185 sectsb_thresh = tsb_sectsb_threshold; 10186 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10187 tte8k_cnt <<= 1; 10188 tte4m_cnt <<= 1; 10189 sectsb_thresh <<= 1; 10190 } 10191 10192 /* 10193 * Check to see if our TSB is the right size; we may need to 10194 * grow or shrink it. If the process is small, our work is 10195 * finished at this point. 10196 */ 10197 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10198 return; 10199 } 10200 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10201 } 10202 10203 static void 10204 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10205 uint64_t tte4m_cnt, int sectsb_thresh) 10206 { 10207 int tsb_bits; 10208 uint_t tsb_szc; 10209 struct tsb_info *tsbinfop; 10210 hatlock_t *hatlockp = NULL; 10211 10212 hatlockp = sfmmu_hat_enter(sfmmup); 10213 ASSERT(hatlockp != NULL); 10214 tsbinfop = sfmmup->sfmmu_tsb; 10215 ASSERT(tsbinfop != NULL); 10216 10217 /* 10218 * If we're growing, select the size based on RSS. If we're 10219 * shrinking, leave some room so we don't have to turn around and 10220 * grow again immediately. 10221 */ 10222 if (growing) 10223 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10224 else 10225 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10226 10227 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10228 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10229 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10230 hatlockp, TSB_SHRINK); 10231 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10232 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10233 hatlockp, TSB_GROW); 10234 } 10235 tsbinfop = sfmmup->sfmmu_tsb; 10236 10237 /* 10238 * With the TLB and first TSB out of the way, we need to see if 10239 * we need a second TSB for 4M pages. If we managed to reprogram 10240 * the TLB page sizes above, the process will start using this new 10241 * TSB right away; otherwise, it will start using it on the next 10242 * context switch. Either way, it's no big deal so there's no 10243 * synchronization with the trap handlers here unless we grow the 10244 * TSB (in which case it's required to prevent using the old one 10245 * after it's freed). Note: second tsb is required for 32M/256M 10246 * page sizes. 10247 */ 10248 if (tte4m_cnt > sectsb_thresh) { 10249 /* 10250 * If we're growing, select the size based on RSS. If we're 10251 * shrinking, leave some room so we don't have to turn 10252 * around and grow again immediately. 10253 */ 10254 if (growing) 10255 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10256 else 10257 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10258 if (tsbinfop->tsb_next == NULL) { 10259 struct tsb_info *newtsb; 10260 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10261 0 : TSB_ALLOC; 10262 10263 sfmmu_hat_exit(hatlockp); 10264 10265 /* 10266 * Try to allocate a TSB for 4[32|256]M pages. If we 10267 * can't get the size we want, retry w/a minimum sized 10268 * TSB. If that still didn't work, give up; we can 10269 * still run without one. 10270 */ 10271 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10272 TSB4M|TSB32M|TSB256M:TSB4M; 10273 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10274 allocflags, sfmmup)) && 10275 (tsb_szc <= TSB_4M_SZCODE || 10276 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10277 tsb_bits, allocflags, sfmmup)) && 10278 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10279 tsb_bits, allocflags, sfmmup)) { 10280 return; 10281 } 10282 10283 hatlockp = sfmmu_hat_enter(sfmmup); 10284 10285 sfmmu_invalidate_ctx(sfmmup); 10286 10287 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10288 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10289 SFMMU_STAT(sf_tsb_sectsb_create); 10290 sfmmu_hat_exit(hatlockp); 10291 return; 10292 } else { 10293 /* 10294 * It's annoying, but possible for us 10295 * to get here.. we dropped the HAT lock 10296 * because of locking order in the kmem 10297 * allocator, and while we were off getting 10298 * our memory, some other thread decided to 10299 * do us a favor and won the race to get a 10300 * second TSB for this process. Sigh. 10301 */ 10302 sfmmu_hat_exit(hatlockp); 10303 sfmmu_tsbinfo_free(newtsb); 10304 return; 10305 } 10306 } 10307 10308 /* 10309 * We have a second TSB, see if it's big enough. 10310 */ 10311 tsbinfop = tsbinfop->tsb_next; 10312 10313 /* 10314 * Check to see if our second TSB is the right size; 10315 * we may need to grow or shrink it. 10316 * To prevent thrashing (e.g. growing the TSB on a 10317 * subsequent map operation), only try to shrink if 10318 * the TSB reach exceeds twice the virtual address 10319 * space size. 10320 */ 10321 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10322 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10323 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10324 tsb_szc, hatlockp, TSB_SHRINK); 10325 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10326 TSB_OK_GROW()) { 10327 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10328 tsb_szc, hatlockp, TSB_GROW); 10329 } 10330 } 10331 10332 sfmmu_hat_exit(hatlockp); 10333 } 10334 10335 /* 10336 * Free up a sfmmu 10337 * Since the sfmmu is currently embedded in the hat struct we simply zero 10338 * out our fields and free up the ism map blk list if any. 10339 */ 10340 static void 10341 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10342 { 10343 ism_blk_t *blkp, *nx_blkp; 10344 #ifdef DEBUG 10345 ism_map_t *map; 10346 int i; 10347 #endif 10348 10349 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10350 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10351 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10352 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10353 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10354 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10355 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10356 10357 sfmmup->sfmmu_free = 0; 10358 sfmmup->sfmmu_ismhat = 0; 10359 10360 blkp = sfmmup->sfmmu_iblk; 10361 sfmmup->sfmmu_iblk = NULL; 10362 10363 while (blkp) { 10364 #ifdef DEBUG 10365 map = blkp->iblk_maps; 10366 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10367 ASSERT(map[i].imap_seg == 0); 10368 ASSERT(map[i].imap_ismhat == NULL); 10369 ASSERT(map[i].imap_ment == NULL); 10370 } 10371 #endif 10372 nx_blkp = blkp->iblk_next; 10373 blkp->iblk_next = NULL; 10374 blkp->iblk_nextpa = (uint64_t)-1; 10375 kmem_cache_free(ism_blk_cache, blkp); 10376 blkp = nx_blkp; 10377 } 10378 } 10379 10380 /* 10381 * Locking primitves accessed by HATLOCK macros 10382 */ 10383 10384 #define SFMMU_SPL_MTX (0x0) 10385 #define SFMMU_ML_MTX (0x1) 10386 10387 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10388 SPL_HASH(pg) : MLIST_HASH(pg)) 10389 10390 kmutex_t * 10391 sfmmu_page_enter(struct page *pp) 10392 { 10393 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10394 } 10395 10396 void 10397 sfmmu_page_exit(kmutex_t *spl) 10398 { 10399 mutex_exit(spl); 10400 } 10401 10402 int 10403 sfmmu_page_spl_held(struct page *pp) 10404 { 10405 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10406 } 10407 10408 kmutex_t * 10409 sfmmu_mlist_enter(struct page *pp) 10410 { 10411 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10412 } 10413 10414 void 10415 sfmmu_mlist_exit(kmutex_t *mml) 10416 { 10417 mutex_exit(mml); 10418 } 10419 10420 int 10421 sfmmu_mlist_held(struct page *pp) 10422 { 10423 10424 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10425 } 10426 10427 /* 10428 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For 10429 * sfmmu_mlist_enter() case mml_table lock array is used and for 10430 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10431 * 10432 * The lock is taken on a root page so that it protects an operation on all 10433 * constituent pages of a large page pp belongs to. 10434 * 10435 * The routine takes a lock from the appropriate array. The lock is determined 10436 * by hashing the root page. After taking the lock this routine checks if the 10437 * root page has the same size code that was used to determine the root (i.e 10438 * that root hasn't changed). If root page has the expected p_szc field we 10439 * have the right lock and it's returned to the caller. If root's p_szc 10440 * decreased we release the lock and retry from the beginning. This case can 10441 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10442 * value and taking the lock. The number of retries due to p_szc decrease is 10443 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10444 * determined by hashing pp itself. 10445 * 10446 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10447 * possible that p_szc can increase. To increase p_szc a thread has to lock 10448 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10449 * callers that don't hold a page locked recheck if hmeblk through which pp 10450 * was found still maps this pp. If it doesn't map it anymore returned lock 10451 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10452 * p_szc increase after taking the lock it returns this lock without further 10453 * retries because in this case the caller doesn't care about which lock was 10454 * taken. The caller will drop it right away. 10455 * 10456 * After the routine returns it's guaranteed that hat_page_demote() can't 10457 * change p_szc field of any of constituent pages of a large page pp belongs 10458 * to as long as pp was either locked at least SHARED prior to this call or 10459 * the caller finds that hment that pointed to this pp still references this 10460 * pp (this also assumes that the caller holds hme hash bucket lock so that 10461 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10462 * hat_pageunload()). 10463 */ 10464 static kmutex_t * 10465 sfmmu_mlspl_enter(struct page *pp, int type) 10466 { 10467 kmutex_t *mtx; 10468 uint_t prev_rszc = UINT_MAX; 10469 page_t *rootpp; 10470 uint_t szc; 10471 uint_t rszc; 10472 uint_t pszc = pp->p_szc; 10473 10474 ASSERT(pp != NULL); 10475 10476 again: 10477 if (pszc == 0) { 10478 mtx = SFMMU_MLSPL_MTX(type, pp); 10479 mutex_enter(mtx); 10480 return (mtx); 10481 } 10482 10483 /* The lock lives in the root page */ 10484 rootpp = PP_GROUPLEADER(pp, pszc); 10485 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10486 mutex_enter(mtx); 10487 10488 /* 10489 * Return mml in the following 3 cases: 10490 * 10491 * 1) If pp itself is root since if its p_szc decreased before we took 10492 * the lock pp is still the root of smaller szc page. And if its p_szc 10493 * increased it doesn't matter what lock we return (see comment in 10494 * front of this routine). 10495 * 10496 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10497 * large page we have the right lock since any previous potential 10498 * hat_page_demote() is done demoting from greater than current root's 10499 * p_szc because hat_page_demote() changes root's p_szc last. No 10500 * further hat_page_demote() can start or be in progress since it 10501 * would need the same lock we currently hold. 10502 * 10503 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10504 * matter what lock we return (see comment in front of this routine). 10505 */ 10506 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10507 rszc >= prev_rszc) { 10508 return (mtx); 10509 } 10510 10511 /* 10512 * hat_page_demote() could have decreased root's p_szc. 10513 * In this case pp's p_szc must also be smaller than pszc. 10514 * Retry. 10515 */ 10516 if (rszc < pszc) { 10517 szc = pp->p_szc; 10518 if (szc < pszc) { 10519 mutex_exit(mtx); 10520 pszc = szc; 10521 goto again; 10522 } 10523 /* 10524 * pp's p_szc increased after it was decreased. 10525 * page cannot be mapped. Return current lock. The caller 10526 * will drop it right away. 10527 */ 10528 return (mtx); 10529 } 10530 10531 /* 10532 * root's p_szc is greater than pp's p_szc. 10533 * hat_page_demote() is not done with all pages 10534 * yet. Wait for it to complete. 10535 */ 10536 mutex_exit(mtx); 10537 rootpp = PP_GROUPLEADER(rootpp, rszc); 10538 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10539 mutex_enter(mtx); 10540 mutex_exit(mtx); 10541 prev_rszc = rszc; 10542 goto again; 10543 } 10544 10545 static int 10546 sfmmu_mlspl_held(struct page *pp, int type) 10547 { 10548 kmutex_t *mtx; 10549 10550 ASSERT(pp != NULL); 10551 /* The lock lives in the root page */ 10552 pp = PP_PAGEROOT(pp); 10553 ASSERT(pp != NULL); 10554 10555 mtx = SFMMU_MLSPL_MTX(type, pp); 10556 return (MUTEX_HELD(mtx)); 10557 } 10558 10559 static uint_t 10560 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10561 { 10562 struct hme_blk *hblkp; 10563 10564 10565 if (freehblkp != NULL) { 10566 mutex_enter(&freehblkp_lock); 10567 if (freehblkp != NULL) { 10568 /* 10569 * If the current thread is owning hblk_reserve OR 10570 * critical request from sfmmu_hblk_steal() 10571 * let it succeed even if freehblkcnt is really low. 10572 */ 10573 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10574 SFMMU_STAT(sf_get_free_throttle); 10575 mutex_exit(&freehblkp_lock); 10576 return (0); 10577 } 10578 freehblkcnt--; 10579 *hmeblkpp = freehblkp; 10580 hblkp = *hmeblkpp; 10581 freehblkp = hblkp->hblk_next; 10582 mutex_exit(&freehblkp_lock); 10583 hblkp->hblk_next = NULL; 10584 SFMMU_STAT(sf_get_free_success); 10585 10586 ASSERT(hblkp->hblk_hmecnt == 0); 10587 ASSERT(hblkp->hblk_vcnt == 0); 10588 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10589 10590 return (1); 10591 } 10592 mutex_exit(&freehblkp_lock); 10593 } 10594 10595 /* Check cpu hblk pending queues */ 10596 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10597 hblkp = *hmeblkpp; 10598 hblkp->hblk_next = NULL; 10599 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10600 10601 ASSERT(hblkp->hblk_hmecnt == 0); 10602 ASSERT(hblkp->hblk_vcnt == 0); 10603 10604 return (1); 10605 } 10606 10607 SFMMU_STAT(sf_get_free_fail); 10608 return (0); 10609 } 10610 10611 static uint_t 10612 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10613 { 10614 struct hme_blk *hblkp; 10615 10616 ASSERT(hmeblkp->hblk_hmecnt == 0); 10617 ASSERT(hmeblkp->hblk_vcnt == 0); 10618 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10619 10620 /* 10621 * If the current thread is mapping into kernel space, 10622 * let it succede even if freehblkcnt is max 10623 * so that it will avoid freeing it to kmem. 10624 * This will prevent stack overflow due to 10625 * possible recursion since kmem_cache_free() 10626 * might require creation of a slab which 10627 * in turn needs an hmeblk to map that slab; 10628 * let's break this vicious chain at the first 10629 * opportunity. 10630 */ 10631 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10632 mutex_enter(&freehblkp_lock); 10633 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10634 SFMMU_STAT(sf_put_free_success); 10635 freehblkcnt++; 10636 hmeblkp->hblk_next = freehblkp; 10637 freehblkp = hmeblkp; 10638 mutex_exit(&freehblkp_lock); 10639 return (1); 10640 } 10641 mutex_exit(&freehblkp_lock); 10642 } 10643 10644 /* 10645 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10646 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10647 * we are not in the process of mapping into kernel space. 10648 */ 10649 ASSERT(!critical); 10650 while (freehblkcnt > HBLK_RESERVE_CNT) { 10651 mutex_enter(&freehblkp_lock); 10652 if (freehblkcnt > HBLK_RESERVE_CNT) { 10653 freehblkcnt--; 10654 hblkp = freehblkp; 10655 freehblkp = hblkp->hblk_next; 10656 mutex_exit(&freehblkp_lock); 10657 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10658 kmem_cache_free(sfmmu8_cache, hblkp); 10659 continue; 10660 } 10661 mutex_exit(&freehblkp_lock); 10662 } 10663 SFMMU_STAT(sf_put_free_fail); 10664 return (0); 10665 } 10666 10667 static void 10668 sfmmu_hblk_swap(struct hme_blk *new) 10669 { 10670 struct hme_blk *old, *hblkp, *prev; 10671 uint64_t newpa; 10672 caddr_t base, vaddr, endaddr; 10673 struct hmehash_bucket *hmebp; 10674 struct sf_hment *osfhme, *nsfhme; 10675 page_t *pp; 10676 kmutex_t *pml; 10677 tte_t tte; 10678 struct hme_blk *list = NULL; 10679 10680 #ifdef DEBUG 10681 hmeblk_tag hblktag; 10682 struct hme_blk *found; 10683 #endif 10684 old = HBLK_RESERVE; 10685 ASSERT(!old->hblk_shared); 10686 10687 /* 10688 * save pa before bcopy clobbers it 10689 */ 10690 newpa = new->hblk_nextpa; 10691 10692 base = (caddr_t)get_hblk_base(old); 10693 endaddr = base + get_hblk_span(old); 10694 10695 /* 10696 * acquire hash bucket lock. 10697 */ 10698 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10699 SFMMU_INVALID_SHMERID); 10700 10701 /* 10702 * copy contents from old to new 10703 */ 10704 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10705 10706 /* 10707 * add new to hash chain 10708 */ 10709 sfmmu_hblk_hash_add(hmebp, new, newpa); 10710 10711 /* 10712 * search hash chain for hblk_reserve; this needs to be performed 10713 * after adding new, otherwise prev won't correspond to the hblk which 10714 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10715 * remove old later. 10716 */ 10717 for (prev = NULL, 10718 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10719 prev = hblkp, hblkp = hblkp->hblk_next) 10720 ; 10721 10722 if (hblkp != old) 10723 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10724 10725 /* 10726 * p_mapping list is still pointing to hments in hblk_reserve; 10727 * fix up p_mapping list so that they point to hments in new. 10728 * 10729 * Since all these mappings are created by hblk_reserve_thread 10730 * on the way and it's using at least one of the buffers from each of 10731 * the newly minted slabs, there is no danger of any of these 10732 * mappings getting unloaded by another thread. 10733 * 10734 * tsbmiss could only modify ref/mod bits of hments in old/new. 10735 * Since all of these hments hold mappings established by segkmem 10736 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10737 * have no meaning for the mappings in hblk_reserve. hments in 10738 * old and new are identical except for ref/mod bits. 10739 */ 10740 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10741 10742 HBLKTOHME(osfhme, old, vaddr); 10743 sfmmu_copytte(&osfhme->hme_tte, &tte); 10744 10745 if (TTE_IS_VALID(&tte)) { 10746 if ((pp = osfhme->hme_page) == NULL) 10747 panic("sfmmu_hblk_swap: page not mapped"); 10748 10749 pml = sfmmu_mlist_enter(pp); 10750 10751 if (pp != osfhme->hme_page) 10752 panic("sfmmu_hblk_swap: mapping changed"); 10753 10754 HBLKTOHME(nsfhme, new, vaddr); 10755 10756 HME_ADD(nsfhme, pp); 10757 HME_SUB(osfhme, pp); 10758 10759 sfmmu_mlist_exit(pml); 10760 } 10761 } 10762 10763 /* 10764 * remove old from hash chain 10765 */ 10766 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10767 10768 #ifdef DEBUG 10769 10770 hblktag.htag_id = ksfmmup; 10771 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10772 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10773 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10774 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10775 10776 if (found != new) 10777 panic("sfmmu_hblk_swap: new hblk not found"); 10778 #endif 10779 10780 SFMMU_HASH_UNLOCK(hmebp); 10781 10782 /* 10783 * Reset hblk_reserve 10784 */ 10785 bzero((void *)old, HME8BLK_SZ); 10786 old->hblk_nextpa = va_to_pa((caddr_t)old); 10787 } 10788 10789 /* 10790 * Grab the mlist mutex for both pages passed in. 10791 * 10792 * low and high will be returned as pointers to the mutexes for these pages. 10793 * low refers to the mutex residing in the lower bin of the mlist hash, while 10794 * high refers to the mutex residing in the higher bin of the mlist hash. This 10795 * is due to the locking order restrictions on the same thread grabbing 10796 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10797 * 10798 * If both pages hash to the same mutex, only grab that single mutex, and 10799 * high will be returned as NULL 10800 * If the pages hash to different bins in the hash, grab the lower addressed 10801 * lock first and then the higher addressed lock in order to follow the locking 10802 * rules involved with the same thread grabbing multiple mlist mutexes. 10803 * low and high will both have non-NULL values. 10804 */ 10805 static void 10806 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10807 kmutex_t **low, kmutex_t **high) 10808 { 10809 kmutex_t *mml_targ, *mml_repl; 10810 10811 /* 10812 * no need to do the dance around szc as in sfmmu_mlist_enter() 10813 * because this routine is only called by hat_page_relocate() and all 10814 * targ and repl pages are already locked EXCL so szc can't change. 10815 */ 10816 10817 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10818 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10819 10820 if (mml_targ == mml_repl) { 10821 *low = mml_targ; 10822 *high = NULL; 10823 } else { 10824 if (mml_targ < mml_repl) { 10825 *low = mml_targ; 10826 *high = mml_repl; 10827 } else { 10828 *low = mml_repl; 10829 *high = mml_targ; 10830 } 10831 } 10832 10833 mutex_enter(*low); 10834 if (*high) 10835 mutex_enter(*high); 10836 } 10837 10838 static void 10839 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10840 { 10841 if (high) 10842 mutex_exit(high); 10843 mutex_exit(low); 10844 } 10845 10846 static hatlock_t * 10847 sfmmu_hat_enter(sfmmu_t *sfmmup) 10848 { 10849 hatlock_t *hatlockp; 10850 10851 if (sfmmup != ksfmmup) { 10852 hatlockp = TSB_HASH(sfmmup); 10853 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10854 return (hatlockp); 10855 } 10856 return (NULL); 10857 } 10858 10859 static hatlock_t * 10860 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10861 { 10862 hatlock_t *hatlockp; 10863 10864 if (sfmmup != ksfmmup) { 10865 hatlockp = TSB_HASH(sfmmup); 10866 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10867 return (NULL); 10868 return (hatlockp); 10869 } 10870 return (NULL); 10871 } 10872 10873 static void 10874 sfmmu_hat_exit(hatlock_t *hatlockp) 10875 { 10876 if (hatlockp != NULL) 10877 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10878 } 10879 10880 static void 10881 sfmmu_hat_lock_all(void) 10882 { 10883 int i; 10884 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10885 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10886 } 10887 10888 static void 10889 sfmmu_hat_unlock_all(void) 10890 { 10891 int i; 10892 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10893 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10894 } 10895 10896 int 10897 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10898 { 10899 ASSERT(sfmmup != ksfmmup); 10900 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10901 } 10902 10903 /* 10904 * Locking primitives to provide consistency between ISM unmap 10905 * and other operations. Since ISM unmap can take a long time, we 10906 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10907 * contention on the hatlock buckets while ISM segments are being 10908 * unmapped. The tradeoff is that the flags don't prevent priority 10909 * inversion from occurring, so we must request kernel priority in 10910 * case we have to sleep to keep from getting buried while holding 10911 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10912 * threads from running (for example, in sfmmu_uvatopfn()). 10913 */ 10914 static void 10915 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10916 { 10917 hatlock_t *hatlockp; 10918 10919 THREAD_KPRI_REQUEST(); 10920 if (!hatlock_held) 10921 hatlockp = sfmmu_hat_enter(sfmmup); 10922 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10923 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10924 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10925 if (!hatlock_held) 10926 sfmmu_hat_exit(hatlockp); 10927 } 10928 10929 static void 10930 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10931 { 10932 hatlock_t *hatlockp; 10933 10934 if (!hatlock_held) 10935 hatlockp = sfmmu_hat_enter(sfmmup); 10936 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10937 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10938 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10939 if (!hatlock_held) 10940 sfmmu_hat_exit(hatlockp); 10941 THREAD_KPRI_RELEASE(); 10942 } 10943 10944 /* 10945 * 10946 * Algorithm: 10947 * 10948 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10949 * hblks. 10950 * 10951 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10952 * 10953 * (a) try to return an hblk from reserve pool of free hblks; 10954 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10955 * and return hblk_reserve. 10956 * 10957 * (3) call kmem_cache_alloc() to allocate hblk; 10958 * 10959 * (a) if hblk_reserve_lock is held by the current thread, 10960 * atomically replace hblk_reserve by the hblk that is 10961 * returned by kmem_cache_alloc; release hblk_reserve_lock 10962 * and call kmem_cache_alloc() again. 10963 * (b) if reserve pool is not full, add the hblk that is 10964 * returned by kmem_cache_alloc to reserve pool and 10965 * call kmem_cache_alloc again. 10966 * 10967 */ 10968 static struct hme_blk * 10969 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10970 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10971 uint_t flags, uint_t rid) 10972 { 10973 struct hme_blk *hmeblkp = NULL; 10974 struct hme_blk *newhblkp; 10975 struct hme_blk *shw_hblkp = NULL; 10976 struct kmem_cache *sfmmu_cache = NULL; 10977 uint64_t hblkpa; 10978 ulong_t index; 10979 uint_t owner; /* set to 1 if using hblk_reserve */ 10980 uint_t forcefree; 10981 int sleep; 10982 sf_srd_t *srdp; 10983 sf_region_t *rgnp; 10984 10985 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10986 ASSERT(hblktag.htag_rid == rid); 10987 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10988 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10989 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10990 10991 /* 10992 * If segkmem is not created yet, allocate from static hmeblks 10993 * created at the end of startup_modules(). See the block comment 10994 * in startup_modules() describing how we estimate the number of 10995 * static hmeblks that will be needed during re-map. 10996 */ 10997 if (!hblk_alloc_dynamic) { 10998 10999 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11000 11001 if (size == TTE8K) { 11002 index = nucleus_hblk8.index; 11003 if (index >= nucleus_hblk8.len) { 11004 /* 11005 * If we panic here, see startup_modules() to 11006 * make sure that we are calculating the 11007 * number of hblk8's that we need correctly. 11008 */ 11009 prom_panic("no nucleus hblk8 to allocate"); 11010 } 11011 hmeblkp = 11012 (struct hme_blk *)&nucleus_hblk8.list[index]; 11013 nucleus_hblk8.index++; 11014 SFMMU_STAT(sf_hblk8_nalloc); 11015 } else { 11016 index = nucleus_hblk1.index; 11017 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 11018 /* 11019 * If we panic here, see startup_modules(). 11020 * Most likely you need to update the 11021 * calculation of the number of hblk1 elements 11022 * that the kernel needs to boot. 11023 */ 11024 prom_panic("no nucleus hblk1 to allocate"); 11025 } 11026 hmeblkp = 11027 (struct hme_blk *)&nucleus_hblk1.list[index]; 11028 nucleus_hblk1.index++; 11029 SFMMU_STAT(sf_hblk1_nalloc); 11030 } 11031 11032 goto hblk_init; 11033 } 11034 11035 SFMMU_HASH_UNLOCK(hmebp); 11036 11037 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11038 if (mmu_page_sizes == max_mmu_page_sizes) { 11039 if (size < TTE256M) 11040 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11041 size, flags); 11042 } else { 11043 if (size < TTE4M) 11044 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11045 size, flags); 11046 } 11047 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11048 /* 11049 * Shared hmes use per region bitmaps in rgn_hmeflag 11050 * rather than shadow hmeblks to keep track of the 11051 * mapping sizes which have been allocated for the region. 11052 * Here we cleanup old invalid hmeblks with this rid, 11053 * which may be left around by pageunload(). 11054 */ 11055 int ttesz; 11056 caddr_t va; 11057 caddr_t eva = vaddr + TTEBYTES(size); 11058 11059 ASSERT(sfmmup != KHATID); 11060 11061 srdp = sfmmup->sfmmu_srdp; 11062 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11063 rgnp = srdp->srd_hmergnp[rid]; 11064 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11065 ASSERT(rgnp->rgn_refcnt != 0); 11066 ASSERT(size <= rgnp->rgn_pgszc); 11067 11068 ttesz = HBLK_MIN_TTESZ; 11069 do { 11070 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11071 continue; 11072 } 11073 11074 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11075 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11076 } else if (ttesz < size) { 11077 for (va = vaddr; va < eva; 11078 va += TTEBYTES(ttesz)) { 11079 sfmmu_cleanup_rhblk(srdp, va, rid, 11080 ttesz); 11081 } 11082 } 11083 } while (++ttesz <= rgnp->rgn_pgszc); 11084 } 11085 11086 fill_hblk: 11087 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11088 11089 if (owner && size == TTE8K) { 11090 11091 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11092 /* 11093 * We are really in a tight spot. We already own 11094 * hblk_reserve and we need another hblk. In anticipation 11095 * of this kind of scenario, we specifically set aside 11096 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11097 * by owner of hblk_reserve. 11098 */ 11099 SFMMU_STAT(sf_hblk_recurse_cnt); 11100 11101 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11102 panic("sfmmu_hblk_alloc: reserve list is empty"); 11103 11104 goto hblk_verify; 11105 } 11106 11107 ASSERT(!owner); 11108 11109 if ((flags & HAT_NO_KALLOC) == 0) { 11110 11111 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11112 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11113 11114 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11115 hmeblkp = sfmmu_hblk_steal(size); 11116 } else { 11117 /* 11118 * if we are the owner of hblk_reserve, 11119 * swap hblk_reserve with hmeblkp and 11120 * start a fresh life. Hope things go 11121 * better this time. 11122 */ 11123 if (hblk_reserve_thread == curthread) { 11124 ASSERT(sfmmu_cache == sfmmu8_cache); 11125 sfmmu_hblk_swap(hmeblkp); 11126 hblk_reserve_thread = NULL; 11127 mutex_exit(&hblk_reserve_lock); 11128 goto fill_hblk; 11129 } 11130 /* 11131 * let's donate this hblk to our reserve list if 11132 * we are not mapping kernel range 11133 */ 11134 if (size == TTE8K && sfmmup != KHATID) { 11135 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11136 goto fill_hblk; 11137 } 11138 } 11139 } else { 11140 /* 11141 * We are here to map the slab in sfmmu8_cache; let's 11142 * check if we could tap our reserve list; if successful, 11143 * this will avoid the pain of going thru sfmmu_hblk_swap 11144 */ 11145 SFMMU_STAT(sf_hblk_slab_cnt); 11146 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11147 /* 11148 * let's start hblk_reserve dance 11149 */ 11150 SFMMU_STAT(sf_hblk_reserve_cnt); 11151 owner = 1; 11152 mutex_enter(&hblk_reserve_lock); 11153 hmeblkp = HBLK_RESERVE; 11154 hblk_reserve_thread = curthread; 11155 } 11156 } 11157 11158 hblk_verify: 11159 ASSERT(hmeblkp != NULL); 11160 set_hblk_sz(hmeblkp, size); 11161 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11162 SFMMU_HASH_LOCK(hmebp); 11163 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11164 if (newhblkp != NULL) { 11165 SFMMU_HASH_UNLOCK(hmebp); 11166 if (hmeblkp != HBLK_RESERVE) { 11167 /* 11168 * This is really tricky! 11169 * 11170 * vmem_alloc(vmem_seg_arena) 11171 * vmem_alloc(vmem_internal_arena) 11172 * segkmem_alloc(heap_arena) 11173 * vmem_alloc(heap_arena) 11174 * page_create() 11175 * hat_memload() 11176 * kmem_cache_free() 11177 * kmem_cache_alloc() 11178 * kmem_slab_create() 11179 * vmem_alloc(kmem_internal_arena) 11180 * segkmem_alloc(heap_arena) 11181 * vmem_alloc(heap_arena) 11182 * page_create() 11183 * hat_memload() 11184 * kmem_cache_free() 11185 * ... 11186 * 11187 * Thus, hat_memload() could call kmem_cache_free 11188 * for enough number of times that we could easily 11189 * hit the bottom of the stack or run out of reserve 11190 * list of vmem_seg structs. So, we must donate 11191 * this hblk to reserve list if it's allocated 11192 * from sfmmu8_cache *and* mapping kernel range. 11193 * We don't need to worry about freeing hmeblk1's 11194 * to kmem since they don't map any kmem slabs. 11195 * 11196 * Note: When segkmem supports largepages, we must 11197 * free hmeblk1's to reserve list as well. 11198 */ 11199 forcefree = (sfmmup == KHATID) ? 1 : 0; 11200 if (size == TTE8K && 11201 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11202 goto re_verify; 11203 } 11204 ASSERT(sfmmup != KHATID); 11205 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11206 } else { 11207 /* 11208 * Hey! we don't need hblk_reserve any more. 11209 */ 11210 ASSERT(owner); 11211 hblk_reserve_thread = NULL; 11212 mutex_exit(&hblk_reserve_lock); 11213 owner = 0; 11214 } 11215 re_verify: 11216 /* 11217 * let's check if the goodies are still present 11218 */ 11219 SFMMU_HASH_LOCK(hmebp); 11220 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11221 if (newhblkp != NULL) { 11222 /* 11223 * return newhblkp if it's not hblk_reserve; 11224 * if newhblkp is hblk_reserve, return it 11225 * _only if_ we are the owner of hblk_reserve. 11226 */ 11227 if (newhblkp != HBLK_RESERVE || owner) { 11228 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11229 newhblkp->hblk_shared); 11230 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11231 !newhblkp->hblk_shared); 11232 return (newhblkp); 11233 } else { 11234 /* 11235 * we just hit hblk_reserve in the hash and 11236 * we are not the owner of that; 11237 * 11238 * block until hblk_reserve_thread completes 11239 * swapping hblk_reserve and try the dance 11240 * once again. 11241 */ 11242 SFMMU_HASH_UNLOCK(hmebp); 11243 mutex_enter(&hblk_reserve_lock); 11244 mutex_exit(&hblk_reserve_lock); 11245 SFMMU_STAT(sf_hblk_reserve_hit); 11246 goto fill_hblk; 11247 } 11248 } else { 11249 /* 11250 * it's no more! try the dance once again. 11251 */ 11252 SFMMU_HASH_UNLOCK(hmebp); 11253 goto fill_hblk; 11254 } 11255 } 11256 11257 hblk_init: 11258 if (SFMMU_IS_SHMERID_VALID(rid)) { 11259 uint16_t tteflag = 0x1 << 11260 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11261 11262 if (!(rgnp->rgn_hmeflags & tteflag)) { 11263 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11264 } 11265 hmeblkp->hblk_shared = 1; 11266 } else { 11267 hmeblkp->hblk_shared = 0; 11268 } 11269 set_hblk_sz(hmeblkp, size); 11270 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11271 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11272 hmeblkp->hblk_tag = hblktag; 11273 hmeblkp->hblk_shadow = shw_hblkp; 11274 hblkpa = hmeblkp->hblk_nextpa; 11275 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11276 11277 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11278 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11279 ASSERT(hmeblkp->hblk_hmecnt == 0); 11280 ASSERT(hmeblkp->hblk_vcnt == 0); 11281 ASSERT(hmeblkp->hblk_lckcnt == 0); 11282 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11283 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11284 return (hmeblkp); 11285 } 11286 11287 /* 11288 * This function cleans up the hme_blk and returns it to the free list. 11289 */ 11290 /* ARGSUSED */ 11291 static void 11292 sfmmu_hblk_free(struct hme_blk **listp) 11293 { 11294 struct hme_blk *hmeblkp, *next_hmeblkp; 11295 int size; 11296 uint_t critical; 11297 uint64_t hblkpa; 11298 11299 ASSERT(*listp != NULL); 11300 11301 hmeblkp = *listp; 11302 while (hmeblkp != NULL) { 11303 next_hmeblkp = hmeblkp->hblk_next; 11304 ASSERT(!hmeblkp->hblk_hmecnt); 11305 ASSERT(!hmeblkp->hblk_vcnt); 11306 ASSERT(!hmeblkp->hblk_lckcnt); 11307 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11308 ASSERT(hmeblkp->hblk_shared == 0); 11309 ASSERT(hmeblkp->hblk_shw_bit == 0); 11310 ASSERT(hmeblkp->hblk_shadow == NULL); 11311 11312 hblkpa = va_to_pa((caddr_t)hmeblkp); 11313 ASSERT(hblkpa != (uint64_t)-1); 11314 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11315 11316 size = get_hblk_ttesz(hmeblkp); 11317 hmeblkp->hblk_next = NULL; 11318 hmeblkp->hblk_nextpa = hblkpa; 11319 11320 if (hmeblkp->hblk_nuc_bit == 0) { 11321 11322 if (size != TTE8K || 11323 !sfmmu_put_free_hblk(hmeblkp, critical)) 11324 kmem_cache_free(get_hblk_cache(hmeblkp), 11325 hmeblkp); 11326 } 11327 hmeblkp = next_hmeblkp; 11328 } 11329 } 11330 11331 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11332 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11333 11334 static uint_t sfmmu_hblk_steal_twice; 11335 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11336 11337 /* 11338 * Steal a hmeblk from user or kernel hme hash lists. 11339 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11340 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11341 * tap into critical reserve of freehblkp. 11342 * Note: We remain looping in this routine until we find one. 11343 */ 11344 static struct hme_blk * 11345 sfmmu_hblk_steal(int size) 11346 { 11347 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11348 struct hmehash_bucket *hmebp; 11349 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11350 uint64_t hblkpa; 11351 int i; 11352 uint_t loop_cnt = 0, critical; 11353 11354 for (;;) { 11355 /* Check cpu hblk pending queues */ 11356 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11357 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11358 ASSERT(hmeblkp->hblk_hmecnt == 0); 11359 ASSERT(hmeblkp->hblk_vcnt == 0); 11360 return (hmeblkp); 11361 } 11362 11363 if (size == TTE8K) { 11364 critical = 11365 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11366 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11367 return (hmeblkp); 11368 } 11369 11370 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 11371 uhmehash_steal_hand; 11372 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11373 11374 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11375 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11376 SFMMU_HASH_LOCK(hmebp); 11377 hmeblkp = hmebp->hmeblkp; 11378 hblkpa = hmebp->hmeh_nextpa; 11379 pr_hblk = NULL; 11380 while (hmeblkp) { 11381 /* 11382 * check if it is a hmeblk that is not locked 11383 * and not shared. skip shadow hmeblks with 11384 * shadow_mask set i.e valid count non zero. 11385 */ 11386 if ((get_hblk_ttesz(hmeblkp) == size) && 11387 (hmeblkp->hblk_shw_bit == 0 || 11388 hmeblkp->hblk_vcnt == 0) && 11389 (hmeblkp->hblk_lckcnt == 0)) { 11390 /* 11391 * there is a high probability that we 11392 * will find a free one. search some 11393 * buckets for a free hmeblk initially 11394 * before unloading a valid hmeblk. 11395 */ 11396 if ((hmeblkp->hblk_vcnt == 0 && 11397 hmeblkp->hblk_hmecnt == 0) || (i >= 11398 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11399 if (sfmmu_steal_this_hblk(hmebp, 11400 hmeblkp, hblkpa, pr_hblk)) { 11401 /* 11402 * Hblk is unloaded 11403 * successfully 11404 */ 11405 break; 11406 } 11407 } 11408 } 11409 pr_hblk = hmeblkp; 11410 hblkpa = hmeblkp->hblk_nextpa; 11411 hmeblkp = hmeblkp->hblk_next; 11412 } 11413 11414 SFMMU_HASH_UNLOCK(hmebp); 11415 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11416 hmebp = uhme_hash; 11417 } 11418 uhmehash_steal_hand = hmebp; 11419 11420 if (hmeblkp != NULL) 11421 break; 11422 11423 /* 11424 * in the worst case, look for a free one in the kernel 11425 * hash table. 11426 */ 11427 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11428 SFMMU_HASH_LOCK(hmebp); 11429 hmeblkp = hmebp->hmeblkp; 11430 hblkpa = hmebp->hmeh_nextpa; 11431 pr_hblk = NULL; 11432 while (hmeblkp) { 11433 /* 11434 * check if it is free hmeblk 11435 */ 11436 if ((get_hblk_ttesz(hmeblkp) == size) && 11437 (hmeblkp->hblk_lckcnt == 0) && 11438 (hmeblkp->hblk_vcnt == 0) && 11439 (hmeblkp->hblk_hmecnt == 0)) { 11440 if (sfmmu_steal_this_hblk(hmebp, 11441 hmeblkp, hblkpa, pr_hblk)) { 11442 break; 11443 } else { 11444 /* 11445 * Cannot fail since we have 11446 * hash lock. 11447 */ 11448 panic("fail to steal?"); 11449 } 11450 } 11451 11452 pr_hblk = hmeblkp; 11453 hblkpa = hmeblkp->hblk_nextpa; 11454 hmeblkp = hmeblkp->hblk_next; 11455 } 11456 11457 SFMMU_HASH_UNLOCK(hmebp); 11458 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11459 hmebp = khme_hash; 11460 } 11461 11462 if (hmeblkp != NULL) 11463 break; 11464 sfmmu_hblk_steal_twice++; 11465 } 11466 return (hmeblkp); 11467 } 11468 11469 /* 11470 * This routine does real work to prepare a hblk to be "stolen" by 11471 * unloading the mappings, updating shadow counts .... 11472 * It returns 1 if the block is ready to be reused (stolen), or 0 11473 * means the block cannot be stolen yet- pageunload is still working 11474 * on this hblk. 11475 */ 11476 static int 11477 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11478 uint64_t hblkpa, struct hme_blk *pr_hblk) 11479 { 11480 int shw_size, vshift; 11481 struct hme_blk *shw_hblkp; 11482 caddr_t vaddr; 11483 uint_t shw_mask, newshw_mask; 11484 struct hme_blk *list = NULL; 11485 11486 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11487 11488 /* 11489 * check if the hmeblk is free, unload if necessary 11490 */ 11491 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11492 sfmmu_t *sfmmup; 11493 demap_range_t dmr; 11494 11495 sfmmup = hblktosfmmu(hmeblkp); 11496 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11497 return (0); 11498 } 11499 DEMAP_RANGE_INIT(sfmmup, &dmr); 11500 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11501 (caddr_t)get_hblk_base(hmeblkp), 11502 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11503 DEMAP_RANGE_FLUSH(&dmr); 11504 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11505 /* 11506 * Pageunload is working on the same hblk. 11507 */ 11508 return (0); 11509 } 11510 11511 sfmmu_hblk_steal_unload_count++; 11512 } 11513 11514 ASSERT(hmeblkp->hblk_lckcnt == 0); 11515 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11516 11517 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11518 hmeblkp->hblk_nextpa = hblkpa; 11519 11520 shw_hblkp = hmeblkp->hblk_shadow; 11521 if (shw_hblkp) { 11522 ASSERT(!hmeblkp->hblk_shared); 11523 shw_size = get_hblk_ttesz(shw_hblkp); 11524 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11525 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11526 ASSERT(vshift < 8); 11527 /* 11528 * Atomically clear shadow mask bit 11529 */ 11530 do { 11531 shw_mask = shw_hblkp->hblk_shw_mask; 11532 ASSERT(shw_mask & (1 << vshift)); 11533 newshw_mask = shw_mask & ~(1 << vshift); 11534 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 11535 shw_mask, newshw_mask); 11536 } while (newshw_mask != shw_mask); 11537 hmeblkp->hblk_shadow = NULL; 11538 } 11539 11540 /* 11541 * remove shadow bit if we are stealing an unused shadow hmeblk. 11542 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11543 * we are indeed allocating a shadow hmeblk. 11544 */ 11545 hmeblkp->hblk_shw_bit = 0; 11546 11547 if (hmeblkp->hblk_shared) { 11548 sf_srd_t *srdp; 11549 sf_region_t *rgnp; 11550 uint_t rid; 11551 11552 srdp = hblktosrd(hmeblkp); 11553 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11554 rid = hmeblkp->hblk_tag.htag_rid; 11555 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11556 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11557 rgnp = srdp->srd_hmergnp[rid]; 11558 ASSERT(rgnp != NULL); 11559 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11560 hmeblkp->hblk_shared = 0; 11561 } 11562 11563 sfmmu_hblk_steal_count++; 11564 SFMMU_STAT(sf_steal_count); 11565 11566 return (1); 11567 } 11568 11569 struct hme_blk * 11570 sfmmu_hmetohblk(struct sf_hment *sfhme) 11571 { 11572 struct hme_blk *hmeblkp; 11573 struct sf_hment *sfhme0; 11574 struct hme_blk *hblk_dummy = 0; 11575 11576 /* 11577 * No dummy sf_hments, please. 11578 */ 11579 ASSERT(sfhme->hme_tte.ll != 0); 11580 11581 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11582 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11583 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11584 11585 return (hmeblkp); 11586 } 11587 11588 /* 11589 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11590 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11591 * KM_SLEEP allocation. 11592 * 11593 * Return 0 on success, -1 otherwise. 11594 */ 11595 static void 11596 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11597 { 11598 struct tsb_info *tsbinfop, *next; 11599 tsb_replace_rc_t rc; 11600 boolean_t gotfirst = B_FALSE; 11601 11602 ASSERT(sfmmup != ksfmmup); 11603 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11604 11605 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11606 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11607 } 11608 11609 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11610 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11611 } else { 11612 return; 11613 } 11614 11615 ASSERT(sfmmup->sfmmu_tsb != NULL); 11616 11617 /* 11618 * Loop over all tsbinfo's replacing them with ones that actually have 11619 * a TSB. If any of the replacements ever fail, bail out of the loop. 11620 */ 11621 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11622 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11623 next = tsbinfop->tsb_next; 11624 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11625 hatlockp, TSB_SWAPIN); 11626 if (rc != TSB_SUCCESS) { 11627 break; 11628 } 11629 gotfirst = B_TRUE; 11630 } 11631 11632 switch (rc) { 11633 case TSB_SUCCESS: 11634 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11635 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11636 return; 11637 case TSB_LOSTRACE: 11638 break; 11639 case TSB_ALLOCFAIL: 11640 break; 11641 default: 11642 panic("sfmmu_replace_tsb returned unrecognized failure code " 11643 "%d", rc); 11644 } 11645 11646 /* 11647 * In this case, we failed to get one of our TSBs. If we failed to 11648 * get the first TSB, get one of minimum size (8KB). Walk the list 11649 * and throw away the tsbinfos, starting where the allocation failed; 11650 * we can get by with just one TSB as long as we don't leave the 11651 * SWAPPED tsbinfo structures lying around. 11652 */ 11653 tsbinfop = sfmmup->sfmmu_tsb; 11654 next = tsbinfop->tsb_next; 11655 tsbinfop->tsb_next = NULL; 11656 11657 sfmmu_hat_exit(hatlockp); 11658 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11659 next = tsbinfop->tsb_next; 11660 sfmmu_tsbinfo_free(tsbinfop); 11661 } 11662 hatlockp = sfmmu_hat_enter(sfmmup); 11663 11664 /* 11665 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11666 * pages. 11667 */ 11668 if (!gotfirst) { 11669 tsbinfop = sfmmup->sfmmu_tsb; 11670 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11671 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11672 ASSERT(rc == TSB_SUCCESS); 11673 } 11674 11675 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11676 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11677 } 11678 11679 static int 11680 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11681 { 11682 ulong_t bix = 0; 11683 uint_t rid; 11684 sf_region_t *rgnp; 11685 11686 ASSERT(srdp != NULL); 11687 ASSERT(srdp->srd_refcnt != 0); 11688 11689 w <<= BT_ULSHIFT; 11690 while (bmw) { 11691 if (!(bmw & 0x1)) { 11692 bix++; 11693 bmw >>= 1; 11694 continue; 11695 } 11696 rid = w | bix; 11697 rgnp = srdp->srd_hmergnp[rid]; 11698 ASSERT(rgnp->rgn_refcnt > 0); 11699 ASSERT(rgnp->rgn_id == rid); 11700 if (addr < rgnp->rgn_saddr || 11701 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11702 bix++; 11703 bmw >>= 1; 11704 } else { 11705 return (1); 11706 } 11707 } 11708 return (0); 11709 } 11710 11711 /* 11712 * Handle exceptions for low level tsb_handler. 11713 * 11714 * There are many scenarios that could land us here: 11715 * 11716 * If the context is invalid we land here. The context can be invalid 11717 * for 3 reasons: 1) we couldn't allocate a new context and now need to 11718 * perform a wrap around operation in order to allocate a new context. 11719 * 2) Context was invalidated to change pagesize programming 3) ISMs or 11720 * TSBs configuration is changeing for this process and we are forced into 11721 * here to do a syncronization operation. If the context is valid we can 11722 * be here from window trap hanlder. In this case just call trap to handle 11723 * the fault. 11724 * 11725 * Note that the process will run in INVALID_CONTEXT before 11726 * faulting into here and subsequently loading the MMU registers 11727 * (including the TSB base register) associated with this process. 11728 * For this reason, the trap handlers must all test for 11729 * INVALID_CONTEXT before attempting to access any registers other 11730 * than the context registers. 11731 */ 11732 void 11733 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 11734 { 11735 sfmmu_t *sfmmup, *shsfmmup; 11736 uint_t ctxtype; 11737 klwp_id_t lwp; 11738 char lwp_save_state; 11739 hatlock_t *hatlockp, *shatlockp; 11740 struct tsb_info *tsbinfop; 11741 struct tsbmiss *tsbmp; 11742 sf_scd_t *scdp; 11743 11744 SFMMU_STAT(sf_tsb_exceptions); 11745 SFMMU_MMU_STAT(mmu_tsb_exceptions); 11746 sfmmup = astosfmmu(curthread->t_procp->p_as); 11747 /* 11748 * note that in sun4u, tagacces register contains ctxnum 11749 * while sun4v passes ctxtype in the tagaccess register. 11750 */ 11751 ctxtype = tagaccess & TAGACC_CTX_MASK; 11752 11753 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11754 ASSERT(sfmmup->sfmmu_ismhat == 0); 11755 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11756 ctxtype == INVALID_CONTEXT); 11757 11758 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11759 /* 11760 * We may land here because shme bitmap and pagesize 11761 * flags are updated lazily in tsbmiss area on other cpus. 11762 * If we detect here that tsbmiss area is out of sync with 11763 * sfmmu update it and retry the trapped instruction. 11764 * Otherwise call trap(). 11765 */ 11766 int ret = 0; 11767 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11768 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11769 11770 /* 11771 * Must set lwp state to LWP_SYS before 11772 * trying to acquire any adaptive lock 11773 */ 11774 lwp = ttolwp(curthread); 11775 ASSERT(lwp); 11776 lwp_save_state = lwp->lwp_state; 11777 lwp->lwp_state = LWP_SYS; 11778 11779 hatlockp = sfmmu_hat_enter(sfmmup); 11780 kpreempt_disable(); 11781 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11782 ASSERT(sfmmup == tsbmp->usfmmup); 11783 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11784 ~tteflag_mask) || 11785 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11786 ~tteflag_mask)) { 11787 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11788 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11789 ret = 1; 11790 } 11791 if (sfmmup->sfmmu_srdp != NULL) { 11792 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11793 ulong_t *tm = tsbmp->shmermap; 11794 ulong_t i; 11795 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11796 ulong_t d = tm[i] ^ sm[i]; 11797 if (d) { 11798 if (d & sm[i]) { 11799 if (!ret && sfmmu_is_rgnva( 11800 sfmmup->sfmmu_srdp, 11801 addr, i, d & sm[i])) { 11802 ret = 1; 11803 } 11804 } 11805 tm[i] = sm[i]; 11806 } 11807 } 11808 } 11809 kpreempt_enable(); 11810 sfmmu_hat_exit(hatlockp); 11811 lwp->lwp_state = lwp_save_state; 11812 if (ret) { 11813 return; 11814 } 11815 } else if (ctxtype == INVALID_CONTEXT) { 11816 /* 11817 * First, make sure we come out of here with a valid ctx, 11818 * since if we don't get one we'll simply loop on the 11819 * faulting instruction. 11820 * 11821 * If the ISM mappings are changing, the TSB is relocated, 11822 * the process is swapped, the process is joining SCD or 11823 * leaving SCD or shared regions we serialize behind the 11824 * controlling thread with hat lock, sfmmu_flags and 11825 * sfmmu_tsb_cv condition variable. 11826 */ 11827 11828 /* 11829 * Must set lwp state to LWP_SYS before 11830 * trying to acquire any adaptive lock 11831 */ 11832 lwp = ttolwp(curthread); 11833 ASSERT(lwp); 11834 lwp_save_state = lwp->lwp_state; 11835 lwp->lwp_state = LWP_SYS; 11836 11837 hatlockp = sfmmu_hat_enter(sfmmup); 11838 retry: 11839 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11840 shsfmmup = scdp->scd_sfmmup; 11841 ASSERT(shsfmmup != NULL); 11842 11843 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11844 tsbinfop = tsbinfop->tsb_next) { 11845 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11846 /* drop the private hat lock */ 11847 sfmmu_hat_exit(hatlockp); 11848 /* acquire the shared hat lock */ 11849 shatlockp = sfmmu_hat_enter(shsfmmup); 11850 /* 11851 * recheck to see if anything changed 11852 * after we drop the private hat lock. 11853 */ 11854 if (sfmmup->sfmmu_scdp == scdp && 11855 shsfmmup == scdp->scd_sfmmup) { 11856 sfmmu_tsb_chk_reloc(shsfmmup, 11857 shatlockp); 11858 } 11859 sfmmu_hat_exit(shatlockp); 11860 hatlockp = sfmmu_hat_enter(sfmmup); 11861 goto retry; 11862 } 11863 } 11864 } 11865 11866 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 11867 tsbinfop = tsbinfop->tsb_next) { 11868 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11869 cv_wait(&sfmmup->sfmmu_tsb_cv, 11870 HATLOCK_MUTEXP(hatlockp)); 11871 goto retry; 11872 } 11873 } 11874 11875 /* 11876 * Wait for ISM maps to be updated. 11877 */ 11878 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 11879 cv_wait(&sfmmup->sfmmu_tsb_cv, 11880 HATLOCK_MUTEXP(hatlockp)); 11881 goto retry; 11882 } 11883 11884 /* Is this process joining an SCD? */ 11885 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 11886 /* 11887 * Flush private TSB and setup shared TSB. 11888 * sfmmu_finish_join_scd() does not drop the 11889 * hat lock. 11890 */ 11891 sfmmu_finish_join_scd(sfmmup); 11892 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 11893 } 11894 11895 /* 11896 * If we're swapping in, get TSB(s). Note that we must do 11897 * this before we get a ctx or load the MMU state. Once 11898 * we swap in we have to recheck to make sure the TSB(s) and 11899 * ISM mappings didn't change while we slept. 11900 */ 11901 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11902 sfmmu_tsb_swapin(sfmmup, hatlockp); 11903 goto retry; 11904 } 11905 11906 sfmmu_get_ctx(sfmmup); 11907 11908 sfmmu_hat_exit(hatlockp); 11909 /* 11910 * Must restore lwp_state if not calling 11911 * trap() for further processing. Restore 11912 * it anyway. 11913 */ 11914 lwp->lwp_state = lwp_save_state; 11915 return; 11916 } 11917 trap(rp, (caddr_t)tagaccess, traptype, 0); 11918 } 11919 11920 static void 11921 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11922 { 11923 struct tsb_info *tp; 11924 11925 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11926 11927 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) { 11928 if (tp->tsb_flags & TSB_RELOC_FLAG) { 11929 cv_wait(&sfmmup->sfmmu_tsb_cv, 11930 HATLOCK_MUTEXP(hatlockp)); 11931 break; 11932 } 11933 } 11934 } 11935 11936 /* 11937 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and 11938 * TTE_SUSPENDED bit set in tte we block on aquiring a page lock 11939 * rather than spinning to avoid send mondo timeouts with 11940 * interrupts enabled. When the lock is acquired it is immediately 11941 * released and we return back to sfmmu_vatopfn just after 11942 * the GET_TTE call. 11943 */ 11944 void 11945 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep) 11946 { 11947 struct page **pp; 11948 11949 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11950 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE); 11951 } 11952 11953 /* 11954 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and 11955 * TTE_SUSPENDED bit set in tte. We do this so that we can handle 11956 * cross traps which cannot be handled while spinning in the 11957 * trap handlers. Simply enter and exit the kpr_suspendlock spin 11958 * mutex, which is held by the holder of the suspend bit, and then 11959 * retry the trapped instruction after unwinding. 11960 */ 11961 /*ARGSUSED*/ 11962 void 11963 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype) 11964 { 11965 ASSERT(curthread != kreloc_thread); 11966 mutex_enter(&kpr_suspendlock); 11967 mutex_exit(&kpr_suspendlock); 11968 } 11969 11970 /* 11971 * This routine could be optimized to reduce the number of xcalls by flushing 11972 * the entire TLBs if region reference count is above some threshold but the 11973 * tradeoff will depend on the size of the TLB. So for now flush the specific 11974 * page a context at a time. 11975 * 11976 * If uselocks is 0 then it's called after all cpus were captured and all the 11977 * hat locks were taken. In this case don't take the region lock by relying on 11978 * the order of list region update operations in hat_join_region(), 11979 * hat_leave_region() and hat_dup_region(). The ordering in those routines 11980 * guarantees that list is always forward walkable and reaches active sfmmus 11981 * regardless of where xc_attention() captures a cpu. 11982 */ 11983 cpuset_t 11984 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp, 11985 struct hme_blk *hmeblkp, int uselocks) 11986 { 11987 sfmmu_t *sfmmup; 11988 cpuset_t cpuset; 11989 cpuset_t rcpuset; 11990 hatlock_t *hatlockp; 11991 uint_t rid = rgnp->rgn_id; 11992 sf_rgn_link_t *rlink; 11993 sf_scd_t *scdp; 11994 11995 ASSERT(hmeblkp->hblk_shared); 11996 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11997 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11998 11999 CPUSET_ZERO(rcpuset); 12000 if (uselocks) { 12001 mutex_enter(&rgnp->rgn_mutex); 12002 } 12003 sfmmup = rgnp->rgn_sfmmu_head; 12004 while (sfmmup != NULL) { 12005 if (uselocks) { 12006 hatlockp = sfmmu_hat_enter(sfmmup); 12007 } 12008 12009 /* 12010 * When an SCD is created the SCD hat is linked on the sfmmu 12011 * region lists for each hme region which is part of the 12012 * SCD. If we find an SCD hat, when walking these lists, 12013 * then we flush the shared TSBs, if we find a private hat, 12014 * which is part of an SCD, but where the region 12015 * is not part of the SCD then we flush the private TSBs. 12016 */ 12017 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12018 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 12019 scdp = sfmmup->sfmmu_scdp; 12020 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 12021 if (uselocks) { 12022 sfmmu_hat_exit(hatlockp); 12023 } 12024 goto next; 12025 } 12026 } 12027 12028 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12029 12030 kpreempt_disable(); 12031 cpuset = sfmmup->sfmmu_cpusran; 12032 CPUSET_AND(cpuset, cpu_ready_set); 12033 CPUSET_DEL(cpuset, CPU->cpu_id); 12034 SFMMU_XCALL_STATS(sfmmup); 12035 xt_some(cpuset, vtag_flushpage_tl1, 12036 (uint64_t)addr, (uint64_t)sfmmup); 12037 vtag_flushpage(addr, (uint64_t)sfmmup); 12038 if (uselocks) { 12039 sfmmu_hat_exit(hatlockp); 12040 } 12041 kpreempt_enable(); 12042 CPUSET_OR(rcpuset, cpuset); 12043 12044 next: 12045 /* LINTED: constant in conditional context */ 12046 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 12047 ASSERT(rlink != NULL); 12048 sfmmup = rlink->next; 12049 } 12050 if (uselocks) { 12051 mutex_exit(&rgnp->rgn_mutex); 12052 } 12053 return (rcpuset); 12054 } 12055 12056 /* 12057 * This routine takes an sfmmu pointer and the va for an adddress in an 12058 * ISM region as input and returns the corresponding region id in ism_rid. 12059 * The return value of 1 indicates that a region has been found and ism_rid 12060 * is valid, otherwise 0 is returned. 12061 */ 12062 static int 12063 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid) 12064 { 12065 ism_blk_t *ism_blkp; 12066 int i; 12067 ism_map_t *ism_map; 12068 #ifdef DEBUG 12069 struct hat *ism_hatid; 12070 #endif 12071 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12072 12073 ism_blkp = sfmmup->sfmmu_iblk; 12074 while (ism_blkp != NULL) { 12075 ism_map = ism_blkp->iblk_maps; 12076 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 12077 if ((va >= ism_start(ism_map[i])) && 12078 (va < ism_end(ism_map[i]))) { 12079 12080 *ism_rid = ism_map[i].imap_rid; 12081 #ifdef DEBUG 12082 ism_hatid = ism_map[i].imap_ismhat; 12083 ASSERT(ism_hatid == ism_sfmmup); 12084 ASSERT(ism_hatid->sfmmu_ismhat); 12085 #endif 12086 return (1); 12087 } 12088 } 12089 ism_blkp = ism_blkp->iblk_next; 12090 } 12091 return (0); 12092 } 12093 12094 /* 12095 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches. 12096 * This routine may be called with all cpu's captured. Therefore, the 12097 * caller is responsible for holding all locks and disabling kernel 12098 * preemption. 12099 */ 12100 /* ARGSUSED */ 12101 static void 12102 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup, 12103 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag) 12104 { 12105 cpuset_t cpuset; 12106 caddr_t va; 12107 ism_ment_t *ment; 12108 sfmmu_t *sfmmup; 12109 #ifdef VAC 12110 int vcolor; 12111 #endif 12112 12113 sf_scd_t *scdp; 12114 uint_t ism_rid; 12115 12116 ASSERT(!hmeblkp->hblk_shared); 12117 /* 12118 * Walk the ism_hat's mapping list and flush the page 12119 * from every hat sharing this ism_hat. This routine 12120 * may be called while all cpu's have been captured. 12121 * Therefore we can't attempt to grab any locks. For now 12122 * this means we will protect the ism mapping list under 12123 * a single lock which will be grabbed by the caller. 12124 * If hat_share/unshare scalibility becomes a performance 12125 * problem then we may need to re-think ism mapping list locking. 12126 */ 12127 ASSERT(ism_sfmmup->sfmmu_ismhat); 12128 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 12129 addr = addr - ISMID_STARTADDR; 12130 12131 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) { 12132 12133 sfmmup = ment->iment_hat; 12134 12135 va = ment->iment_base_va; 12136 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr); 12137 12138 /* 12139 * When an SCD is created the SCD hat is linked on the ism 12140 * mapping lists for each ISM segment which is part of the 12141 * SCD. If we find an SCD hat, when walking these lists, 12142 * then we flush the shared TSBs, if we find a private hat, 12143 * which is part of an SCD, but where the region 12144 * corresponding to this va is not part of the SCD then we 12145 * flush the private TSBs. 12146 */ 12147 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12148 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12149 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12150 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12151 &ism_rid)) { 12152 cmn_err(CE_PANIC, 12153 "can't find matching ISM rid!"); 12154 } 12155 12156 scdp = sfmmup->sfmmu_scdp; 12157 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12158 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12159 ism_rid)) { 12160 continue; 12161 } 12162 } 12163 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12164 12165 cpuset = sfmmup->sfmmu_cpusran; 12166 CPUSET_AND(cpuset, cpu_ready_set); 12167 CPUSET_DEL(cpuset, CPU->cpu_id); 12168 SFMMU_XCALL_STATS(sfmmup); 12169 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12170 (uint64_t)sfmmup); 12171 vtag_flushpage(va, (uint64_t)sfmmup); 12172 12173 #ifdef VAC 12174 /* 12175 * Flush D$ 12176 * When flushing D$ we must flush all 12177 * cpu's. See sfmmu_cache_flush(). 12178 */ 12179 if (cache_flush_flag == CACHE_FLUSH) { 12180 cpuset = cpu_ready_set; 12181 CPUSET_DEL(cpuset, CPU->cpu_id); 12182 12183 SFMMU_XCALL_STATS(sfmmup); 12184 vcolor = addr_to_vcolor(va); 12185 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12186 vac_flushpage(pfnum, vcolor); 12187 } 12188 #endif /* VAC */ 12189 } 12190 } 12191 12192 /* 12193 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12194 * a particular virtual address and ctx. If noflush is set we do not 12195 * flush the TLB/TSB. This function may or may not be called with the 12196 * HAT lock held. 12197 */ 12198 static void 12199 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12200 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12201 int hat_lock_held) 12202 { 12203 #ifdef VAC 12204 int vcolor; 12205 #endif 12206 cpuset_t cpuset; 12207 hatlock_t *hatlockp; 12208 12209 ASSERT(!hmeblkp->hblk_shared); 12210 12211 #if defined(lint) && !defined(VAC) 12212 pfnum = pfnum; 12213 cpu_flag = cpu_flag; 12214 cache_flush_flag = cache_flush_flag; 12215 #endif 12216 12217 /* 12218 * There is no longer a need to protect against ctx being 12219 * stolen here since we don't store the ctx in the TSB anymore. 12220 */ 12221 #ifdef VAC 12222 vcolor = addr_to_vcolor(addr); 12223 #endif 12224 12225 /* 12226 * We must hold the hat lock during the flush of TLB, 12227 * to avoid a race with sfmmu_invalidate_ctx(), where 12228 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12229 * causing TLB demap routine to skip flush on that MMU. 12230 * If the context on a MMU has already been set to 12231 * INVALID_CONTEXT, we just get an extra flush on 12232 * that MMU. 12233 */ 12234 if (!hat_lock_held && !tlb_noflush) 12235 hatlockp = sfmmu_hat_enter(sfmmup); 12236 12237 kpreempt_disable(); 12238 if (!tlb_noflush) { 12239 /* 12240 * Flush the TSB and TLB. 12241 */ 12242 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12243 12244 cpuset = sfmmup->sfmmu_cpusran; 12245 CPUSET_AND(cpuset, cpu_ready_set); 12246 CPUSET_DEL(cpuset, CPU->cpu_id); 12247 12248 SFMMU_XCALL_STATS(sfmmup); 12249 12250 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12251 (uint64_t)sfmmup); 12252 12253 vtag_flushpage(addr, (uint64_t)sfmmup); 12254 } 12255 12256 if (!hat_lock_held && !tlb_noflush) 12257 sfmmu_hat_exit(hatlockp); 12258 12259 #ifdef VAC 12260 /* 12261 * Flush the D$ 12262 * 12263 * Even if the ctx is stolen, we need to flush the 12264 * cache. Our ctx stealer only flushes the TLBs. 12265 */ 12266 if (cache_flush_flag == CACHE_FLUSH) { 12267 if (cpu_flag & FLUSH_ALL_CPUS) { 12268 cpuset = cpu_ready_set; 12269 } else { 12270 cpuset = sfmmup->sfmmu_cpusran; 12271 CPUSET_AND(cpuset, cpu_ready_set); 12272 } 12273 CPUSET_DEL(cpuset, CPU->cpu_id); 12274 SFMMU_XCALL_STATS(sfmmup); 12275 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12276 vac_flushpage(pfnum, vcolor); 12277 } 12278 #endif /* VAC */ 12279 kpreempt_enable(); 12280 } 12281 12282 /* 12283 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12284 * address and ctx. If noflush is set we do not currently do anything. 12285 * This function may or may not be called with the HAT lock held. 12286 */ 12287 static void 12288 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12289 int tlb_noflush, int hat_lock_held) 12290 { 12291 cpuset_t cpuset; 12292 hatlock_t *hatlockp; 12293 12294 ASSERT(!hmeblkp->hblk_shared); 12295 12296 /* 12297 * If the process is exiting we have nothing to do. 12298 */ 12299 if (tlb_noflush) 12300 return; 12301 12302 /* 12303 * Flush TSB. 12304 */ 12305 if (!hat_lock_held) 12306 hatlockp = sfmmu_hat_enter(sfmmup); 12307 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12308 12309 kpreempt_disable(); 12310 12311 cpuset = sfmmup->sfmmu_cpusran; 12312 CPUSET_AND(cpuset, cpu_ready_set); 12313 CPUSET_DEL(cpuset, CPU->cpu_id); 12314 12315 SFMMU_XCALL_STATS(sfmmup); 12316 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12317 12318 vtag_flushpage(addr, (uint64_t)sfmmup); 12319 12320 if (!hat_lock_held) 12321 sfmmu_hat_exit(hatlockp); 12322 12323 kpreempt_enable(); 12324 12325 } 12326 12327 /* 12328 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12329 * call handler that can flush a range of pages to save on xcalls. 12330 */ 12331 static int sfmmu_xcall_save; 12332 12333 /* 12334 * this routine is never used for demaping addresses backed by SRD hmeblks. 12335 */ 12336 static void 12337 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12338 { 12339 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12340 hatlock_t *hatlockp; 12341 cpuset_t cpuset; 12342 uint64_t sfmmu_pgcnt; 12343 pgcnt_t pgcnt = 0; 12344 int pgunload = 0; 12345 int dirtypg = 0; 12346 caddr_t addr = dmrp->dmr_addr; 12347 caddr_t eaddr; 12348 uint64_t bitvec = dmrp->dmr_bitvec; 12349 12350 ASSERT(bitvec & 1); 12351 12352 /* 12353 * Flush TSB and calculate number of pages to flush. 12354 */ 12355 while (bitvec != 0) { 12356 dirtypg = 0; 12357 /* 12358 * Find the first page to flush and then count how many 12359 * pages there are after it that also need to be flushed. 12360 * This way the number of TSB flushes is minimized. 12361 */ 12362 while ((bitvec & 1) == 0) { 12363 pgcnt++; 12364 addr += MMU_PAGESIZE; 12365 bitvec >>= 1; 12366 } 12367 while (bitvec & 1) { 12368 dirtypg++; 12369 bitvec >>= 1; 12370 } 12371 eaddr = addr + ptob(dirtypg); 12372 hatlockp = sfmmu_hat_enter(sfmmup); 12373 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12374 sfmmu_hat_exit(hatlockp); 12375 pgunload += dirtypg; 12376 addr = eaddr; 12377 pgcnt += dirtypg; 12378 } 12379 12380 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12381 if (sfmmup->sfmmu_free == 0) { 12382 addr = dmrp->dmr_addr; 12383 bitvec = dmrp->dmr_bitvec; 12384 12385 /* 12386 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12387 * as it will be used to pack argument for xt_some 12388 */ 12389 ASSERT((pgcnt > 0) && 12390 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12391 12392 /* 12393 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12394 * the low 6 bits of sfmmup. This is doable since pgcnt 12395 * always >= 1. 12396 */ 12397 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12398 sfmmu_pgcnt = (uint64_t)sfmmup | 12399 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12400 12401 /* 12402 * We must hold the hat lock during the flush of TLB, 12403 * to avoid a race with sfmmu_invalidate_ctx(), where 12404 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12405 * causing TLB demap routine to skip flush on that MMU. 12406 * If the context on a MMU has already been set to 12407 * INVALID_CONTEXT, we just get an extra flush on 12408 * that MMU. 12409 */ 12410 hatlockp = sfmmu_hat_enter(sfmmup); 12411 kpreempt_disable(); 12412 12413 cpuset = sfmmup->sfmmu_cpusran; 12414 CPUSET_AND(cpuset, cpu_ready_set); 12415 CPUSET_DEL(cpuset, CPU->cpu_id); 12416 12417 SFMMU_XCALL_STATS(sfmmup); 12418 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12419 sfmmu_pgcnt); 12420 12421 for (; bitvec != 0; bitvec >>= 1) { 12422 if (bitvec & 1) 12423 vtag_flushpage(addr, (uint64_t)sfmmup); 12424 addr += MMU_PAGESIZE; 12425 } 12426 kpreempt_enable(); 12427 sfmmu_hat_exit(hatlockp); 12428 12429 sfmmu_xcall_save += (pgunload-1); 12430 } 12431 dmrp->dmr_bitvec = 0; 12432 } 12433 12434 /* 12435 * In cases where we need to synchronize with TLB/TSB miss trap 12436 * handlers, _and_ need to flush the TLB, it's a lot easier to 12437 * throw away the context from the process than to do a 12438 * special song and dance to keep things consistent for the 12439 * handlers. 12440 * 12441 * Since the process suddenly ends up without a context and our caller 12442 * holds the hat lock, threads that fault after this function is called 12443 * will pile up on the lock. We can then do whatever we need to 12444 * atomically from the context of the caller. The first blocked thread 12445 * to resume executing will get the process a new context, and the 12446 * process will resume executing. 12447 * 12448 * One added advantage of this approach is that on MMUs that 12449 * support a "flush all" operation, we will delay the flush until 12450 * cnum wrap-around, and then flush the TLB one time. This 12451 * is rather rare, so it's a lot less expensive than making 8000 12452 * x-calls to flush the TLB 8000 times. 12453 * 12454 * A per-process (PP) lock is used to synchronize ctx allocations in 12455 * resume() and ctx invalidations here. 12456 */ 12457 static void 12458 sfmmu_invalidate_ctx(sfmmu_t *sfmmup) 12459 { 12460 cpuset_t cpuset; 12461 int cnum, currcnum; 12462 mmu_ctx_t *mmu_ctxp; 12463 int i; 12464 uint_t pstate_save; 12465 12466 SFMMU_STAT(sf_ctx_inv); 12467 12468 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12469 ASSERT(sfmmup != ksfmmup); 12470 12471 kpreempt_disable(); 12472 12473 mmu_ctxp = CPU_MMU_CTXP(CPU); 12474 ASSERT(mmu_ctxp); 12475 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 12476 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 12477 12478 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum; 12479 12480 pstate_save = sfmmu_disable_intrs(); 12481 12482 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */ 12483 /* set HAT cnum invalid across all context domains. */ 12484 for (i = 0; i < max_mmu_ctxdoms; i++) { 12485 12486 cnum = sfmmup->sfmmu_ctxs[i].cnum; 12487 if (cnum == INVALID_CONTEXT) { 12488 continue; 12489 } 12490 12491 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 12492 } 12493 membar_enter(); /* make sure globally visible to all CPUs */ 12494 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */ 12495 12496 sfmmu_enable_intrs(pstate_save); 12497 12498 cpuset = sfmmup->sfmmu_cpusran; 12499 CPUSET_DEL(cpuset, CPU->cpu_id); 12500 CPUSET_AND(cpuset, cpu_ready_set); 12501 if (!CPUSET_ISNULL(cpuset)) { 12502 SFMMU_XCALL_STATS(sfmmup); 12503 xt_some(cpuset, sfmmu_raise_tsb_exception, 12504 (uint64_t)sfmmup, INVALID_CONTEXT); 12505 xt_sync(cpuset); 12506 SFMMU_STAT(sf_tsb_raise_exception); 12507 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 12508 } 12509 12510 /* 12511 * If the hat to-be-invalidated is the same as the current 12512 * process on local CPU we need to invalidate 12513 * this CPU context as well. 12514 */ 12515 if ((sfmmu_getctx_sec() == currcnum) && 12516 (currcnum != INVALID_CONTEXT)) { 12517 /* sets shared context to INVALID too */ 12518 sfmmu_setctx_sec(INVALID_CONTEXT); 12519 sfmmu_clear_utsbinfo(); 12520 } 12521 12522 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID); 12523 12524 kpreempt_enable(); 12525 12526 /* 12527 * we hold the hat lock, so nobody should allocate a context 12528 * for us yet 12529 */ 12530 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT); 12531 } 12532 12533 #ifdef VAC 12534 /* 12535 * We need to flush the cache in all cpus. It is possible that 12536 * a process referenced a page as cacheable but has sinced exited 12537 * and cleared the mapping list. We still to flush it but have no 12538 * state so all cpus is the only alternative. 12539 */ 12540 void 12541 sfmmu_cache_flush(pfn_t pfnum, int vcolor) 12542 { 12543 cpuset_t cpuset; 12544 12545 kpreempt_disable(); 12546 cpuset = cpu_ready_set; 12547 CPUSET_DEL(cpuset, CPU->cpu_id); 12548 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12549 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12550 xt_sync(cpuset); 12551 vac_flushpage(pfnum, vcolor); 12552 kpreempt_enable(); 12553 } 12554 12555 void 12556 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum) 12557 { 12558 cpuset_t cpuset; 12559 12560 ASSERT(vcolor >= 0); 12561 12562 kpreempt_disable(); 12563 cpuset = cpu_ready_set; 12564 CPUSET_DEL(cpuset, CPU->cpu_id); 12565 SFMMU_XCALL_STATS(NULL); /* account to any ctx */ 12566 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum); 12567 xt_sync(cpuset); 12568 vac_flushcolor(vcolor, pfnum); 12569 kpreempt_enable(); 12570 } 12571 #endif /* VAC */ 12572 12573 /* 12574 * We need to prevent processes from accessing the TSB using a cached physical 12575 * address. It's alright if they try to access the TSB via virtual address 12576 * since they will just fault on that virtual address once the mapping has 12577 * been suspended. 12578 */ 12579 #pragma weak sendmondo_in_recover 12580 12581 /* ARGSUSED */ 12582 static int 12583 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12584 { 12585 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12586 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12587 hatlock_t *hatlockp; 12588 sf_scd_t *scdp; 12589 12590 if (flags != HAT_PRESUSPEND) 12591 return (0); 12592 12593 /* 12594 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12595 * be a shared hat, then set SCD's tsbinfo's flag. 12596 * If tsb is not shared, sfmmup is a private hat, then set 12597 * its private tsbinfo's flag. 12598 */ 12599 hatlockp = sfmmu_hat_enter(sfmmup); 12600 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12601 12602 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12603 sfmmu_tsb_inv_ctx(sfmmup); 12604 sfmmu_hat_exit(hatlockp); 12605 } else { 12606 /* release lock on the shared hat */ 12607 sfmmu_hat_exit(hatlockp); 12608 /* sfmmup is a shared hat */ 12609 ASSERT(sfmmup->sfmmu_scdhat); 12610 scdp = sfmmup->sfmmu_scdp; 12611 ASSERT(scdp != NULL); 12612 /* get private hat from the scd list */ 12613 mutex_enter(&scdp->scd_mutex); 12614 sfmmup = scdp->scd_sf_list; 12615 while (sfmmup != NULL) { 12616 hatlockp = sfmmu_hat_enter(sfmmup); 12617 /* 12618 * We do not call sfmmu_tsb_inv_ctx here because 12619 * sendmondo_in_recover check is only needed for 12620 * sun4u. 12621 */ 12622 sfmmu_invalidate_ctx(sfmmup); 12623 sfmmu_hat_exit(hatlockp); 12624 sfmmup = sfmmup->sfmmu_scd_link.next; 12625 12626 } 12627 mutex_exit(&scdp->scd_mutex); 12628 } 12629 return (0); 12630 } 12631 12632 static void 12633 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12634 { 12635 extern uint32_t sendmondo_in_recover; 12636 12637 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12638 12639 /* 12640 * For Cheetah+ Erratum 25: 12641 * Wait for any active recovery to finish. We can't risk 12642 * relocating the TSB of the thread running mondo_recover_proc() 12643 * since, if we did that, we would deadlock. The scenario we are 12644 * trying to avoid is as follows: 12645 * 12646 * THIS CPU RECOVER CPU 12647 * -------- ----------- 12648 * Begins recovery, walking through TSB 12649 * hat_pagesuspend() TSB TTE 12650 * TLB miss on TSB TTE, spins at TL1 12651 * xt_sync() 12652 * send_mondo_timeout() 12653 * mondo_recover_proc() 12654 * ((deadlocked)) 12655 * 12656 * The second half of the workaround is that mondo_recover_proc() 12657 * checks to see if the tsb_info has the RELOC flag set, and if it 12658 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12659 * and hence avoiding the TLB miss that could result in a deadlock. 12660 */ 12661 if (&sendmondo_in_recover) { 12662 membar_enter(); /* make sure RELOC flag visible */ 12663 while (sendmondo_in_recover) { 12664 drv_usecwait(1); 12665 membar_consumer(); 12666 } 12667 } 12668 12669 sfmmu_invalidate_ctx(sfmmup); 12670 } 12671 12672 /* ARGSUSED */ 12673 static int 12674 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12675 void *tsbinfo, pfn_t newpfn) 12676 { 12677 hatlock_t *hatlockp; 12678 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12679 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12680 12681 if (flags != HAT_POSTUNSUSPEND) 12682 return (0); 12683 12684 hatlockp = sfmmu_hat_enter(sfmmup); 12685 12686 SFMMU_STAT(sf_tsb_reloc); 12687 12688 /* 12689 * The process may have swapped out while we were relocating one 12690 * of its TSBs. If so, don't bother doing the setup since the 12691 * process can't be using the memory anymore. 12692 */ 12693 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12694 ASSERT(va == tsbinfop->tsb_va); 12695 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12696 12697 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12698 sfmmu_inv_tsb(tsbinfop->tsb_va, 12699 TSB_BYTES(tsbinfop->tsb_szc)); 12700 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12701 } 12702 } 12703 12704 membar_exit(); 12705 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12706 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12707 12708 sfmmu_hat_exit(hatlockp); 12709 12710 return (0); 12711 } 12712 12713 /* 12714 * Allocate and initialize a tsb_info structure. Note that we may or may not 12715 * allocate a TSB here, depending on the flags passed in. 12716 */ 12717 static int 12718 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12719 uint_t flags, sfmmu_t *sfmmup) 12720 { 12721 int err; 12722 12723 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12724 sfmmu_tsbinfo_cache, KM_SLEEP); 12725 12726 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12727 tsb_szc, flags, sfmmup)) != 0) { 12728 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12729 SFMMU_STAT(sf_tsb_allocfail); 12730 *tsbinfopp = NULL; 12731 return (err); 12732 } 12733 SFMMU_STAT(sf_tsb_alloc); 12734 12735 /* 12736 * Bump the TSB size counters for this TSB size. 12737 */ 12738 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12739 return (0); 12740 } 12741 12742 static void 12743 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12744 { 12745 caddr_t tsbva = tsbinfo->tsb_va; 12746 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12747 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12748 vmem_t *vmp = tsbinfo->tsb_vmp; 12749 12750 /* 12751 * If we allocated this TSB from relocatable kernel memory, then we 12752 * need to uninstall the callback handler. 12753 */ 12754 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12755 uintptr_t slab_mask; 12756 caddr_t slab_vaddr; 12757 page_t **ppl; 12758 int ret; 12759 12760 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12761 if (tsb_size > MMU_PAGESIZE4M) 12762 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12763 else 12764 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12765 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12766 12767 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12768 ASSERT(ret == 0); 12769 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12770 0, NULL); 12771 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12772 } 12773 12774 if (kmem_cachep != NULL) { 12775 kmem_cache_free(kmem_cachep, tsbva); 12776 } else { 12777 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12778 } 12779 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12780 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12781 } 12782 12783 static void 12784 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12785 { 12786 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12787 sfmmu_tsb_free(tsbinfo); 12788 } 12789 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12790 12791 } 12792 12793 /* 12794 * Setup all the references to physical memory for this tsbinfo. 12795 * The underlying page(s) must be locked. 12796 */ 12797 static void 12798 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12799 { 12800 ASSERT(pfn != PFN_INVALID); 12801 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12802 12803 #ifndef sun4v 12804 if (tsbinfo->tsb_szc == 0) { 12805 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12806 PROT_WRITE|PROT_READ, TTE8K); 12807 } else { 12808 /* 12809 * Round down PA and use a large mapping; the handlers will 12810 * compute the TSB pointer at the correct offset into the 12811 * big virtual page. NOTE: this assumes all TSBs larger 12812 * than 8K must come from physically contiguous slabs of 12813 * size tsb_slab_size. 12814 */ 12815 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12816 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12817 } 12818 tsbinfo->tsb_pa = ptob(pfn); 12819 12820 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12821 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12822 12823 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12824 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12825 #else /* sun4v */ 12826 tsbinfo->tsb_pa = ptob(pfn); 12827 #endif /* sun4v */ 12828 } 12829 12830 12831 /* 12832 * Returns zero on success, ENOMEM if over the high water mark, 12833 * or EAGAIN if the caller needs to retry with a smaller TSB 12834 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12835 * 12836 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12837 * is specified and the TSB requested is PAGESIZE, though it 12838 * may sleep waiting for memory if sufficient memory is not 12839 * available. 12840 */ 12841 static int 12842 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12843 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12844 { 12845 caddr_t vaddr = NULL; 12846 caddr_t slab_vaddr; 12847 uintptr_t slab_mask; 12848 int tsbbytes = TSB_BYTES(tsbcode); 12849 int lowmem = 0; 12850 struct kmem_cache *kmem_cachep = NULL; 12851 vmem_t *vmp = NULL; 12852 lgrp_id_t lgrpid = LGRP_NONE; 12853 pfn_t pfn; 12854 uint_t cbflags = HAC_SLEEP; 12855 page_t **pplist; 12856 int ret; 12857 12858 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12859 if (tsbbytes > MMU_PAGESIZE4M) 12860 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12861 else 12862 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12863 12864 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12865 flags |= TSB_ALLOC; 12866 12867 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12868 12869 tsbinfo->tsb_sfmmu = sfmmup; 12870 12871 /* 12872 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12873 * return. 12874 */ 12875 if ((flags & TSB_ALLOC) == 0) { 12876 tsbinfo->tsb_szc = tsbcode; 12877 tsbinfo->tsb_ttesz_mask = tteszmask; 12878 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12879 tsbinfo->tsb_pa = -1; 12880 tsbinfo->tsb_tte.ll = 0; 12881 tsbinfo->tsb_next = NULL; 12882 tsbinfo->tsb_flags = TSB_SWAPPED; 12883 tsbinfo->tsb_cache = NULL; 12884 tsbinfo->tsb_vmp = NULL; 12885 return (0); 12886 } 12887 12888 #ifdef DEBUG 12889 /* 12890 * For debugging: 12891 * Randomly force allocation failures every tsb_alloc_mtbf 12892 * tries if TSB_FORCEALLOC is not specified. This will 12893 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12894 * it is even, to allow testing of both failure paths... 12895 */ 12896 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12897 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12898 tsb_alloc_count = 0; 12899 tsb_alloc_fail_mtbf++; 12900 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12901 } 12902 #endif /* DEBUG */ 12903 12904 /* 12905 * Enforce high water mark if we are not doing a forced allocation 12906 * and are not shrinking a process' TSB. 12907 */ 12908 if ((flags & TSB_SHRINK) == 0 && 12909 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12910 if ((flags & TSB_FORCEALLOC) == 0) 12911 return (ENOMEM); 12912 lowmem = 1; 12913 } 12914 12915 /* 12916 * Allocate from the correct location based upon the size of the TSB 12917 * compared to the base page size, and what memory conditions dictate. 12918 * Note we always do nonblocking allocations from the TSB arena since 12919 * we don't want memory fragmentation to cause processes to block 12920 * indefinitely waiting for memory; until the kernel algorithms that 12921 * coalesce large pages are improved this is our best option. 12922 * 12923 * Algorithm: 12924 * If allocating a "large" TSB (>8K), allocate from the 12925 * appropriate kmem_tsb_default_arena vmem arena 12926 * else if low on memory or the TSB_FORCEALLOC flag is set or 12927 * tsb_forceheap is set 12928 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12929 * KM_SLEEP (never fails) 12930 * else 12931 * Allocate from appropriate sfmmu_tsb_cache with 12932 * KM_NOSLEEP 12933 * endif 12934 */ 12935 if (tsb_lgrp_affinity) 12936 lgrpid = lgrp_home_id(curthread); 12937 if (lgrpid == LGRP_NONE) 12938 lgrpid = 0; /* use lgrp of boot CPU */ 12939 12940 if (tsbbytes > MMU_PAGESIZE) { 12941 if (tsbbytes > MMU_PAGESIZE4M) { 12942 vmp = kmem_bigtsb_default_arena[lgrpid]; 12943 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12944 0, 0, NULL, NULL, VM_NOSLEEP); 12945 } else { 12946 vmp = kmem_tsb_default_arena[lgrpid]; 12947 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12948 0, 0, NULL, NULL, VM_NOSLEEP); 12949 } 12950 #ifdef DEBUG 12951 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 12952 #else /* !DEBUG */ 12953 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 12954 #endif /* DEBUG */ 12955 kmem_cachep = sfmmu_tsb8k_cache; 12956 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 12957 ASSERT(vaddr != NULL); 12958 } else { 12959 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 12960 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 12961 } 12962 12963 tsbinfo->tsb_cache = kmem_cachep; 12964 tsbinfo->tsb_vmp = vmp; 12965 12966 if (vaddr == NULL) { 12967 return (EAGAIN); 12968 } 12969 12970 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 12971 kmem_cachep = tsbinfo->tsb_cache; 12972 12973 /* 12974 * If we are allocating from outside the cage, then we need to 12975 * register a relocation callback handler. Note that for now 12976 * since pseudo mappings always hang off of the slab's root page, 12977 * we need only lock the first 8K of the TSB slab. This is a bit 12978 * hacky but it is good for performance. 12979 */ 12980 if (kmem_cachep != sfmmu_tsb8k_cache) { 12981 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 12982 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 12983 ASSERT(ret == 0); 12984 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 12985 cbflags, (void *)tsbinfo, &pfn, NULL); 12986 12987 /* 12988 * Need to free up resources if we could not successfully 12989 * add the callback function and return an error condition. 12990 */ 12991 if (ret != 0) { 12992 if (kmem_cachep) { 12993 kmem_cache_free(kmem_cachep, vaddr); 12994 } else { 12995 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 12996 } 12997 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 12998 S_WRITE); 12999 return (EAGAIN); 13000 } 13001 } else { 13002 /* 13003 * Since allocation of 8K TSBs from heap is rare and occurs 13004 * during memory pressure we allocate them from permanent 13005 * memory rather than using callbacks to get the PFN. 13006 */ 13007 pfn = hat_getpfnum(kas.a_hat, vaddr); 13008 } 13009 13010 tsbinfo->tsb_va = vaddr; 13011 tsbinfo->tsb_szc = tsbcode; 13012 tsbinfo->tsb_ttesz_mask = tteszmask; 13013 tsbinfo->tsb_next = NULL; 13014 tsbinfo->tsb_flags = 0; 13015 13016 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 13017 13018 sfmmu_inv_tsb(vaddr, tsbbytes); 13019 13020 if (kmem_cachep != sfmmu_tsb8k_cache) { 13021 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 13022 } 13023 13024 return (0); 13025 } 13026 13027 /* 13028 * Initialize per cpu tsb and per cpu tsbmiss_area 13029 */ 13030 void 13031 sfmmu_init_tsbs(void) 13032 { 13033 int i; 13034 struct tsbmiss *tsbmissp; 13035 struct kpmtsbm *kpmtsbmp; 13036 #ifndef sun4v 13037 extern int dcache_line_mask; 13038 #endif /* sun4v */ 13039 extern uint_t vac_colors; 13040 13041 /* 13042 * Init. tsb miss area. 13043 */ 13044 tsbmissp = tsbmiss_area; 13045 13046 for (i = 0; i < NCPU; tsbmissp++, i++) { 13047 /* 13048 * initialize the tsbmiss area. 13049 * Do this for all possible CPUs as some may be added 13050 * while the system is running. There is no cost to this. 13051 */ 13052 tsbmissp->ksfmmup = ksfmmup; 13053 #ifndef sun4v 13054 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13055 #endif /* sun4v */ 13056 tsbmissp->khashstart = 13057 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13058 tsbmissp->uhashstart = 13059 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13060 tsbmissp->khashsz = khmehash_num; 13061 tsbmissp->uhashsz = uhmehash_num; 13062 } 13063 13064 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13065 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13066 13067 if (kpm_enable == 0) 13068 return; 13069 13070 /* -- Begin KPM specific init -- */ 13071 13072 if (kpm_smallpages) { 13073 /* 13074 * If we're using base pagesize pages for seg_kpm 13075 * mappings, we use the kernel TSB since we can't afford 13076 * to allocate a second huge TSB for these mappings. 13077 */ 13078 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13079 kpm_tsbsz = ktsb_szcode; 13080 kpmsm_tsbbase = kpm_tsbbase; 13081 kpmsm_tsbsz = kpm_tsbsz; 13082 } else { 13083 /* 13084 * In VAC conflict case, just put the entries in the 13085 * kernel 8K indexed TSB for now so we can find them. 13086 * This could really be changed in the future if we feel 13087 * the need... 13088 */ 13089 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13090 kpmsm_tsbsz = ktsb_szcode; 13091 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13092 kpm_tsbsz = ktsb4m_szcode; 13093 } 13094 13095 kpmtsbmp = kpmtsbm_area; 13096 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13097 /* 13098 * Initialize the kpmtsbm area. 13099 * Do this for all possible CPUs as some may be added 13100 * while the system is running. There is no cost to this. 13101 */ 13102 kpmtsbmp->vbase = kpm_vbase; 13103 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13104 kpmtsbmp->sz_shift = kpm_size_shift; 13105 kpmtsbmp->kpmp_shift = kpmp_shift; 13106 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13107 if (kpm_smallpages == 0) { 13108 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13109 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13110 } else { 13111 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13112 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13113 } 13114 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13115 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13116 #ifdef DEBUG 13117 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0; 13118 #endif /* DEBUG */ 13119 if (ktsb_phys) 13120 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13121 } 13122 13123 /* -- End KPM specific init -- */ 13124 } 13125 13126 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13127 struct tsb_info ktsb_info[2]; 13128 13129 /* 13130 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13131 */ 13132 void 13133 sfmmu_init_ktsbinfo() 13134 { 13135 ASSERT(ksfmmup != NULL); 13136 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13137 /* 13138 * Allocate tsbinfos for kernel and copy in data 13139 * to make debug easier and sun4v setup easier. 13140 */ 13141 ktsb_info[0].tsb_sfmmu = ksfmmup; 13142 ktsb_info[0].tsb_szc = ktsb_szcode; 13143 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13144 ktsb_info[0].tsb_va = ktsb_base; 13145 ktsb_info[0].tsb_pa = ktsb_pbase; 13146 ktsb_info[0].tsb_flags = 0; 13147 ktsb_info[0].tsb_tte.ll = 0; 13148 ktsb_info[0].tsb_cache = NULL; 13149 13150 ktsb_info[1].tsb_sfmmu = ksfmmup; 13151 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13152 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13153 ktsb_info[1].tsb_va = ktsb4m_base; 13154 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13155 ktsb_info[1].tsb_flags = 0; 13156 ktsb_info[1].tsb_tte.ll = 0; 13157 ktsb_info[1].tsb_cache = NULL; 13158 13159 /* Link them into ksfmmup. */ 13160 ktsb_info[0].tsb_next = &ktsb_info[1]; 13161 ktsb_info[1].tsb_next = NULL; 13162 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13163 13164 sfmmu_setup_tsbinfo(ksfmmup); 13165 } 13166 13167 /* 13168 * Cache the last value returned from va_to_pa(). If the VA specified 13169 * in the current call to cached_va_to_pa() maps to the same Page (as the 13170 * previous call to cached_va_to_pa()), then compute the PA using 13171 * cached info, else call va_to_pa(). 13172 * 13173 * Note: this function is neither MT-safe nor consistent in the presence 13174 * of multiple, interleaved threads. This function was created to enable 13175 * an optimization used during boot (at a point when there's only one thread 13176 * executing on the "boot CPU", and before startup_vm() has been called). 13177 */ 13178 static uint64_t 13179 cached_va_to_pa(void *vaddr) 13180 { 13181 static uint64_t prev_vaddr_base = 0; 13182 static uint64_t prev_pfn = 0; 13183 13184 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13185 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13186 } else { 13187 uint64_t pa = va_to_pa(vaddr); 13188 13189 if (pa != ((uint64_t)-1)) { 13190 /* 13191 * Computed physical address is valid. Cache its 13192 * related info for the next cached_va_to_pa() call. 13193 */ 13194 prev_pfn = pa & MMU_PAGEMASK; 13195 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13196 } 13197 13198 return (pa); 13199 } 13200 } 13201 13202 /* 13203 * Carve up our nucleus hblk region. We may allocate more hblks than 13204 * asked due to rounding errors but we are guaranteed to have at least 13205 * enough space to allocate the requested number of hblk8's and hblk1's. 13206 */ 13207 void 13208 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13209 { 13210 struct hme_blk *hmeblkp; 13211 size_t hme8blk_sz, hme1blk_sz; 13212 size_t i; 13213 size_t hblk8_bound; 13214 ulong_t j = 0, k = 0; 13215 13216 ASSERT(addr != NULL && size != 0); 13217 13218 /* Need to use proper structure alignment */ 13219 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13220 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13221 13222 nucleus_hblk8.list = (void *)addr; 13223 nucleus_hblk8.index = 0; 13224 13225 /* 13226 * Use as much memory as possible for hblk8's since we 13227 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13228 * We need to hold back enough space for the hblk1's which 13229 * we'll allocate next. 13230 */ 13231 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13232 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13233 hmeblkp = (struct hme_blk *)addr; 13234 addr += hme8blk_sz; 13235 hmeblkp->hblk_nuc_bit = 1; 13236 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13237 } 13238 nucleus_hblk8.len = j; 13239 ASSERT(j >= nhblk8); 13240 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13241 13242 nucleus_hblk1.list = (void *)addr; 13243 nucleus_hblk1.index = 0; 13244 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13245 hmeblkp = (struct hme_blk *)addr; 13246 addr += hme1blk_sz; 13247 hmeblkp->hblk_nuc_bit = 1; 13248 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13249 } 13250 ASSERT(k >= nhblk1); 13251 nucleus_hblk1.len = k; 13252 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13253 } 13254 13255 /* 13256 * This function is currently not supported on this platform. For what 13257 * it's supposed to do, see hat.c and hat_srmmu.c 13258 */ 13259 /* ARGSUSED */ 13260 faultcode_t 13261 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13262 uint_t flags) 13263 { 13264 ASSERT(hat->sfmmu_xhat_provider == NULL); 13265 return (FC_NOSUPPORT); 13266 } 13267 13268 /* 13269 * Searchs the mapping list of the page for a mapping of the same size. If not 13270 * found the corresponding bit is cleared in the p_index field. When large 13271 * pages are more prevalent in the system, we can maintain the mapping list 13272 * in order and we don't have to traverse the list each time. Just check the 13273 * next and prev entries, and if both are of different size, we clear the bit. 13274 */ 13275 static void 13276 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13277 { 13278 struct sf_hment *sfhmep; 13279 struct hme_blk *hmeblkp; 13280 int index; 13281 pgcnt_t npgs; 13282 13283 ASSERT(ttesz > TTE8K); 13284 13285 ASSERT(sfmmu_mlist_held(pp)); 13286 13287 ASSERT(PP_ISMAPPED_LARGE(pp)); 13288 13289 /* 13290 * Traverse mapping list looking for another mapping of same size. 13291 * since we only want to clear index field if all mappings of 13292 * that size are gone. 13293 */ 13294 13295 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13296 if (IS_PAHME(sfhmep)) 13297 continue; 13298 hmeblkp = sfmmu_hmetohblk(sfhmep); 13299 if (hmeblkp->hblk_xhat_bit) 13300 continue; 13301 if (hme_size(sfhmep) == ttesz) { 13302 /* 13303 * another mapping of the same size. don't clear index. 13304 */ 13305 return; 13306 } 13307 } 13308 13309 /* 13310 * Clear the p_index bit for large page. 13311 */ 13312 index = PAGESZ_TO_INDEX(ttesz); 13313 npgs = TTEPAGES(ttesz); 13314 while (npgs-- > 0) { 13315 ASSERT(pp->p_index & index); 13316 pp->p_index &= ~index; 13317 pp = PP_PAGENEXT(pp); 13318 } 13319 } 13320 13321 /* 13322 * return supported features 13323 */ 13324 /* ARGSUSED */ 13325 int 13326 hat_supported(enum hat_features feature, void *arg) 13327 { 13328 switch (feature) { 13329 case HAT_SHARED_PT: 13330 case HAT_DYNAMIC_ISM_UNMAP: 13331 case HAT_VMODSORT: 13332 return (1); 13333 case HAT_SHARED_REGIONS: 13334 if (shctx_on) 13335 return (1); 13336 else 13337 return (0); 13338 default: 13339 return (0); 13340 } 13341 } 13342 13343 void 13344 hat_enter(struct hat *hat) 13345 { 13346 hatlock_t *hatlockp; 13347 13348 if (hat != ksfmmup) { 13349 hatlockp = TSB_HASH(hat); 13350 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13351 } 13352 } 13353 13354 void 13355 hat_exit(struct hat *hat) 13356 { 13357 hatlock_t *hatlockp; 13358 13359 if (hat != ksfmmup) { 13360 hatlockp = TSB_HASH(hat); 13361 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13362 } 13363 } 13364 13365 /*ARGSUSED*/ 13366 void 13367 hat_reserve(struct as *as, caddr_t addr, size_t len) 13368 { 13369 } 13370 13371 static void 13372 hat_kstat_init(void) 13373 { 13374 kstat_t *ksp; 13375 13376 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13377 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13378 KSTAT_FLAG_VIRTUAL); 13379 if (ksp) { 13380 ksp->ks_data = (void *) &sfmmu_global_stat; 13381 kstat_install(ksp); 13382 } 13383 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13384 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13385 KSTAT_FLAG_VIRTUAL); 13386 if (ksp) { 13387 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13388 kstat_install(ksp); 13389 } 13390 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13391 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13392 KSTAT_FLAG_WRITABLE); 13393 if (ksp) { 13394 ksp->ks_update = sfmmu_kstat_percpu_update; 13395 kstat_install(ksp); 13396 } 13397 } 13398 13399 /* ARGSUSED */ 13400 static int 13401 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13402 { 13403 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13404 struct tsbmiss *tsbm = tsbmiss_area; 13405 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13406 int i; 13407 13408 ASSERT(cpu_kstat); 13409 if (rw == KSTAT_READ) { 13410 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13411 cpu_kstat->sf_itlb_misses = 0; 13412 cpu_kstat->sf_dtlb_misses = 0; 13413 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13414 tsbm->uprot_traps; 13415 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13416 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13417 cpu_kstat->sf_tsb_hits = 0; 13418 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13419 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13420 } 13421 } else { 13422 /* KSTAT_WRITE is used to clear stats */ 13423 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13424 tsbm->utsb_misses = 0; 13425 tsbm->ktsb_misses = 0; 13426 tsbm->uprot_traps = 0; 13427 tsbm->kprot_traps = 0; 13428 kpmtsbm->kpm_dtlb_misses = 0; 13429 kpmtsbm->kpm_tsb_misses = 0; 13430 } 13431 } 13432 return (0); 13433 } 13434 13435 #ifdef DEBUG 13436 13437 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13438 13439 /* 13440 * A tte checker. *orig_old is the value we read before cas. 13441 * *cur is the value returned by cas. 13442 * *new is the desired value when we do the cas. 13443 * 13444 * *hmeblkp is currently unused. 13445 */ 13446 13447 /* ARGSUSED */ 13448 void 13449 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13450 { 13451 pfn_t i, j, k; 13452 int cpuid = CPU->cpu_id; 13453 13454 gorig[cpuid] = orig_old; 13455 gcur[cpuid] = cur; 13456 gnew[cpuid] = new; 13457 13458 #ifdef lint 13459 hmeblkp = hmeblkp; 13460 #endif 13461 13462 if (TTE_IS_VALID(orig_old)) { 13463 if (TTE_IS_VALID(cur)) { 13464 i = TTE_TO_TTEPFN(orig_old); 13465 j = TTE_TO_TTEPFN(cur); 13466 k = TTE_TO_TTEPFN(new); 13467 if (i != j) { 13468 /* remap error? */ 13469 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13470 } 13471 13472 if (i != k) { 13473 /* remap error? */ 13474 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13475 } 13476 } else { 13477 if (TTE_IS_VALID(new)) { 13478 panic("chk_tte: invalid cur? "); 13479 } 13480 13481 i = TTE_TO_TTEPFN(orig_old); 13482 k = TTE_TO_TTEPFN(new); 13483 if (i != k) { 13484 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13485 } 13486 } 13487 } else { 13488 if (TTE_IS_VALID(cur)) { 13489 j = TTE_TO_TTEPFN(cur); 13490 if (TTE_IS_VALID(new)) { 13491 k = TTE_TO_TTEPFN(new); 13492 if (j != k) { 13493 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13494 j, k); 13495 } 13496 } else { 13497 panic("chk_tte: why here?"); 13498 } 13499 } else { 13500 if (!TTE_IS_VALID(new)) { 13501 panic("chk_tte: why here2 ?"); 13502 } 13503 } 13504 } 13505 } 13506 13507 #endif /* DEBUG */ 13508 13509 extern void prefetch_tsbe_read(struct tsbe *); 13510 extern void prefetch_tsbe_write(struct tsbe *); 13511 13512 13513 /* 13514 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13515 * us optimal performance on Cheetah+. You can only have 8 outstanding 13516 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13517 * prefetch to make the most utilization of the prefetch capability. 13518 */ 13519 #define TSBE_PREFETCH_STRIDE (7) 13520 13521 void 13522 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13523 { 13524 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13525 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13526 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13527 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13528 struct tsbe *old; 13529 struct tsbe *new; 13530 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13531 uint64_t va; 13532 int new_offset; 13533 int i; 13534 int vpshift; 13535 int last_prefetch; 13536 13537 if (old_bytes == new_bytes) { 13538 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13539 } else { 13540 13541 /* 13542 * A TSBE is 16 bytes which means there are four TSBE's per 13543 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13544 */ 13545 old = (struct tsbe *)old_tsbinfo->tsb_va; 13546 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13547 for (i = 0; i < old_entries; i++, old++) { 13548 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13549 prefetch_tsbe_read(old); 13550 if (!old->tte_tag.tag_invalid) { 13551 /* 13552 * We have a valid TTE to remap. Check the 13553 * size. We won't remap 64K or 512K TTEs 13554 * because they span more than one TSB entry 13555 * and are indexed using an 8K virt. page. 13556 * Ditto for 32M and 256M TTEs. 13557 */ 13558 if (TTE_CSZ(&old->tte_data) == TTE64K || 13559 TTE_CSZ(&old->tte_data) == TTE512K) 13560 continue; 13561 if (mmu_page_sizes == max_mmu_page_sizes) { 13562 if (TTE_CSZ(&old->tte_data) == TTE32M || 13563 TTE_CSZ(&old->tte_data) == TTE256M) 13564 continue; 13565 } 13566 13567 /* clear the lower 22 bits of the va */ 13568 va = *(uint64_t *)old << 22; 13569 /* turn va into a virtual pfn */ 13570 va >>= 22 - TSB_START_SIZE; 13571 /* 13572 * or in bits from the offset in the tsb 13573 * to get the real virtual pfn. These 13574 * correspond to bits [21:13] in the va 13575 */ 13576 vpshift = 13577 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13578 0x1ff; 13579 va |= (i << vpshift); 13580 va >>= vpshift; 13581 new_offset = va & (new_entries - 1); 13582 new = new_base + new_offset; 13583 prefetch_tsbe_write(new); 13584 *new = *old; 13585 } 13586 } 13587 } 13588 } 13589 13590 /* 13591 * unused in sfmmu 13592 */ 13593 void 13594 hat_dump(void) 13595 { 13596 } 13597 13598 /* 13599 * Called when a thread is exiting and we have switched to the kernel address 13600 * space. Perform the same VM initialization resume() uses when switching 13601 * processes. 13602 * 13603 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13604 * we call it anyway in case the semantics change in the future. 13605 */ 13606 /*ARGSUSED*/ 13607 void 13608 hat_thread_exit(kthread_t *thd) 13609 { 13610 uint_t pgsz_cnum; 13611 uint_t pstate_save; 13612 13613 ASSERT(thd->t_procp->p_as == &kas); 13614 13615 pgsz_cnum = KCONTEXT; 13616 #ifdef sun4u 13617 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13618 #endif 13619 13620 /* 13621 * Note that sfmmu_load_mmustate() is currently a no-op for 13622 * kernel threads. We need to disable interrupts here, 13623 * simply because otherwise sfmmu_load_mmustate() would panic 13624 * if the caller does not disable interrupts. 13625 */ 13626 pstate_save = sfmmu_disable_intrs(); 13627 13628 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13629 sfmmu_setctx_sec(pgsz_cnum); 13630 sfmmu_load_mmustate(ksfmmup); 13631 sfmmu_enable_intrs(pstate_save); 13632 } 13633 13634 13635 /* 13636 * SRD support 13637 */ 13638 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13639 (((uintptr_t)(vp)) >> 11)) & \ 13640 srd_hashmask) 13641 13642 /* 13643 * Attach the process to the srd struct associated with the exec vnode 13644 * from which the process is started. 13645 */ 13646 void 13647 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13648 { 13649 uint_t hash = SRD_HASH_FUNCTION(evp); 13650 sf_srd_t *srdp; 13651 sf_srd_t *newsrdp; 13652 13653 ASSERT(sfmmup != ksfmmup); 13654 ASSERT(sfmmup->sfmmu_srdp == NULL); 13655 13656 if (!shctx_on) { 13657 return; 13658 } 13659 13660 VN_HOLD(evp); 13661 13662 if (srd_buckets[hash].srdb_srdp != NULL) { 13663 mutex_enter(&srd_buckets[hash].srdb_lock); 13664 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13665 srdp = srdp->srd_hash) { 13666 if (srdp->srd_evp == evp) { 13667 ASSERT(srdp->srd_refcnt >= 0); 13668 sfmmup->sfmmu_srdp = srdp; 13669 atomic_add_32( 13670 (volatile uint_t *)&srdp->srd_refcnt, 1); 13671 mutex_exit(&srd_buckets[hash].srdb_lock); 13672 return; 13673 } 13674 } 13675 mutex_exit(&srd_buckets[hash].srdb_lock); 13676 } 13677 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13678 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13679 13680 newsrdp->srd_evp = evp; 13681 newsrdp->srd_refcnt = 1; 13682 newsrdp->srd_hmergnfree = NULL; 13683 newsrdp->srd_ismrgnfree = NULL; 13684 13685 mutex_enter(&srd_buckets[hash].srdb_lock); 13686 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13687 srdp = srdp->srd_hash) { 13688 if (srdp->srd_evp == evp) { 13689 ASSERT(srdp->srd_refcnt >= 0); 13690 sfmmup->sfmmu_srdp = srdp; 13691 atomic_add_32((volatile uint_t *)&srdp->srd_refcnt, 1); 13692 mutex_exit(&srd_buckets[hash].srdb_lock); 13693 kmem_cache_free(srd_cache, newsrdp); 13694 return; 13695 } 13696 } 13697 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13698 srd_buckets[hash].srdb_srdp = newsrdp; 13699 sfmmup->sfmmu_srdp = newsrdp; 13700 13701 mutex_exit(&srd_buckets[hash].srdb_lock); 13702 13703 } 13704 13705 static void 13706 sfmmu_leave_srd(sfmmu_t *sfmmup) 13707 { 13708 vnode_t *evp; 13709 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13710 uint_t hash; 13711 sf_srd_t **prev_srdpp; 13712 sf_region_t *rgnp; 13713 sf_region_t *nrgnp; 13714 #ifdef DEBUG 13715 int rgns = 0; 13716 #endif 13717 int i; 13718 13719 ASSERT(sfmmup != ksfmmup); 13720 ASSERT(srdp != NULL); 13721 ASSERT(srdp->srd_refcnt > 0); 13722 ASSERT(sfmmup->sfmmu_scdp == NULL); 13723 ASSERT(sfmmup->sfmmu_free == 1); 13724 13725 sfmmup->sfmmu_srdp = NULL; 13726 evp = srdp->srd_evp; 13727 ASSERT(evp != NULL); 13728 if (atomic_add_32_nv( 13729 (volatile uint_t *)&srdp->srd_refcnt, -1)) { 13730 VN_RELE(evp); 13731 return; 13732 } 13733 13734 hash = SRD_HASH_FUNCTION(evp); 13735 mutex_enter(&srd_buckets[hash].srdb_lock); 13736 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13737 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13738 if (srdp->srd_evp == evp) { 13739 break; 13740 } 13741 } 13742 if (srdp == NULL || srdp->srd_refcnt) { 13743 mutex_exit(&srd_buckets[hash].srdb_lock); 13744 VN_RELE(evp); 13745 return; 13746 } 13747 *prev_srdpp = srdp->srd_hash; 13748 mutex_exit(&srd_buckets[hash].srdb_lock); 13749 13750 ASSERT(srdp->srd_refcnt == 0); 13751 VN_RELE(evp); 13752 13753 #ifdef DEBUG 13754 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13755 ASSERT(srdp->srd_rgnhash[i] == NULL); 13756 } 13757 #endif /* DEBUG */ 13758 13759 /* free each hme regions in the srd */ 13760 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13761 nrgnp = rgnp->rgn_next; 13762 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13763 ASSERT(rgnp->rgn_refcnt == 0); 13764 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13765 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13766 ASSERT(rgnp->rgn_hmeflags == 0); 13767 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13768 #ifdef DEBUG 13769 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13770 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13771 } 13772 rgns++; 13773 #endif /* DEBUG */ 13774 kmem_cache_free(region_cache, rgnp); 13775 } 13776 ASSERT(rgns == srdp->srd_next_hmerid); 13777 13778 #ifdef DEBUG 13779 rgns = 0; 13780 #endif 13781 /* free each ism rgns in the srd */ 13782 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13783 nrgnp = rgnp->rgn_next; 13784 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13785 ASSERT(rgnp->rgn_refcnt == 0); 13786 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13787 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13788 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13789 #ifdef DEBUG 13790 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13791 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13792 } 13793 rgns++; 13794 #endif /* DEBUG */ 13795 kmem_cache_free(region_cache, rgnp); 13796 } 13797 ASSERT(rgns == srdp->srd_next_ismrid); 13798 ASSERT(srdp->srd_ismbusyrgns == 0); 13799 ASSERT(srdp->srd_hmebusyrgns == 0); 13800 13801 srdp->srd_next_ismrid = 0; 13802 srdp->srd_next_hmerid = 0; 13803 13804 bzero((void *)srdp->srd_ismrgnp, 13805 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13806 bzero((void *)srdp->srd_hmergnp, 13807 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13808 13809 ASSERT(srdp->srd_scdp == NULL); 13810 kmem_cache_free(srd_cache, srdp); 13811 } 13812 13813 /* ARGSUSED */ 13814 static int 13815 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13816 { 13817 sf_srd_t *srdp = (sf_srd_t *)buf; 13818 bzero(buf, sizeof (*srdp)); 13819 13820 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13821 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13822 return (0); 13823 } 13824 13825 /* ARGSUSED */ 13826 static void 13827 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13828 { 13829 sf_srd_t *srdp = (sf_srd_t *)buf; 13830 13831 mutex_destroy(&srdp->srd_mutex); 13832 mutex_destroy(&srdp->srd_scd_mutex); 13833 } 13834 13835 /* 13836 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13837 * at the same time for the same process and address range. This is ensured by 13838 * the fact that address space is locked as writer when a process joins the 13839 * regions. Therefore there's no need to hold an srd lock during the entire 13840 * execution of hat_join_region()/hat_leave_region(). 13841 */ 13842 13843 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13844 (((uintptr_t)(obj)) >> 11)) & \ 13845 srd_rgn_hashmask) 13846 /* 13847 * This routine implements the shared context functionality required when 13848 * attaching a segment to an address space. It must be called from 13849 * hat_share() for D(ISM) segments and from segvn_create() for segments 13850 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13851 * which is saved in the private segment data for hme segments and 13852 * the ism_map structure for ism segments. 13853 */ 13854 hat_region_cookie_t 13855 hat_join_region(struct hat *sfmmup, 13856 caddr_t r_saddr, 13857 size_t r_size, 13858 void *r_obj, 13859 u_offset_t r_objoff, 13860 uchar_t r_perm, 13861 uchar_t r_pgszc, 13862 hat_rgn_cb_func_t r_cb_function, 13863 uint_t flags) 13864 { 13865 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13866 uint_t rhash; 13867 uint_t rid; 13868 hatlock_t *hatlockp; 13869 sf_region_t *rgnp; 13870 sf_region_t *new_rgnp = NULL; 13871 int i; 13872 uint16_t *nextidp; 13873 sf_region_t **freelistp; 13874 int maxids; 13875 sf_region_t **rarrp; 13876 uint16_t *busyrgnsp; 13877 ulong_t rttecnt; 13878 uchar_t tteflag; 13879 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13880 int text = (r_type == HAT_REGION_TEXT); 13881 13882 if (srdp == NULL || r_size == 0) { 13883 return (HAT_INVALID_REGION_COOKIE); 13884 } 13885 13886 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 13887 ASSERT(sfmmup != ksfmmup); 13888 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 13889 ASSERT(srdp->srd_refcnt > 0); 13890 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13891 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13892 ASSERT(r_pgszc < mmu_page_sizes); 13893 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13894 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13895 panic("hat_join_region: region addr or size is not aligned\n"); 13896 } 13897 13898 13899 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13900 SFMMU_REGION_HME; 13901 /* 13902 * Currently only support shared hmes for the read only main text 13903 * region. 13904 */ 13905 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13906 (r_perm & PROT_WRITE))) { 13907 return (HAT_INVALID_REGION_COOKIE); 13908 } 13909 13910 rhash = RGN_HASH_FUNCTION(r_obj); 13911 13912 if (r_type == SFMMU_REGION_ISM) { 13913 nextidp = &srdp->srd_next_ismrid; 13914 freelistp = &srdp->srd_ismrgnfree; 13915 maxids = SFMMU_MAX_ISM_REGIONS; 13916 rarrp = srdp->srd_ismrgnp; 13917 busyrgnsp = &srdp->srd_ismbusyrgns; 13918 } else { 13919 nextidp = &srdp->srd_next_hmerid; 13920 freelistp = &srdp->srd_hmergnfree; 13921 maxids = SFMMU_MAX_HME_REGIONS; 13922 rarrp = srdp->srd_hmergnp; 13923 busyrgnsp = &srdp->srd_hmebusyrgns; 13924 } 13925 13926 mutex_enter(&srdp->srd_mutex); 13927 13928 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13929 rgnp = rgnp->rgn_hash) { 13930 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13931 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13932 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13933 break; 13934 } 13935 } 13936 13937 rfound: 13938 if (rgnp != NULL) { 13939 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13940 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13941 ASSERT(rgnp->rgn_refcnt >= 0); 13942 rid = rgnp->rgn_id; 13943 ASSERT(rid < maxids); 13944 ASSERT(rarrp[rid] == rgnp); 13945 ASSERT(rid < *nextidp); 13946 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 13947 mutex_exit(&srdp->srd_mutex); 13948 if (new_rgnp != NULL) { 13949 kmem_cache_free(region_cache, new_rgnp); 13950 } 13951 if (r_type == SFMMU_REGION_HME) { 13952 int myjoin = 13953 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 13954 13955 sfmmu_link_to_hmeregion(sfmmup, rgnp); 13956 /* 13957 * bitmap should be updated after linking sfmmu on 13958 * region list so that pageunload() doesn't skip 13959 * TSB/TLB flush. As soon as bitmap is updated another 13960 * thread in this process can already start accessing 13961 * this region. 13962 */ 13963 /* 13964 * Normally ttecnt accounting is done as part of 13965 * pagefault handling. But a process may not take any 13966 * pagefaults on shared hmeblks created by some other 13967 * process. To compensate for this assume that the 13968 * entire region will end up faulted in using 13969 * the region's pagesize. 13970 * 13971 */ 13972 if (r_pgszc > TTE8K) { 13973 tteflag = 1 << r_pgszc; 13974 if (disable_large_pages & tteflag) { 13975 tteflag = 0; 13976 } 13977 } else { 13978 tteflag = 0; 13979 } 13980 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 13981 hatlockp = sfmmu_hat_enter(sfmmup); 13982 sfmmup->sfmmu_rtteflags |= tteflag; 13983 sfmmu_hat_exit(hatlockp); 13984 } 13985 hatlockp = sfmmu_hat_enter(sfmmup); 13986 13987 /* 13988 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 13989 * region to allow for large page allocation failure. 13990 */ 13991 if (r_pgszc >= TTE4M) { 13992 sfmmup->sfmmu_tsb0_4minflcnt += 13993 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 13994 } 13995 13996 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 13997 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 13998 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 13999 rttecnt); 14000 14001 if (text && r_pgszc >= TTE4M && 14002 (tteflag || ((disable_large_pages >> TTE4M) & 14003 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 14004 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 14005 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 14006 } 14007 14008 sfmmu_hat_exit(hatlockp); 14009 /* 14010 * On Panther we need to make sure TLB is programmed 14011 * to accept 32M/256M pages. Call 14012 * sfmmu_check_page_sizes() now to make sure TLB is 14013 * setup before making hmeregions visible to other 14014 * threads. 14015 */ 14016 sfmmu_check_page_sizes(sfmmup, 1); 14017 hatlockp = sfmmu_hat_enter(sfmmup); 14018 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14019 14020 /* 14021 * if context is invalid tsb miss exception code will 14022 * call sfmmu_check_page_sizes() and update tsbmiss 14023 * area later. 14024 */ 14025 kpreempt_disable(); 14026 if (myjoin && 14027 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 14028 != INVALID_CONTEXT)) { 14029 struct tsbmiss *tsbmp; 14030 14031 tsbmp = &tsbmiss_area[CPU->cpu_id]; 14032 ASSERT(sfmmup == tsbmp->usfmmup); 14033 BT_SET(tsbmp->shmermap, rid); 14034 if (r_pgszc > TTE64K) { 14035 tsbmp->uhat_rtteflags |= tteflag; 14036 } 14037 14038 } 14039 kpreempt_enable(); 14040 14041 sfmmu_hat_exit(hatlockp); 14042 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14043 HAT_INVALID_REGION_COOKIE); 14044 } else { 14045 hatlockp = sfmmu_hat_enter(sfmmup); 14046 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14047 sfmmu_hat_exit(hatlockp); 14048 } 14049 ASSERT(rid < maxids); 14050 14051 if (r_type == SFMMU_REGION_ISM) { 14052 sfmmu_find_scd(sfmmup); 14053 } 14054 return ((hat_region_cookie_t)((uint64_t)rid)); 14055 } 14056 14057 ASSERT(new_rgnp == NULL); 14058 14059 if (*busyrgnsp >= maxids) { 14060 mutex_exit(&srdp->srd_mutex); 14061 return (HAT_INVALID_REGION_COOKIE); 14062 } 14063 14064 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14065 if (*freelistp != NULL) { 14066 rgnp = *freelistp; 14067 *freelistp = rgnp->rgn_next; 14068 ASSERT(rgnp->rgn_id < *nextidp); 14069 ASSERT(rgnp->rgn_id < maxids); 14070 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14071 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14072 == r_type); 14073 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14074 ASSERT(rgnp->rgn_hmeflags == 0); 14075 } else { 14076 /* 14077 * release local locks before memory allocation. 14078 */ 14079 mutex_exit(&srdp->srd_mutex); 14080 14081 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14082 14083 mutex_enter(&srdp->srd_mutex); 14084 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14085 rgnp = rgnp->rgn_hash) { 14086 if (rgnp->rgn_saddr == r_saddr && 14087 rgnp->rgn_size == r_size && 14088 rgnp->rgn_obj == r_obj && 14089 rgnp->rgn_objoff == r_objoff && 14090 rgnp->rgn_perm == r_perm && 14091 rgnp->rgn_pgszc == r_pgszc) { 14092 break; 14093 } 14094 } 14095 if (rgnp != NULL) { 14096 goto rfound; 14097 } 14098 14099 if (*nextidp >= maxids) { 14100 mutex_exit(&srdp->srd_mutex); 14101 goto fail; 14102 } 14103 rgnp = new_rgnp; 14104 new_rgnp = NULL; 14105 rgnp->rgn_id = (*nextidp)++; 14106 ASSERT(rgnp->rgn_id < maxids); 14107 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14108 rarrp[rgnp->rgn_id] = rgnp; 14109 } 14110 14111 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14112 ASSERT(rgnp->rgn_hmeflags == 0); 14113 #ifdef DEBUG 14114 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14115 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14116 } 14117 #endif 14118 rgnp->rgn_saddr = r_saddr; 14119 rgnp->rgn_size = r_size; 14120 rgnp->rgn_obj = r_obj; 14121 rgnp->rgn_objoff = r_objoff; 14122 rgnp->rgn_perm = r_perm; 14123 rgnp->rgn_pgszc = r_pgszc; 14124 rgnp->rgn_flags = r_type; 14125 rgnp->rgn_refcnt = 0; 14126 rgnp->rgn_cb_function = r_cb_function; 14127 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14128 srdp->srd_rgnhash[rhash] = rgnp; 14129 (*busyrgnsp)++; 14130 ASSERT(*busyrgnsp <= maxids); 14131 goto rfound; 14132 14133 fail: 14134 ASSERT(new_rgnp != NULL); 14135 kmem_cache_free(region_cache, new_rgnp); 14136 return (HAT_INVALID_REGION_COOKIE); 14137 } 14138 14139 /* 14140 * This function implements the shared context functionality required 14141 * when detaching a segment from an address space. It must be called 14142 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14143 * for segments with a valid region_cookie. 14144 * It will also be called from all seg_vn routines which change a 14145 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14146 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14147 * from segvn_fault(). 14148 */ 14149 void 14150 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14151 { 14152 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14153 sf_scd_t *scdp; 14154 uint_t rhash; 14155 uint_t rid = (uint_t)((uint64_t)rcookie); 14156 hatlock_t *hatlockp = NULL; 14157 sf_region_t *rgnp; 14158 sf_region_t **prev_rgnpp; 14159 sf_region_t *cur_rgnp; 14160 void *r_obj; 14161 int i; 14162 caddr_t r_saddr; 14163 caddr_t r_eaddr; 14164 size_t r_size; 14165 uchar_t r_pgszc; 14166 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14167 14168 ASSERT(sfmmup != ksfmmup); 14169 ASSERT(srdp != NULL); 14170 ASSERT(srdp->srd_refcnt > 0); 14171 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14172 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14173 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14174 14175 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 14176 SFMMU_REGION_HME; 14177 14178 if (r_type == SFMMU_REGION_ISM) { 14179 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14180 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14181 rgnp = srdp->srd_ismrgnp[rid]; 14182 } else { 14183 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14184 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14185 rgnp = srdp->srd_hmergnp[rid]; 14186 } 14187 ASSERT(rgnp != NULL); 14188 ASSERT(rgnp->rgn_id == rid); 14189 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14190 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14191 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14192 14193 ASSERT(sfmmup->sfmmu_xhat_provider == NULL); 14194 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) { 14195 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr, 14196 rgnp->rgn_size, 0, NULL); 14197 } 14198 14199 if (sfmmup->sfmmu_free) { 14200 ulong_t rttecnt; 14201 r_pgszc = rgnp->rgn_pgszc; 14202 r_size = rgnp->rgn_size; 14203 14204 ASSERT(sfmmup->sfmmu_scdp == NULL); 14205 if (r_type == SFMMU_REGION_ISM) { 14206 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14207 } else { 14208 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14209 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14210 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14211 14212 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14213 -rttecnt); 14214 14215 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14216 } 14217 } else if (r_type == SFMMU_REGION_ISM) { 14218 hatlockp = sfmmu_hat_enter(sfmmup); 14219 ASSERT(rid < srdp->srd_next_ismrid); 14220 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14221 scdp = sfmmup->sfmmu_scdp; 14222 if (scdp != NULL && 14223 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14224 sfmmu_leave_scd(sfmmup, r_type); 14225 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14226 } 14227 sfmmu_hat_exit(hatlockp); 14228 } else { 14229 ulong_t rttecnt; 14230 r_pgszc = rgnp->rgn_pgszc; 14231 r_saddr = rgnp->rgn_saddr; 14232 r_size = rgnp->rgn_size; 14233 r_eaddr = r_saddr + r_size; 14234 14235 ASSERT(r_type == SFMMU_REGION_HME); 14236 hatlockp = sfmmu_hat_enter(sfmmup); 14237 ASSERT(rid < srdp->srd_next_hmerid); 14238 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14239 14240 /* 14241 * If region is part of an SCD call sfmmu_leave_scd(). 14242 * Otherwise if process is not exiting and has valid context 14243 * just drop the context on the floor to lose stale TLB 14244 * entries and force the update of tsb miss area to reflect 14245 * the new region map. After that clean our TSB entries. 14246 */ 14247 scdp = sfmmup->sfmmu_scdp; 14248 if (scdp != NULL && 14249 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14250 sfmmu_leave_scd(sfmmup, r_type); 14251 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14252 } 14253 sfmmu_invalidate_ctx(sfmmup); 14254 14255 i = TTE8K; 14256 while (i < mmu_page_sizes) { 14257 if (rgnp->rgn_ttecnt[i] != 0) { 14258 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14259 r_eaddr, i); 14260 if (i < TTE4M) { 14261 i = TTE4M; 14262 continue; 14263 } else { 14264 break; 14265 } 14266 } 14267 i++; 14268 } 14269 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */ 14270 if (r_pgszc >= TTE4M) { 14271 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14272 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14273 rttecnt); 14274 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14275 } 14276 14277 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14278 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14279 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14280 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14281 14282 sfmmu_hat_exit(hatlockp); 14283 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14284 /* sfmmup left the scd, grow private tsb */ 14285 sfmmu_check_page_sizes(sfmmup, 1); 14286 } else { 14287 sfmmu_check_page_sizes(sfmmup, 0); 14288 } 14289 } 14290 14291 if (r_type == SFMMU_REGION_HME) { 14292 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14293 } 14294 14295 r_obj = rgnp->rgn_obj; 14296 if (atomic_add_32_nv((volatile uint_t *)&rgnp->rgn_refcnt, -1)) { 14297 return; 14298 } 14299 14300 /* 14301 * looks like nobody uses this region anymore. Free it. 14302 */ 14303 rhash = RGN_HASH_FUNCTION(r_obj); 14304 mutex_enter(&srdp->srd_mutex); 14305 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14306 (cur_rgnp = *prev_rgnpp) != NULL; 14307 prev_rgnpp = &cur_rgnp->rgn_hash) { 14308 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14309 break; 14310 } 14311 } 14312 14313 if (cur_rgnp == NULL) { 14314 mutex_exit(&srdp->srd_mutex); 14315 return; 14316 } 14317 14318 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14319 *prev_rgnpp = rgnp->rgn_hash; 14320 if (r_type == SFMMU_REGION_ISM) { 14321 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14322 ASSERT(rid < srdp->srd_next_ismrid); 14323 rgnp->rgn_next = srdp->srd_ismrgnfree; 14324 srdp->srd_ismrgnfree = rgnp; 14325 ASSERT(srdp->srd_ismbusyrgns > 0); 14326 srdp->srd_ismbusyrgns--; 14327 mutex_exit(&srdp->srd_mutex); 14328 return; 14329 } 14330 mutex_exit(&srdp->srd_mutex); 14331 14332 /* 14333 * Destroy region's hmeblks. 14334 */ 14335 sfmmu_unload_hmeregion(srdp, rgnp); 14336 14337 rgnp->rgn_hmeflags = 0; 14338 14339 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14340 ASSERT(rgnp->rgn_id == rid); 14341 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14342 rgnp->rgn_ttecnt[i] = 0; 14343 } 14344 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14345 mutex_enter(&srdp->srd_mutex); 14346 ASSERT(rid < srdp->srd_next_hmerid); 14347 rgnp->rgn_next = srdp->srd_hmergnfree; 14348 srdp->srd_hmergnfree = rgnp; 14349 ASSERT(srdp->srd_hmebusyrgns > 0); 14350 srdp->srd_hmebusyrgns--; 14351 mutex_exit(&srdp->srd_mutex); 14352 } 14353 14354 /* 14355 * For now only called for hmeblk regions and not for ISM regions. 14356 */ 14357 void 14358 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14359 { 14360 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14361 uint_t rid = (uint_t)((uint64_t)rcookie); 14362 sf_region_t *rgnp; 14363 sf_rgn_link_t *rlink; 14364 sf_rgn_link_t *hrlink; 14365 ulong_t rttecnt; 14366 14367 ASSERT(sfmmup != ksfmmup); 14368 ASSERT(srdp != NULL); 14369 ASSERT(srdp->srd_refcnt > 0); 14370 14371 ASSERT(rid < srdp->srd_next_hmerid); 14372 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14373 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14374 14375 rgnp = srdp->srd_hmergnp[rid]; 14376 ASSERT(rgnp->rgn_refcnt > 0); 14377 ASSERT(rgnp->rgn_id == rid); 14378 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14379 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14380 14381 atomic_add_32((volatile uint_t *)&rgnp->rgn_refcnt, 1); 14382 14383 /* LINTED: constant in conditional context */ 14384 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14385 ASSERT(rlink != NULL); 14386 mutex_enter(&rgnp->rgn_mutex); 14387 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14388 /* LINTED: constant in conditional context */ 14389 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14390 ASSERT(hrlink != NULL); 14391 ASSERT(hrlink->prev == NULL); 14392 rlink->next = rgnp->rgn_sfmmu_head; 14393 rlink->prev = NULL; 14394 hrlink->prev = sfmmup; 14395 /* 14396 * make sure rlink's next field is correct 14397 * before making this link visible. 14398 */ 14399 membar_stst(); 14400 rgnp->rgn_sfmmu_head = sfmmup; 14401 mutex_exit(&rgnp->rgn_mutex); 14402 14403 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14404 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14405 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14406 /* update tsb0 inflation count */ 14407 if (rgnp->rgn_pgszc >= TTE4M) { 14408 sfmmup->sfmmu_tsb0_4minflcnt += 14409 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14410 } 14411 /* 14412 * Update regionid bitmask without hat lock since no other thread 14413 * can update this region bitmask right now. 14414 */ 14415 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14416 } 14417 14418 /* ARGSUSED */ 14419 static int 14420 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14421 { 14422 sf_region_t *rgnp = (sf_region_t *)buf; 14423 bzero(buf, sizeof (*rgnp)); 14424 14425 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14426 14427 return (0); 14428 } 14429 14430 /* ARGSUSED */ 14431 static void 14432 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14433 { 14434 sf_region_t *rgnp = (sf_region_t *)buf; 14435 mutex_destroy(&rgnp->rgn_mutex); 14436 } 14437 14438 static int 14439 sfrgnmap_isnull(sf_region_map_t *map) 14440 { 14441 int i; 14442 14443 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14444 if (map->bitmap[i] != 0) { 14445 return (0); 14446 } 14447 } 14448 return (1); 14449 } 14450 14451 static int 14452 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14453 { 14454 int i; 14455 14456 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14457 if (map->bitmap[i] != 0) { 14458 return (0); 14459 } 14460 } 14461 return (1); 14462 } 14463 14464 #ifdef DEBUG 14465 static void 14466 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14467 { 14468 sfmmu_t *sp; 14469 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14470 14471 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14472 ASSERT(srdp == sp->sfmmu_srdp); 14473 if (sp == sfmmup) { 14474 if (onlist) { 14475 return; 14476 } else { 14477 panic("shctx: sfmmu 0x%p found on scd" 14478 "list 0x%p", (void *)sfmmup, 14479 (void *)*headp); 14480 } 14481 } 14482 } 14483 if (onlist) { 14484 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14485 (void *)sfmmup, (void *)*headp); 14486 } else { 14487 return; 14488 } 14489 } 14490 #else /* DEBUG */ 14491 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14492 #endif /* DEBUG */ 14493 14494 /* 14495 * Removes an sfmmu from the SCD sfmmu list. 14496 */ 14497 static void 14498 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14499 { 14500 ASSERT(sfmmup->sfmmu_srdp != NULL); 14501 check_scd_sfmmu_list(headp, sfmmup, 1); 14502 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14503 ASSERT(*headp != sfmmup); 14504 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14505 sfmmup->sfmmu_scd_link.next; 14506 } else { 14507 ASSERT(*headp == sfmmup); 14508 *headp = sfmmup->sfmmu_scd_link.next; 14509 } 14510 if (sfmmup->sfmmu_scd_link.next != NULL) { 14511 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14512 sfmmup->sfmmu_scd_link.prev; 14513 } 14514 } 14515 14516 14517 /* 14518 * Adds an sfmmu to the start of the queue. 14519 */ 14520 static void 14521 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14522 { 14523 check_scd_sfmmu_list(headp, sfmmup, 0); 14524 sfmmup->sfmmu_scd_link.prev = NULL; 14525 sfmmup->sfmmu_scd_link.next = *headp; 14526 if (*headp != NULL) 14527 (*headp)->sfmmu_scd_link.prev = sfmmup; 14528 *headp = sfmmup; 14529 } 14530 14531 /* 14532 * Remove an scd from the start of the queue. 14533 */ 14534 static void 14535 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14536 { 14537 if (scdp->scd_prev != NULL) { 14538 ASSERT(*headp != scdp); 14539 scdp->scd_prev->scd_next = scdp->scd_next; 14540 } else { 14541 ASSERT(*headp == scdp); 14542 *headp = scdp->scd_next; 14543 } 14544 14545 if (scdp->scd_next != NULL) { 14546 scdp->scd_next->scd_prev = scdp->scd_prev; 14547 } 14548 } 14549 14550 /* 14551 * Add an scd to the start of the queue. 14552 */ 14553 static void 14554 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14555 { 14556 scdp->scd_prev = NULL; 14557 scdp->scd_next = *headp; 14558 if (*headp != NULL) { 14559 (*headp)->scd_prev = scdp; 14560 } 14561 *headp = scdp; 14562 } 14563 14564 static int 14565 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14566 { 14567 uint_t rid; 14568 uint_t i; 14569 uint_t j; 14570 ulong_t w; 14571 sf_region_t *rgnp; 14572 ulong_t tte8k_cnt = 0; 14573 ulong_t tte4m_cnt = 0; 14574 uint_t tsb_szc; 14575 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14576 sfmmu_t *ism_hatid; 14577 struct tsb_info *newtsb; 14578 int szc; 14579 14580 ASSERT(srdp != NULL); 14581 14582 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14583 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14584 continue; 14585 } 14586 j = 0; 14587 while (w) { 14588 if (!(w & 0x1)) { 14589 j++; 14590 w >>= 1; 14591 continue; 14592 } 14593 rid = (i << BT_ULSHIFT) | j; 14594 j++; 14595 w >>= 1; 14596 14597 if (rid < SFMMU_MAX_HME_REGIONS) { 14598 rgnp = srdp->srd_hmergnp[rid]; 14599 ASSERT(rgnp->rgn_id == rid); 14600 ASSERT(rgnp->rgn_refcnt > 0); 14601 14602 if (rgnp->rgn_pgszc < TTE4M) { 14603 tte8k_cnt += rgnp->rgn_size >> 14604 TTE_PAGE_SHIFT(TTE8K); 14605 } else { 14606 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14607 tte4m_cnt += rgnp->rgn_size >> 14608 TTE_PAGE_SHIFT(TTE4M); 14609 /* 14610 * Inflate SCD tsb0 by preallocating 14611 * 1/4 8k ttecnt for 4M regions to 14612 * allow for lgpg alloc failure. 14613 */ 14614 tte8k_cnt += rgnp->rgn_size >> 14615 (TTE_PAGE_SHIFT(TTE8K) + 2); 14616 } 14617 } else { 14618 rid -= SFMMU_MAX_HME_REGIONS; 14619 rgnp = srdp->srd_ismrgnp[rid]; 14620 ASSERT(rgnp->rgn_id == rid); 14621 ASSERT(rgnp->rgn_refcnt > 0); 14622 14623 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14624 ASSERT(ism_hatid->sfmmu_ismhat); 14625 14626 for (szc = 0; szc < TTE4M; szc++) { 14627 tte8k_cnt += 14628 ism_hatid->sfmmu_ttecnt[szc] << 14629 TTE_BSZS_SHIFT(szc); 14630 } 14631 14632 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14633 if (rgnp->rgn_pgszc >= TTE4M) { 14634 tte4m_cnt += rgnp->rgn_size >> 14635 TTE_PAGE_SHIFT(TTE4M); 14636 } 14637 } 14638 } 14639 } 14640 14641 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14642 14643 /* Allocate both the SCD TSBs here. */ 14644 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14645 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14646 (tsb_szc <= TSB_4M_SZCODE || 14647 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14648 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14649 TSB_ALLOC, scsfmmup))) { 14650 14651 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14652 return (TSB_ALLOCFAIL); 14653 } else { 14654 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14655 14656 if (tte4m_cnt) { 14657 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14658 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14659 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14660 (tsb_szc <= TSB_4M_SZCODE || 14661 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14662 TSB4M|TSB32M|TSB256M, 14663 TSB_ALLOC, scsfmmup))) { 14664 /* 14665 * If we fail to allocate the 2nd shared tsb, 14666 * just free the 1st tsb, return failure. 14667 */ 14668 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14669 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14670 return (TSB_ALLOCFAIL); 14671 } else { 14672 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14673 newtsb->tsb_flags |= TSB_SHAREDCTX; 14674 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14675 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14676 } 14677 } 14678 SFMMU_STAT(sf_scd_1sttsb_alloc); 14679 } 14680 return (TSB_SUCCESS); 14681 } 14682 14683 static void 14684 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14685 { 14686 while (scd_sfmmu->sfmmu_tsb != NULL) { 14687 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14688 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14689 scd_sfmmu->sfmmu_tsb = next; 14690 } 14691 } 14692 14693 /* 14694 * Link the sfmmu onto the hme region list. 14695 */ 14696 void 14697 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14698 { 14699 uint_t rid; 14700 sf_rgn_link_t *rlink; 14701 sfmmu_t *head; 14702 sf_rgn_link_t *hrlink; 14703 14704 rid = rgnp->rgn_id; 14705 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14706 14707 /* LINTED: constant in conditional context */ 14708 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14709 ASSERT(rlink != NULL); 14710 mutex_enter(&rgnp->rgn_mutex); 14711 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14712 rlink->next = NULL; 14713 rlink->prev = NULL; 14714 /* 14715 * make sure rlink's next field is NULL 14716 * before making this link visible. 14717 */ 14718 membar_stst(); 14719 rgnp->rgn_sfmmu_head = sfmmup; 14720 } else { 14721 /* LINTED: constant in conditional context */ 14722 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14723 ASSERT(hrlink != NULL); 14724 ASSERT(hrlink->prev == NULL); 14725 rlink->next = head; 14726 rlink->prev = NULL; 14727 hrlink->prev = sfmmup; 14728 /* 14729 * make sure rlink's next field is correct 14730 * before making this link visible. 14731 */ 14732 membar_stst(); 14733 rgnp->rgn_sfmmu_head = sfmmup; 14734 } 14735 mutex_exit(&rgnp->rgn_mutex); 14736 } 14737 14738 /* 14739 * Unlink the sfmmu from the hme region list. 14740 */ 14741 void 14742 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14743 { 14744 uint_t rid; 14745 sf_rgn_link_t *rlink; 14746 14747 rid = rgnp->rgn_id; 14748 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14749 14750 /* LINTED: constant in conditional context */ 14751 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14752 ASSERT(rlink != NULL); 14753 mutex_enter(&rgnp->rgn_mutex); 14754 if (rgnp->rgn_sfmmu_head == sfmmup) { 14755 sfmmu_t *next = rlink->next; 14756 rgnp->rgn_sfmmu_head = next; 14757 /* 14758 * if we are stopped by xc_attention() after this 14759 * point the forward link walking in 14760 * sfmmu_rgntlb_demap() will work correctly since the 14761 * head correctly points to the next element. 14762 */ 14763 membar_stst(); 14764 rlink->next = NULL; 14765 ASSERT(rlink->prev == NULL); 14766 if (next != NULL) { 14767 sf_rgn_link_t *nrlink; 14768 /* LINTED: constant in conditional context */ 14769 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14770 ASSERT(nrlink != NULL); 14771 ASSERT(nrlink->prev == sfmmup); 14772 nrlink->prev = NULL; 14773 } 14774 } else { 14775 sfmmu_t *next = rlink->next; 14776 sfmmu_t *prev = rlink->prev; 14777 sf_rgn_link_t *prlink; 14778 14779 ASSERT(prev != NULL); 14780 /* LINTED: constant in conditional context */ 14781 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14782 ASSERT(prlink != NULL); 14783 ASSERT(prlink->next == sfmmup); 14784 prlink->next = next; 14785 /* 14786 * if we are stopped by xc_attention() 14787 * after this point the forward link walking 14788 * will work correctly since the prev element 14789 * correctly points to the next element. 14790 */ 14791 membar_stst(); 14792 rlink->next = NULL; 14793 rlink->prev = NULL; 14794 if (next != NULL) { 14795 sf_rgn_link_t *nrlink; 14796 /* LINTED: constant in conditional context */ 14797 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14798 ASSERT(nrlink != NULL); 14799 ASSERT(nrlink->prev == sfmmup); 14800 nrlink->prev = prev; 14801 } 14802 } 14803 mutex_exit(&rgnp->rgn_mutex); 14804 } 14805 14806 /* 14807 * Link scd sfmmu onto ism or hme region list for each region in the 14808 * scd region map. 14809 */ 14810 void 14811 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14812 { 14813 uint_t rid; 14814 uint_t i; 14815 uint_t j; 14816 ulong_t w; 14817 sf_region_t *rgnp; 14818 sfmmu_t *scsfmmup; 14819 14820 scsfmmup = scdp->scd_sfmmup; 14821 ASSERT(scsfmmup->sfmmu_scdhat); 14822 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14823 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14824 continue; 14825 } 14826 j = 0; 14827 while (w) { 14828 if (!(w & 0x1)) { 14829 j++; 14830 w >>= 1; 14831 continue; 14832 } 14833 rid = (i << BT_ULSHIFT) | j; 14834 j++; 14835 w >>= 1; 14836 14837 if (rid < SFMMU_MAX_HME_REGIONS) { 14838 rgnp = srdp->srd_hmergnp[rid]; 14839 ASSERT(rgnp->rgn_id == rid); 14840 ASSERT(rgnp->rgn_refcnt > 0); 14841 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14842 } else { 14843 sfmmu_t *ism_hatid = NULL; 14844 ism_ment_t *ism_ment; 14845 rid -= SFMMU_MAX_HME_REGIONS; 14846 rgnp = srdp->srd_ismrgnp[rid]; 14847 ASSERT(rgnp->rgn_id == rid); 14848 ASSERT(rgnp->rgn_refcnt > 0); 14849 14850 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14851 ASSERT(ism_hatid->sfmmu_ismhat); 14852 ism_ment = &scdp->scd_ism_links[rid]; 14853 ism_ment->iment_hat = scsfmmup; 14854 ism_ment->iment_base_va = rgnp->rgn_saddr; 14855 mutex_enter(&ism_mlist_lock); 14856 iment_add(ism_ment, ism_hatid); 14857 mutex_exit(&ism_mlist_lock); 14858 14859 } 14860 } 14861 } 14862 } 14863 /* 14864 * Unlink scd sfmmu from ism or hme region list for each region in the 14865 * scd region map. 14866 */ 14867 void 14868 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14869 { 14870 uint_t rid; 14871 uint_t i; 14872 uint_t j; 14873 ulong_t w; 14874 sf_region_t *rgnp; 14875 sfmmu_t *scsfmmup; 14876 14877 scsfmmup = scdp->scd_sfmmup; 14878 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14879 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14880 continue; 14881 } 14882 j = 0; 14883 while (w) { 14884 if (!(w & 0x1)) { 14885 j++; 14886 w >>= 1; 14887 continue; 14888 } 14889 rid = (i << BT_ULSHIFT) | j; 14890 j++; 14891 w >>= 1; 14892 14893 if (rid < SFMMU_MAX_HME_REGIONS) { 14894 rgnp = srdp->srd_hmergnp[rid]; 14895 ASSERT(rgnp->rgn_id == rid); 14896 ASSERT(rgnp->rgn_refcnt > 0); 14897 sfmmu_unlink_from_hmeregion(scsfmmup, 14898 rgnp); 14899 14900 } else { 14901 sfmmu_t *ism_hatid = NULL; 14902 ism_ment_t *ism_ment; 14903 rid -= SFMMU_MAX_HME_REGIONS; 14904 rgnp = srdp->srd_ismrgnp[rid]; 14905 ASSERT(rgnp->rgn_id == rid); 14906 ASSERT(rgnp->rgn_refcnt > 0); 14907 14908 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14909 ASSERT(ism_hatid->sfmmu_ismhat); 14910 ism_ment = &scdp->scd_ism_links[rid]; 14911 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14912 ASSERT(ism_ment->iment_base_va == 14913 rgnp->rgn_saddr); 14914 mutex_enter(&ism_mlist_lock); 14915 iment_sub(ism_ment, ism_hatid); 14916 mutex_exit(&ism_mlist_lock); 14917 14918 } 14919 } 14920 } 14921 } 14922 /* 14923 * Allocates and initialises a new SCD structure, this is called with 14924 * the srd_scd_mutex held and returns with the reference count 14925 * initialised to 1. 14926 */ 14927 static sf_scd_t * 14928 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14929 { 14930 sf_scd_t *new_scdp; 14931 sfmmu_t *scsfmmup; 14932 int i; 14933 14934 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14935 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14936 14937 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14938 new_scdp->scd_sfmmup = scsfmmup; 14939 scsfmmup->sfmmu_srdp = srdp; 14940 scsfmmup->sfmmu_scdp = new_scdp; 14941 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14942 scsfmmup->sfmmu_scdhat = 1; 14943 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14944 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 14945 14946 ASSERT(max_mmu_ctxdoms > 0); 14947 for (i = 0; i < max_mmu_ctxdoms; i++) { 14948 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 14949 scsfmmup->sfmmu_ctxs[i].gnum = 0; 14950 } 14951 14952 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14953 new_scdp->scd_rttecnt[i] = 0; 14954 } 14955 14956 new_scdp->scd_region_map = *new_map; 14957 new_scdp->scd_refcnt = 1; 14958 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 14959 kmem_cache_free(scd_cache, new_scdp); 14960 kmem_cache_free(sfmmuid_cache, scsfmmup); 14961 return (NULL); 14962 } 14963 if (&mmu_init_scd) { 14964 mmu_init_scd(new_scdp); 14965 } 14966 return (new_scdp); 14967 } 14968 14969 /* 14970 * The first phase of a process joining an SCD. The hat structure is 14971 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 14972 * and a cross-call with context invalidation is used to cause the 14973 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 14974 * routine. 14975 */ 14976 static void 14977 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 14978 { 14979 hatlock_t *hatlockp; 14980 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14981 int i; 14982 sf_scd_t *old_scdp; 14983 14984 ASSERT(srdp != NULL); 14985 ASSERT(scdp != NULL); 14986 ASSERT(scdp->scd_refcnt > 0); 14987 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14988 14989 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 14990 ASSERT(old_scdp != scdp); 14991 14992 mutex_enter(&old_scdp->scd_mutex); 14993 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 14994 mutex_exit(&old_scdp->scd_mutex); 14995 /* 14996 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 14997 * include the shme rgn ttecnt for rgns that 14998 * were in the old SCD 14999 */ 15000 for (i = 0; i < mmu_page_sizes; i++) { 15001 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15002 old_scdp->scd_rttecnt[i]); 15003 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15004 sfmmup->sfmmu_scdrttecnt[i]); 15005 } 15006 } 15007 15008 /* 15009 * Move sfmmu to the scd lists. 15010 */ 15011 mutex_enter(&scdp->scd_mutex); 15012 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 15013 mutex_exit(&scdp->scd_mutex); 15014 SF_SCD_INCR_REF(scdp); 15015 15016 hatlockp = sfmmu_hat_enter(sfmmup); 15017 /* 15018 * For a multi-thread process, we must stop 15019 * all the other threads before joining the scd. 15020 */ 15021 15022 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 15023 15024 sfmmu_invalidate_ctx(sfmmup); 15025 sfmmup->sfmmu_scdp = scdp; 15026 15027 /* 15028 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 15029 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 15030 */ 15031 for (i = 0; i < mmu_page_sizes; i++) { 15032 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 15033 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 15034 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15035 -sfmmup->sfmmu_scdrttecnt[i]); 15036 } 15037 /* update tsb0 inflation count */ 15038 if (old_scdp != NULL) { 15039 sfmmup->sfmmu_tsb0_4minflcnt += 15040 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15041 } 15042 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 15043 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15044 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15045 15046 sfmmu_hat_exit(hatlockp); 15047 15048 if (old_scdp != NULL) { 15049 SF_SCD_DECR_REF(srdp, old_scdp); 15050 } 15051 15052 } 15053 15054 /* 15055 * This routine is called by a process to become part of an SCD. It is called 15056 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15057 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 15058 */ 15059 static void 15060 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 15061 { 15062 struct tsb_info *tsbinfop; 15063 15064 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15065 ASSERT(sfmmup->sfmmu_scdp != NULL); 15066 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15067 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15068 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15069 15070 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15071 tsbinfop = tsbinfop->tsb_next) { 15072 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15073 continue; 15074 } 15075 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15076 15077 sfmmu_inv_tsb(tsbinfop->tsb_va, 15078 TSB_BYTES(tsbinfop->tsb_szc)); 15079 } 15080 15081 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15082 sfmmu_ism_hatflags(sfmmup, 1); 15083 15084 SFMMU_STAT(sf_join_scd); 15085 } 15086 15087 /* 15088 * This routine is called in order to check if there is an SCD which matches 15089 * the process's region map if not then a new SCD may be created. 15090 */ 15091 static void 15092 sfmmu_find_scd(sfmmu_t *sfmmup) 15093 { 15094 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15095 sf_scd_t *scdp, *new_scdp; 15096 int ret; 15097 15098 ASSERT(srdp != NULL); 15099 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15100 15101 mutex_enter(&srdp->srd_scd_mutex); 15102 for (scdp = srdp->srd_scdp; scdp != NULL; 15103 scdp = scdp->scd_next) { 15104 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15105 &sfmmup->sfmmu_region_map, ret); 15106 if (ret == 1) { 15107 SF_SCD_INCR_REF(scdp); 15108 mutex_exit(&srdp->srd_scd_mutex); 15109 sfmmu_join_scd(scdp, sfmmup); 15110 ASSERT(scdp->scd_refcnt >= 2); 15111 atomic_add_32((volatile uint32_t *) 15112 &scdp->scd_refcnt, -1); 15113 return; 15114 } else { 15115 /* 15116 * If the sfmmu region map is a subset of the scd 15117 * region map, then the assumption is that this process 15118 * will continue attaching to ISM segments until the 15119 * region maps are equal. 15120 */ 15121 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15122 &sfmmup->sfmmu_region_map, ret); 15123 if (ret == 1) { 15124 mutex_exit(&srdp->srd_scd_mutex); 15125 return; 15126 } 15127 } 15128 } 15129 15130 ASSERT(scdp == NULL); 15131 /* 15132 * No matching SCD has been found, create a new one. 15133 */ 15134 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15135 NULL) { 15136 mutex_exit(&srdp->srd_scd_mutex); 15137 return; 15138 } 15139 15140 /* 15141 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15142 */ 15143 15144 /* Set scd_rttecnt for shme rgns in SCD */ 15145 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15146 15147 /* 15148 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15149 */ 15150 sfmmu_link_scd_to_regions(srdp, new_scdp); 15151 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15152 SFMMU_STAT_ADD(sf_create_scd, 1); 15153 15154 mutex_exit(&srdp->srd_scd_mutex); 15155 sfmmu_join_scd(new_scdp, sfmmup); 15156 ASSERT(new_scdp->scd_refcnt >= 2); 15157 atomic_add_32((volatile uint32_t *)&new_scdp->scd_refcnt, -1); 15158 } 15159 15160 /* 15161 * This routine is called by a process to remove itself from an SCD. It is 15162 * either called when the processes has detached from a segment or from 15163 * hat_free_start() as a result of calling exit. 15164 */ 15165 static void 15166 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15167 { 15168 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15169 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15170 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15171 int i; 15172 15173 ASSERT(scdp != NULL); 15174 ASSERT(srdp != NULL); 15175 15176 if (sfmmup->sfmmu_free) { 15177 /* 15178 * If the process is part of an SCD the sfmmu is unlinked 15179 * from scd_sf_list. 15180 */ 15181 mutex_enter(&scdp->scd_mutex); 15182 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15183 mutex_exit(&scdp->scd_mutex); 15184 /* 15185 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15186 * are about to leave the SCD 15187 */ 15188 for (i = 0; i < mmu_page_sizes; i++) { 15189 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15190 scdp->scd_rttecnt[i]); 15191 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15192 sfmmup->sfmmu_scdrttecnt[i]); 15193 sfmmup->sfmmu_scdrttecnt[i] = 0; 15194 } 15195 sfmmup->sfmmu_scdp = NULL; 15196 15197 SF_SCD_DECR_REF(srdp, scdp); 15198 return; 15199 } 15200 15201 ASSERT(r_type != SFMMU_REGION_ISM || 15202 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15203 ASSERT(scdp->scd_refcnt); 15204 ASSERT(!sfmmup->sfmmu_free); 15205 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15206 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15207 15208 /* 15209 * Wait for ISM maps to be updated. 15210 */ 15211 if (r_type != SFMMU_REGION_ISM) { 15212 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15213 sfmmup->sfmmu_scdp != NULL) { 15214 cv_wait(&sfmmup->sfmmu_tsb_cv, 15215 HATLOCK_MUTEXP(hatlockp)); 15216 } 15217 15218 if (sfmmup->sfmmu_scdp == NULL) { 15219 sfmmu_hat_exit(hatlockp); 15220 return; 15221 } 15222 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15223 } 15224 15225 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15226 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15227 /* 15228 * Since HAT_JOIN_SCD was set our context 15229 * is still invalid. 15230 */ 15231 } else { 15232 /* 15233 * For a multi-thread process, we must stop 15234 * all the other threads before leaving the scd. 15235 */ 15236 15237 sfmmu_invalidate_ctx(sfmmup); 15238 } 15239 15240 /* Clear all the rid's for ISM, delete flags, etc */ 15241 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15242 sfmmu_ism_hatflags(sfmmup, 0); 15243 15244 /* 15245 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15246 * are in SCD before this sfmmup leaves the SCD. 15247 */ 15248 for (i = 0; i < mmu_page_sizes; i++) { 15249 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15250 scdp->scd_rttecnt[i]); 15251 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15252 sfmmup->sfmmu_scdrttecnt[i]); 15253 sfmmup->sfmmu_scdrttecnt[i] = 0; 15254 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15255 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15256 sfmmup->sfmmu_scdismttecnt[i] = 0; 15257 } 15258 /* update tsb0 inflation count */ 15259 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15260 15261 if (r_type != SFMMU_REGION_ISM) { 15262 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15263 } 15264 sfmmup->sfmmu_scdp = NULL; 15265 15266 sfmmu_hat_exit(hatlockp); 15267 15268 /* 15269 * Unlink sfmmu from scd_sf_list this can be done without holding 15270 * the hat lock as we hold the sfmmu_as lock which prevents 15271 * hat_join_region from adding this thread to the scd again. Other 15272 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15273 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15274 * while holding the hat lock. 15275 */ 15276 mutex_enter(&scdp->scd_mutex); 15277 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15278 mutex_exit(&scdp->scd_mutex); 15279 SFMMU_STAT(sf_leave_scd); 15280 15281 SF_SCD_DECR_REF(srdp, scdp); 15282 hatlockp = sfmmu_hat_enter(sfmmup); 15283 15284 } 15285 15286 /* 15287 * Unlink and free up an SCD structure with a reference count of 0. 15288 */ 15289 static void 15290 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15291 { 15292 sfmmu_t *scsfmmup; 15293 sf_scd_t *sp; 15294 hatlock_t *shatlockp; 15295 int i, ret; 15296 15297 mutex_enter(&srdp->srd_scd_mutex); 15298 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15299 if (sp == scdp) 15300 break; 15301 } 15302 if (sp == NULL || sp->scd_refcnt) { 15303 mutex_exit(&srdp->srd_scd_mutex); 15304 return; 15305 } 15306 15307 /* 15308 * It is possible that the scd has been freed and reallocated with a 15309 * different region map while we've been waiting for the srd_scd_mutex. 15310 */ 15311 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15312 if (ret != 1) { 15313 mutex_exit(&srdp->srd_scd_mutex); 15314 return; 15315 } 15316 15317 ASSERT(scdp->scd_sf_list == NULL); 15318 /* 15319 * Unlink scd from srd_scdp list. 15320 */ 15321 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15322 mutex_exit(&srdp->srd_scd_mutex); 15323 15324 sfmmu_unlink_scd_from_regions(srdp, scdp); 15325 15326 /* Clear shared context tsb and release ctx */ 15327 scsfmmup = scdp->scd_sfmmup; 15328 15329 /* 15330 * create a barrier so that scd will not be destroyed 15331 * if other thread still holds the same shared hat lock. 15332 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15333 * shared hat lock before checking the shared tsb reloc flag. 15334 */ 15335 shatlockp = sfmmu_hat_enter(scsfmmup); 15336 sfmmu_hat_exit(shatlockp); 15337 15338 sfmmu_free_scd_tsbs(scsfmmup); 15339 15340 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15341 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15342 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15343 SFMMU_L2_HMERLINKS_SIZE); 15344 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15345 } 15346 } 15347 kmem_cache_free(sfmmuid_cache, scsfmmup); 15348 kmem_cache_free(scd_cache, scdp); 15349 SFMMU_STAT(sf_destroy_scd); 15350 } 15351 15352 /* 15353 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15354 * bits which are set in the ism_region_map parameter. This flag indicates to 15355 * the tsbmiss handler that mapping for these segments should be loaded using 15356 * the shared context. 15357 */ 15358 static void 15359 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15360 { 15361 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15362 ism_blk_t *ism_blkp; 15363 ism_map_t *ism_map; 15364 int i, rid; 15365 15366 ASSERT(sfmmup->sfmmu_iblk != NULL); 15367 ASSERT(scdp != NULL); 15368 /* 15369 * Note that the caller either set HAT_ISMBUSY flag or checked 15370 * under hat lock that HAT_ISMBUSY was not set by another thread. 15371 */ 15372 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15373 15374 ism_blkp = sfmmup->sfmmu_iblk; 15375 while (ism_blkp != NULL) { 15376 ism_map = ism_blkp->iblk_maps; 15377 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 15378 rid = ism_map[i].imap_rid; 15379 if (rid == SFMMU_INVALID_ISMRID) { 15380 continue; 15381 } 15382 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15383 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15384 addflag) { 15385 ism_map[i].imap_hatflags |= 15386 HAT_CTX1_FLAG; 15387 } else { 15388 ism_map[i].imap_hatflags &= 15389 ~HAT_CTX1_FLAG; 15390 } 15391 } 15392 ism_blkp = ism_blkp->iblk_next; 15393 } 15394 } 15395 15396 static int 15397 sfmmu_srd_lock_held(sf_srd_t *srdp) 15398 { 15399 return (MUTEX_HELD(&srdp->srd_mutex)); 15400 } 15401 15402 /* ARGSUSED */ 15403 static int 15404 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15405 { 15406 sf_scd_t *scdp = (sf_scd_t *)buf; 15407 15408 bzero(buf, sizeof (sf_scd_t)); 15409 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15410 return (0); 15411 } 15412 15413 /* ARGSUSED */ 15414 static void 15415 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15416 { 15417 sf_scd_t *scdp = (sf_scd_t *)buf; 15418 15419 mutex_destroy(&scdp->scd_mutex); 15420 } 15421 15422 /* 15423 * The listp parameter is a pointer to a list of hmeblks which are partially 15424 * freed as result of calling sfmmu_hblk_hash_rm(), the last phase of the 15425 * freeing process is to cross-call all cpus to ensure that there are no 15426 * remaining cached references. 15427 * 15428 * If the local generation number is less than the global then we can free 15429 * hmeblks which are already on the pending queue as another cpu has completed 15430 * the cross-call. 15431 * 15432 * We cross-call to make sure that there are no threads on other cpus accessing 15433 * these hmblks and then complete the process of freeing them under the 15434 * following conditions: 15435 * The total number of pending hmeblks is greater than the threshold 15436 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks 15437 * It is at least 1 second since the last time we cross-called 15438 * 15439 * Otherwise, we add the hmeblks to the per-cpu pending queue. 15440 */ 15441 static void 15442 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree) 15443 { 15444 struct hme_blk *hblkp, *pr_hblkp = NULL; 15445 int count = 0; 15446 cpuset_t cpuset = cpu_ready_set; 15447 cpu_hme_pend_t *cpuhp; 15448 timestruc_t now; 15449 int one_second_expired = 0; 15450 15451 gethrestime_lasttick(&now); 15452 15453 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) { 15454 ASSERT(hblkp->hblk_shw_bit == 0); 15455 ASSERT(hblkp->hblk_shared == 0); 15456 count++; 15457 pr_hblkp = hblkp; 15458 } 15459 15460 cpuhp = &cpu_hme_pend[CPU->cpu_seqid]; 15461 mutex_enter(&cpuhp->chp_mutex); 15462 15463 if ((cpuhp->chp_count + count) == 0) { 15464 mutex_exit(&cpuhp->chp_mutex); 15465 return; 15466 } 15467 15468 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) { 15469 one_second_expired = 1; 15470 } 15471 15472 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT || 15473 (cpuhp->chp_count + count) > cpu_hme_pend_thresh || 15474 one_second_expired)) { 15475 /* Append global list to local */ 15476 if (pr_hblkp == NULL) { 15477 *listp = cpuhp->chp_listp; 15478 } else { 15479 pr_hblkp->hblk_next = cpuhp->chp_listp; 15480 } 15481 cpuhp->chp_listp = NULL; 15482 cpuhp->chp_count = 0; 15483 cpuhp->chp_timestamp = now.tv_sec; 15484 mutex_exit(&cpuhp->chp_mutex); 15485 15486 kpreempt_disable(); 15487 CPUSET_DEL(cpuset, CPU->cpu_id); 15488 xt_sync(cpuset); 15489 xt_sync(cpuset); 15490 kpreempt_enable(); 15491 15492 /* 15493 * At this stage we know that no trap handlers on other 15494 * cpus can have references to hmeblks on the list. 15495 */ 15496 sfmmu_hblk_free(listp); 15497 } else if (*listp != NULL) { 15498 pr_hblkp->hblk_next = cpuhp->chp_listp; 15499 cpuhp->chp_listp = *listp; 15500 cpuhp->chp_count += count; 15501 *listp = NULL; 15502 mutex_exit(&cpuhp->chp_mutex); 15503 } else { 15504 mutex_exit(&cpuhp->chp_mutex); 15505 } 15506 } 15507 15508 /* 15509 * Add an hmeblk to the the hash list. 15510 */ 15511 void 15512 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15513 uint64_t hblkpa) 15514 { 15515 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15516 #ifdef DEBUG 15517 if (hmebp->hmeblkp == NULL) { 15518 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA); 15519 } 15520 #endif /* DEBUG */ 15521 15522 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa; 15523 /* 15524 * Since the TSB miss handler now does not lock the hash chain before 15525 * walking it, make sure that the hmeblks nextpa is globally visible 15526 * before we make the hmeblk globally visible by updating the chain root 15527 * pointer in the hash bucket. 15528 */ 15529 membar_producer(); 15530 hmebp->hmeh_nextpa = hblkpa; 15531 hmeblkp->hblk_next = hmebp->hmeblkp; 15532 hmebp->hmeblkp = hmeblkp; 15533 15534 } 15535 15536 /* 15537 * This function is the first part of a 2 part process to remove an hmeblk 15538 * from the hash chain. In this phase we unlink the hmeblk from the hash chain 15539 * but leave the next physical pointer unchanged. The hmeblk is then linked onto 15540 * a per-cpu pending list using the virtual address pointer. 15541 * 15542 * TSB miss trap handlers that start after this phase will no longer see 15543 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register 15544 * can still use it for further chain traversal because we haven't yet modifed 15545 * the next physical pointer or freed it. 15546 * 15547 * In the second phase of hmeblk removal we'll issue a barrier xcall before 15548 * we reuse or free this hmeblk. This will make sure all lingering references to 15549 * the hmeblk after first phase disappear before we finally reclaim it. 15550 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains 15551 * during their traversal. 15552 * 15553 * The hmehash_mutex must be held when calling this function. 15554 * 15555 * Input: 15556 * hmebp - hme hash bucket pointer 15557 * hmeblkp - address of hmeblk to be removed 15558 * pr_hblk - virtual address of previous hmeblkp 15559 * listp - pointer to list of hmeblks linked by virtual address 15560 * free_now flag - indicates that a complete removal from the hash chains 15561 * is necessary. 15562 * 15563 * It is inefficient to use the free_now flag as a cross-call is required to 15564 * remove a single hmeblk from the hash chain but is necessary when hmeblks are 15565 * in short supply. 15566 */ 15567 void 15568 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15569 struct hme_blk *pr_hblk, struct hme_blk **listp, 15570 int free_now) 15571 { 15572 int shw_size, vshift; 15573 struct hme_blk *shw_hblkp; 15574 uint_t shw_mask, newshw_mask; 15575 caddr_t vaddr; 15576 int size; 15577 cpuset_t cpuset = cpu_ready_set; 15578 15579 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15580 15581 if (hmebp->hmeblkp == hmeblkp) { 15582 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa; 15583 hmebp->hmeblkp = hmeblkp->hblk_next; 15584 } else { 15585 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa; 15586 pr_hblk->hblk_next = hmeblkp->hblk_next; 15587 } 15588 15589 size = get_hblk_ttesz(hmeblkp); 15590 shw_hblkp = hmeblkp->hblk_shadow; 15591 if (shw_hblkp) { 15592 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 15593 ASSERT(!hmeblkp->hblk_shared); 15594 #ifdef DEBUG 15595 if (mmu_page_sizes == max_mmu_page_sizes) { 15596 ASSERT(size < TTE256M); 15597 } else { 15598 ASSERT(size < TTE4M); 15599 } 15600 #endif /* DEBUG */ 15601 15602 shw_size = get_hblk_ttesz(shw_hblkp); 15603 vaddr = (caddr_t)get_hblk_base(hmeblkp); 15604 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 15605 ASSERT(vshift < 8); 15606 /* 15607 * Atomically clear shadow mask bit 15608 */ 15609 do { 15610 shw_mask = shw_hblkp->hblk_shw_mask; 15611 ASSERT(shw_mask & (1 << vshift)); 15612 newshw_mask = shw_mask & ~(1 << vshift); 15613 newshw_mask = cas32(&shw_hblkp->hblk_shw_mask, 15614 shw_mask, newshw_mask); 15615 } while (newshw_mask != shw_mask); 15616 hmeblkp->hblk_shadow = NULL; 15617 } 15618 hmeblkp->hblk_shw_bit = 0; 15619 15620 if (hmeblkp->hblk_shared) { 15621 #ifdef DEBUG 15622 sf_srd_t *srdp; 15623 sf_region_t *rgnp; 15624 uint_t rid; 15625 15626 srdp = hblktosrd(hmeblkp); 15627 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 15628 rid = hmeblkp->hblk_tag.htag_rid; 15629 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 15630 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 15631 rgnp = srdp->srd_hmergnp[rid]; 15632 ASSERT(rgnp != NULL); 15633 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 15634 #endif /* DEBUG */ 15635 hmeblkp->hblk_shared = 0; 15636 } 15637 if (free_now) { 15638 kpreempt_disable(); 15639 CPUSET_DEL(cpuset, CPU->cpu_id); 15640 xt_sync(cpuset); 15641 xt_sync(cpuset); 15642 kpreempt_enable(); 15643 15644 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 15645 hmeblkp->hblk_next = NULL; 15646 } else { 15647 /* Append hmeblkp to listp for processing later. */ 15648 hmeblkp->hblk_next = *listp; 15649 *listp = hmeblkp; 15650 } 15651 } 15652 15653 /* 15654 * This routine is called when memory is in short supply and returns a free 15655 * hmeblk of the requested size from the cpu pending lists. 15656 */ 15657 static struct hme_blk * 15658 sfmmu_check_pending_hblks(int size) 15659 { 15660 int i; 15661 struct hme_blk *hmeblkp = NULL, *last_hmeblkp; 15662 int found_hmeblk; 15663 cpuset_t cpuset = cpu_ready_set; 15664 cpu_hme_pend_t *cpuhp; 15665 15666 /* Flush cpu hblk pending queues */ 15667 for (i = 0; i < NCPU; i++) { 15668 cpuhp = &cpu_hme_pend[i]; 15669 if (cpuhp->chp_listp != NULL) { 15670 mutex_enter(&cpuhp->chp_mutex); 15671 if (cpuhp->chp_listp == NULL) { 15672 mutex_exit(&cpuhp->chp_mutex); 15673 continue; 15674 } 15675 found_hmeblk = 0; 15676 last_hmeblkp = NULL; 15677 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL; 15678 hmeblkp = hmeblkp->hblk_next) { 15679 if (get_hblk_ttesz(hmeblkp) == size) { 15680 if (last_hmeblkp == NULL) { 15681 cpuhp->chp_listp = 15682 hmeblkp->hblk_next; 15683 } else { 15684 last_hmeblkp->hblk_next = 15685 hmeblkp->hblk_next; 15686 } 15687 ASSERT(cpuhp->chp_count > 0); 15688 cpuhp->chp_count--; 15689 found_hmeblk = 1; 15690 break; 15691 } else { 15692 last_hmeblkp = hmeblkp; 15693 } 15694 } 15695 mutex_exit(&cpuhp->chp_mutex); 15696 15697 if (found_hmeblk) { 15698 kpreempt_disable(); 15699 CPUSET_DEL(cpuset, CPU->cpu_id); 15700 xt_sync(cpuset); 15701 xt_sync(cpuset); 15702 kpreempt_enable(); 15703 return (hmeblkp); 15704 } 15705 } 15706 } 15707 return (NULL); 15708 }