6583 remove whole-process swapping
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * VM - Hardware Address Translation management for Spitfire MMU.
30 30 *
31 31 * This file implements the machine specific hardware translation
32 32 * needed by the VM system. The machine independent interface is
33 33 * described in <vm/hat.h> while the machine dependent interface
34 34 * and data structures are described in <vm/hat_sfmmu.h>.
35 35 *
36 36 * The hat layer manages the address translation hardware as a cache
37 37 * driven by calls from the higher levels in the VM system.
38 38 */
39 39
40 40 #include <sys/types.h>
41 41 #include <sys/kstat.h>
42 42 #include <vm/hat.h>
43 43 #include <vm/hat_sfmmu.h>
44 44 #include <vm/page.h>
45 45 #include <sys/pte.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/machparam.h>
50 50 #include <sys/vtrace.h>
51 51 #include <sys/kmem.h>
52 52 #include <sys/mmu.h>
53 53 #include <sys/cmn_err.h>
54 54 #include <sys/cpu.h>
55 55 #include <sys/cpuvar.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/lgrp.h>
58 58 #include <sys/archsystm.h>
59 59 #include <sys/machsystm.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/as.h>
62 62 #include <vm/seg.h>
63 63 #include <vm/seg_kp.h>
64 64 #include <vm/seg_kmem.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/rm.h>
67 67 #include <sys/t_lock.h>
68 68 #include <sys/obpdefs.h>
69 69 #include <sys/vm_machparam.h>
70 70 #include <sys/var.h>
71 71 #include <sys/trap.h>
72 72 #include <sys/machtrap.h>
73 73 #include <sys/scb.h>
74 74 #include <sys/bitmap.h>
75 75 #include <sys/machlock.h>
76 76 #include <sys/membar.h>
77 77 #include <sys/atomic.h>
78 78 #include <sys/cpu_module.h>
79 79 #include <sys/prom_debug.h>
80 80 #include <sys/ksynch.h>
81 81 #include <sys/mem_config.h>
82 82 #include <sys/mem_cage.h>
83 83 #include <vm/vm_dep.h>
84 84 #include <sys/fpu/fpusystm.h>
85 85 #include <vm/mach_kpm.h>
86 86 #include <sys/callb.h>
87 87
88 88 #ifdef DEBUG
89 89 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
90 90 if (SFMMU_IS_SHMERID_VALID(rid)) { \
91 91 caddr_t _eaddr = (saddr) + (len); \
92 92 sf_srd_t *_srdp; \
93 93 sf_region_t *_rgnp; \
94 94 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
95 95 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
96 96 ASSERT((hat) != ksfmmup); \
97 97 _srdp = (hat)->sfmmu_srdp; \
98 98 ASSERT(_srdp != NULL); \
99 99 ASSERT(_srdp->srd_refcnt != 0); \
100 100 _rgnp = _srdp->srd_hmergnp[(rid)]; \
101 101 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
102 102 ASSERT(_rgnp->rgn_refcnt != 0); \
103 103 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
104 104 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
105 105 SFMMU_REGION_HME); \
106 106 ASSERT((saddr) >= _rgnp->rgn_saddr); \
107 107 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
108 108 ASSERT(_eaddr > _rgnp->rgn_saddr); \
109 109 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
110 110 }
111 111
112 112 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
113 113 { \
114 114 caddr_t _hsva; \
115 115 caddr_t _heva; \
116 116 caddr_t _rsva; \
117 117 caddr_t _reva; \
118 118 int _ttesz = get_hblk_ttesz(hmeblkp); \
119 119 int _flagtte; \
120 120 ASSERT((srdp)->srd_refcnt != 0); \
121 121 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
122 122 ASSERT((rgnp)->rgn_id == rid); \
123 123 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
124 124 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
125 125 SFMMU_REGION_HME); \
126 126 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
127 127 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
128 128 _heva = get_hblk_endaddr(hmeblkp); \
129 129 _rsva = (caddr_t)P2ALIGN( \
130 130 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
131 131 _reva = (caddr_t)P2ROUNDUP( \
132 132 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
133 133 HBLK_MIN_BYTES); \
134 134 ASSERT(_hsva >= _rsva); \
135 135 ASSERT(_hsva < _reva); \
136 136 ASSERT(_heva > _rsva); \
137 137 ASSERT(_heva <= _reva); \
138 138 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
139 139 _ttesz; \
140 140 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
141 141 }
142 142
143 143 #else /* DEBUG */
144 144 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
145 145 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
146 146 #endif /* DEBUG */
147 147
148 148 #if defined(SF_ERRATA_57)
149 149 extern caddr_t errata57_limit;
150 150 #endif
151 151
152 152 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
153 153 (sizeof (int64_t)))
154 154 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
155 155
156 156 #define HBLK_RESERVE_CNT 128
157 157 #define HBLK_RESERVE_MIN 20
158 158
159 159 static struct hme_blk *freehblkp;
160 160 static kmutex_t freehblkp_lock;
161 161 static int freehblkcnt;
162 162
163 163 static int64_t hblk_reserve[HME8BLK_SZ_RND];
164 164 static kmutex_t hblk_reserve_lock;
165 165 static kthread_t *hblk_reserve_thread;
166 166
167 167 static nucleus_hblk8_info_t nucleus_hblk8;
168 168 static nucleus_hblk1_info_t nucleus_hblk1;
169 169
170 170 /*
171 171 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here
172 172 * after the initial phase of removing an hmeblk from the hash chain, see
173 173 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
174 174 */
175 175 static cpu_hme_pend_t *cpu_hme_pend;
176 176 static uint_t cpu_hme_pend_thresh;
177 177 /*
178 178 * SFMMU specific hat functions
179 179 */
180 180 void hat_pagecachectl(struct page *, int);
181 181
182 182 /* flags for hat_pagecachectl */
183 183 #define HAT_CACHE 0x1
184 184 #define HAT_UNCACHE 0x2
185 185 #define HAT_TMPNC 0x4
186 186
187 187 /*
188 188 * Flag to allow the creation of non-cacheable translations
189 189 * to system memory. It is off by default. At the moment this
190 190 * flag is used by the ecache error injector. The error injector
191 191 * will turn it on when creating such a translation then shut it
192 192 * off when it's finished.
193 193 */
194 194
195 195 int sfmmu_allow_nc_trans = 0;
196 196
197 197 /*
198 198 * Flag to disable large page support.
199 199 * value of 1 => disable all large pages.
200 200 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
201 201 *
202 202 * For example, use the value 0x4 to disable 512K pages.
203 203 *
204 204  */
205 205 #define LARGE_PAGES_OFF 0x1
206 206
207 207 /*
208 208 * The disable_large_pages and disable_ism_large_pages variables control
209 209 * hat_memload_array and the page sizes to be used by ISM and the kernel.
210 210 *
211 211 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
212 212 * are only used to control which OOB pages to use at upper VM segment creation
213 213 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
214 214 * Their values may come from platform or CPU specific code to disable page
215 215 * sizes that should not be used.
216 216 *
217 217 * WARNING: 512K pages are currently not supported for ISM/DISM.
218 218 */
219 219 uint_t disable_large_pages = 0;
220 220 uint_t disable_ism_large_pages = (1 << TTE512K);
221 221 uint_t disable_auto_data_large_pages = 0;
222 222 uint_t disable_auto_text_large_pages = 0;
223 223
224 224 /*
225 225 * Private sfmmu data structures for hat management
226 226 */
227 227 static struct kmem_cache *sfmmuid_cache;
228 228 static struct kmem_cache *mmuctxdom_cache;
229 229
230 230 /*
231 231 * Private sfmmu data structures for tsb management
232 232 */
233 233 static struct kmem_cache *sfmmu_tsbinfo_cache;
234 234 static struct kmem_cache *sfmmu_tsb8k_cache;
235 235 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
236 236 static vmem_t *kmem_bigtsb_arena;
237 237 static vmem_t *kmem_tsb_arena;
238 238
239 239 /*
240 240 * sfmmu static variables for hmeblk resource management.
241 241 */
242 242 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
243 243 static struct kmem_cache *sfmmu8_cache;
244 244 static struct kmem_cache *sfmmu1_cache;
245 245 static struct kmem_cache *pa_hment_cache;
246 246
247 247 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
248 248 /*
249 249 * private data for ism
250 250 */
251 251 static struct kmem_cache *ism_blk_cache;
252 252 static struct kmem_cache *ism_ment_cache;
253 253 #define ISMID_STARTADDR NULL
254 254
255 255 /*
256 256 * Region management data structures and function declarations.
257 257 */
258 258
259 259 static void sfmmu_leave_srd(sfmmu_t *);
260 260 static int sfmmu_srdcache_constructor(void *, void *, int);
261 261 static void sfmmu_srdcache_destructor(void *, void *);
262 262 static int sfmmu_rgncache_constructor(void *, void *, int);
263 263 static void sfmmu_rgncache_destructor(void *, void *);
264 264 static int sfrgnmap_isnull(sf_region_map_t *);
265 265 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
266 266 static int sfmmu_scdcache_constructor(void *, void *, int);
267 267 static void sfmmu_scdcache_destructor(void *, void *);
268 268 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
269 269 size_t, void *, u_offset_t);
270 270
271 271 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
272 272 static sf_srd_bucket_t *srd_buckets;
273 273 static struct kmem_cache *srd_cache;
274 274 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
275 275 static struct kmem_cache *region_cache;
276 276 static struct kmem_cache *scd_cache;
277 277
278 278 #ifdef sun4v
279 279 int use_bigtsb_arena = 1;
280 280 #else
281 281 int use_bigtsb_arena = 0;
282 282 #endif
283 283
284 284 /* External /etc/system tunable, for turning the shctx support on and off */
285 285 int disable_shctx = 0;
286 286 /* Internal variable, set by MD if the HW supports shctx feature */
287 287 int shctx_on = 0;
288 288
289 289 #ifdef DEBUG
290 290 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
291 291 #endif
292 292 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
293 293 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
294 294
295 295 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
296 296 static void sfmmu_find_scd(sfmmu_t *);
297 297 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
298 298 static void sfmmu_finish_join_scd(sfmmu_t *);
299 299 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
300 300 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
301 301 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
302 302 static void sfmmu_free_scd_tsbs(sfmmu_t *);
303 303 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
304 304 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
305 305 static void sfmmu_ism_hatflags(sfmmu_t *, int);
306 306 static int sfmmu_srd_lock_held(sf_srd_t *);
307 307 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
308 308 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
309 309 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
310 310 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
311 311 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
312 312 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
313 313
314 314 /*
315 315 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
316 316 * HAT flags, synchronizing TLB/TSB coherency, and context management.
317 317 * The lock is hashed on the sfmmup since the case where we need to lock
318 318 * all processes is rare but does occur (e.g. we need to unload a shared
319 319 * mapping from all processes using the mapping). We have a lot of buckets,
320 320 * and each slab of sfmmu_t's can use about a quarter of them, giving us
321 321 * a fairly good distribution without wasting too much space and overhead
322 322 * when we have to grab them all.
323 323 */
324 324 #define SFMMU_NUM_LOCK 128 /* must be power of two */
325 325 hatlock_t hat_lock[SFMMU_NUM_LOCK];
326 326
327 327 /*
328 328 * Hash algorithm optimized for a small number of slabs.
329 329 * 7 is (highbit((sizeof sfmmu_t)) - 1)
330 330 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
331 331 * kmem_cache, and thus they will be sequential within that cache. In
332 332 * addition, each new slab will have a different "color" up to cache_maxcolor
333 333 * which will skew the hashing for each successive slab which is allocated.
334 334 * If the size of sfmmu_t changed to a larger size, this algorithm may need
335 335 * to be revisited.
336 336 */
337 337 #define TSB_HASH_SHIFT_BITS (7)
338 338 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
339 339
340 340 #ifdef DEBUG
341 341 int tsb_hash_debug = 0;
342 342 #define TSB_HASH(sfmmup) \
343 343 (tsb_hash_debug ? &hat_lock[0] : \
344 344 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
345 345 #else /* DEBUG */
346 346 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
347 347 #endif /* DEBUG */
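
As an illustration of the hashing above, here is a hedged sketch (the addresses are made up, and the roughly 2^7-byte spacing of sfmmu_t's is inferred from the comment rather than stated in this file):

	/*
	 * Illustrative only: sfmmu_t's allocated back to back from the same
	 * slab are at least 0x80 bytes apart, so they land one or two
	 * hat_lock buckets apart:
	 *
	 *	PTR_HASH(0x30001234000) & (SFMMU_NUM_LOCK - 1)	== 0
	 *	PTR_HASH(0x30001234080) & (SFMMU_NUM_LOCK - 1)	== 1
	 *
	 * Each new slab starts at a different cache color, which further
	 * spreads the indices over the 128 buckets.
	 */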
348 348
349 349
350 350 /* sfmmu_replace_tsb() return codes. */
351 351 typedef enum tsb_replace_rc {
352 352 TSB_SUCCESS,
353 353 TSB_ALLOCFAIL,
354 354 TSB_LOSTRACE,
355 355 TSB_ALREADY_SWAPPED,
356 356 TSB_CANTGROW
357 357 } tsb_replace_rc_t;
358 358
359 359 /*
360 360 * Flags for TSB allocation routines.
361 361 */
362 362 #define TSB_ALLOC 0x01
363 363 #define TSB_FORCEALLOC 0x02
364 364 #define TSB_GROW 0x04
365 365 #define TSB_SHRINK 0x08
366 366 #define TSB_SWAPIN 0x10
367 367
368 368 /*
369 369 * Support for HAT callbacks.
370 370 */
371 371 #define SFMMU_MAX_RELOC_CALLBACKS 10
372 372 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
373 373 static id_t sfmmu_cb_nextid = 0;
374 374 static id_t sfmmu_tsb_cb_id;
375 375 struct sfmmu_callback *sfmmu_cb_table;
376 376
377 377 kmutex_t kpr_mutex;
378 378 kmutex_t kpr_suspendlock;
379 379 kthread_t *kreloc_thread;
380 380
381 381 /*
382 382 * Enable VA->PA translation sanity checking on DEBUG kernels.
383 383 * Disabled by default. This is incompatible with some
384 384 * drivers (error injector, RSM) so if it breaks you get
385 385 * to keep both pieces.
386 386 */
387 387 int hat_check_vtop = 0;
388 388
389 389 /*
390 390 * Private sfmmu routines (prototypes)
391 391 */
392 392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
393 393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
394 394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
395 395 uint_t);
396 396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
397 397 caddr_t, demap_range_t *, uint_t);
398 398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
399 399 caddr_t, int);
400 400 static void sfmmu_hblk_free(struct hme_blk **);
401 401 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
402 402 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
403 403 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
404 404 static struct hme_blk *sfmmu_hblk_steal(int);
405 405 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
406 406 struct hme_blk *, uint64_t, struct hme_blk *);
407 407 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
408 408
409 409 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
410 410 struct page **, uint_t, uint_t, uint_t);
411 411 static void hat_do_memload(struct hat *, caddr_t, struct page *,
412 412 uint_t, uint_t, uint_t);
413 413 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
414 414 uint_t, uint_t, pgcnt_t, uint_t);
415 415 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
416 416 uint_t);
417 417 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
418 418 uint_t, uint_t);
419 419 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
420 420 caddr_t, int, uint_t);
421 421 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
422 422 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
423 423 uint_t);
424 424 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
425 425 caddr_t, page_t **, uint_t, uint_t);
426 426 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
427 427
428 428 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
429 429 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
430 430 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
431 431 #ifdef VAC
432 432 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
433 433 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
434 434 int tst_tnc(page_t *pp, pgcnt_t);
435 435 void conv_tnc(page_t *pp, int);
436 436 #endif
437 437
438 438 static void sfmmu_get_ctx(sfmmu_t *);
439 439 static void sfmmu_free_sfmmu(sfmmu_t *);
440 440
441 441 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
442 442 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
443 443
444 444 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
445 445 static void hat_pagereload(struct page *, struct page *);
446 446 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
447 447 #ifdef VAC
448 448 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
449 449 static void sfmmu_page_cache(page_t *, int, int, int);
450 450 #endif
451 451
452 452 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
453 453 struct hme_blk *, int);
454 454 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
455 455 pfn_t, int, int, int, int);
456 456 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
457 457 pfn_t, int);
458 458 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
459 459 static void sfmmu_tlb_range_demap(demap_range_t *);
460 460 static void sfmmu_invalidate_ctx(sfmmu_t *);
461 461 static void sfmmu_sync_mmustate(sfmmu_t *);
462 462
463 463 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
464 464 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
465 465 sfmmu_t *);
466 466 static void sfmmu_tsb_free(struct tsb_info *);
467 467 static void sfmmu_tsbinfo_free(struct tsb_info *);
468 468 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
469 469 sfmmu_t *);
470 470 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
471 471 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
472 472 static int sfmmu_select_tsb_szc(pgcnt_t);
473 473 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
474 474 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
475 475 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
476 476 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
477 477 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
478 478 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
479 479 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
480 480 hatlock_t *, uint_t);
481 481 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
482 482
483 483 #ifdef VAC
484 484 void sfmmu_cache_flush(pfn_t, int);
485 485 void sfmmu_cache_flushcolor(int, pfn_t);
486 486 #endif
487 487 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
488 488 caddr_t, demap_range_t *, uint_t, int);
489 489
490 490 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
491 491 static uint_t sfmmu_ptov_attr(tte_t *);
492 492 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
493 493 caddr_t, demap_range_t *, uint_t);
494 494 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
495 495 static int sfmmu_idcache_constructor(void *, void *, int);
496 496 static void sfmmu_idcache_destructor(void *, void *);
497 497 static int sfmmu_hblkcache_constructor(void *, void *, int);
498 498 static void sfmmu_hblkcache_destructor(void *, void *);
499 499 static void sfmmu_hblkcache_reclaim(void *);
500 500 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
501 501 struct hmehash_bucket *);
502 502 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
503 503 struct hme_blk *, struct hme_blk **, int);
504 504 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
505 505 uint64_t);
506 506 static struct hme_blk *sfmmu_check_pending_hblks(int);
507 507 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
508 508 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
509 509 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
510 510 int, caddr_t *);
511 511 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
512 512
513 513 static void sfmmu_rm_large_mappings(page_t *, int);
514 514
515 515 static void hat_lock_init(void);
516 516 static void hat_kstat_init(void);
517 517 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
518 518 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
519 519 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
520 520 static void sfmmu_check_page_sizes(sfmmu_t *, int);
521 521 int fnd_mapping_sz(page_t *);
522 522 static void iment_add(struct ism_ment *, struct hat *);
523 523 static void iment_sub(struct ism_ment *, struct hat *);
524 524 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
525 525 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
526 526 extern void sfmmu_clear_utsbinfo(void);
527 527
528 528 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
529 529
530 530 extern int vpm_enable;
531 531
532 532 /* kpm globals */
533 533 #ifdef DEBUG
534 534 /*
535 535 * Enable trap level tsbmiss handling
536 536 */
537 537 int kpm_tsbmtl = 1;
538 538
539 539 /*
540 540 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
541 541 * required TLB shootdowns in this case, so handle w/ care. Off by default.
542 542 */
543 543 int kpm_tlb_flush;
544 544 #endif /* DEBUG */
545 545
546 546 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
547 547
548 548 #ifdef DEBUG
549 549 static void sfmmu_check_hblk_flist();
550 550 #endif
551 551
552 552 /*
553 553  * Semi-private sfmmu data structures. Some of them are initialized in
554 554 * startup or in hat_init. Some of them are private but accessed by
555 555 * assembly code or mach_sfmmu.c
556 556 */
557 557 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
558 558 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
559 559 uint64_t uhme_hash_pa; /* PA of uhme_hash */
560 560 uint64_t khme_hash_pa; /* PA of khme_hash */
561 561 int uhmehash_num; /* # of buckets in user hash table */
562 562 int khmehash_num; /* # of buckets in kernel hash table */
563 563
564 564 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
565 565 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
566 566 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
567 567
568 568 #define DEFAULT_NUM_CTXS_PER_MMU 8192
569 569 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
570 570
571 571 int cache; /* describes system cache */
572 572
573 573 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
574 574 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
575 575 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
576 576 int ktsb_sz; /* kernel 8k-indexed tsb size */
577 577
578 578 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
579 579 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
580 580 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
581 581 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
582 582
583 583 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
584 584 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
585 585 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
586 586 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
587 587
588 588 #ifndef sun4v
589 589 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
590 590 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
591 591 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
592 592 caddr_t utsb_vabase; /* reserved kernel virtual memory */
593 593 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
594 594 #endif /* sun4v */
595 595 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
596 596 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
597 597 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
598 598
599 599 /*
600 600 * Size to use for TSB slabs. Future platforms that support page sizes
601 601 * larger than 4M may wish to change these values, and provide their own
602 602 * assembly macros for building and decoding the TSB base register contents.
603 603 * Note disable_large_pages will override the value set here.
604 604 */
605 605 static uint_t tsb_slab_ttesz = TTE4M;
606 606 size_t tsb_slab_size = MMU_PAGESIZE4M;
607 607 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
608 608 /* PFN mask for TTE */
609 609 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
610 610
611 611 /*
612 612 * Size to use for TSB slabs. These are used only when 256M tsb arenas
613 613 * exist.
614 614 */
615 615 static uint_t bigtsb_slab_ttesz = TTE256M;
616 616 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
617 617 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
618 618 /* 256M page alignment for 8K pfn */
619 619 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
620 620
621 621 /* largest TSB size to grow to, will be smaller on smaller memory systems */
622 622 static int tsb_max_growsize = 0;
623 623
624 624 /*
625 625 * Tunable parameters dealing with TSB policies.
626 626 */
627 627
628 628 /*
629 629 * This undocumented tunable forces all 8K TSBs to be allocated from
630 630 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
631 631 */
632 632 #ifdef DEBUG
633 633 int tsb_forceheap = 0;
634 634 #endif /* DEBUG */
635 635
636 636 /*
637 637 * Decide whether to use per-lgroup arenas, or one global set of
638 638 * TSB arenas. The default is not to break up per-lgroup, since
639 639 * most platforms don't recognize any tangible benefit from it.
640 640 */
641 641 int tsb_lgrp_affinity = 0;
642 642
643 643 /*
644 644 * Used for growing the TSB based on the process RSS.
645 645 * tsb_rss_factor is based on the smallest TSB, and is
646 646 * shifted by the TSB size to determine if we need to grow.
647 647 * The default will grow the TSB if the number of TTEs for
648 648 * this page size exceeds 75% of the number of TSB entries,
649 649 * which should _almost_ eliminate all conflict misses
650 650 * (at the expense of using up lots and lots of memory).
651 651 */
652 652 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
653 653 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
654 654 #define SELECT_TSB_SIZECODE(pgcnt) ( \
655 655 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
656 656 default_tsb_size)
657 657 #define TSB_OK_SHRINK() \
658 658 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
659 659 #define TSB_OK_GROW() \
660 660 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
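
To make the RSS-based sizing above concrete, a worked example follows; the 512-entry figure for the smallest TSB is an assumption, not something stated in this file:

	/*
	 * Worked example (illustrative; assumes TSB_ENTRIES(TSB_MIN_SZCODE)
	 * is 512):
	 *
	 *	tsb_rss_factor       = 512 * 0.75 = 384
	 *	SFMMU_RSS_TSBSIZE(0) = 384	grow once the TTE count > 384
	 *	SFMMU_RSS_TSBSIZE(1) = 768	grow once the TTE count > 768
	 *	SFMMU_RSS_TSBSIZE(2) = 1536	grow once the TTE count > 1536
	 *
	 * Growth additionally requires TSB_OK_GROW(): total TSB memory below
	 * tsb_alloc_hiwater and freemem above desfree.
	 */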
661 661
662 662 int enable_tsb_rss_sizing = 1;
663 663 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
664 664
665 665 /* which TSB size code to use for new address spaces or if rss sizing off */
666 666 int default_tsb_size = TSB_8K_SZCODE;
667 667
668 668 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
669 669 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
670 670 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
671 671
672 672 #ifdef DEBUG
673 673 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
674 674 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
675 675 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
676 676 static int tsb_alloc_fail_mtbf = 0;
677 677 static int tsb_alloc_count = 0;
678 678 #endif /* DEBUG */
679 679
680 680 /* if set to 1, will remap valid TTEs when growing TSB. */
681 681 int tsb_remap_ttes = 1;
682 682
683 683 /*
684 684 * If we have more than this many mappings, allocate a second TSB.
685 685 * This default is chosen because the I/D fully associative TLBs are
686 686 * assumed to have at least 8 available entries. Platforms with a
687 687 * larger fully-associative TLB could probably override the default.
688 688 */
689 689
690 690 #ifdef sun4v
691 691 int tsb_sectsb_threshold = 0;
692 692 #else
693 693 int tsb_sectsb_threshold = 8;
694 694 #endif
695 695
696 696 /*
697 697 * kstat data
698 698 */
699 699 struct sfmmu_global_stat sfmmu_global_stat;
700 700 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
701 701
702 702 /*
703 703 * Global data
704 704 */
705 705 sfmmu_t *ksfmmup; /* kernel's hat id */
706 706
707 707 #ifdef DEBUG
708 708 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
709 709 #endif
710 710
711 711 /* sfmmu locking operations */
712 712 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
713 713 static int sfmmu_mlspl_held(struct page *, int);
714 714
715 715 kmutex_t *sfmmu_page_enter(page_t *);
716 716 void sfmmu_page_exit(kmutex_t *);
717 717 int sfmmu_page_spl_held(struct page *);
718 718
719 719 /* sfmmu internal locking operations - accessed directly */
720 720 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
721 721 kmutex_t **, kmutex_t **);
722 722 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
723 723 static hatlock_t *
724 724 sfmmu_hat_enter(sfmmu_t *);
725 725 static hatlock_t *
726 726 sfmmu_hat_tryenter(sfmmu_t *);
727 727 static void sfmmu_hat_exit(hatlock_t *);
728 728 static void sfmmu_hat_lock_all(void);
729 729 static void sfmmu_hat_unlock_all(void);
730 730 static void sfmmu_ismhat_enter(sfmmu_t *, int);
731 731 static void sfmmu_ismhat_exit(sfmmu_t *, int);
732 732
733 733 kpm_hlk_t *kpmp_table;
734 734 uint_t kpmp_table_sz; /* must be a power of 2 */
735 735 uchar_t kpmp_shift;
736 736
737 737 kpm_shlk_t *kpmp_stable;
738 738 uint_t kpmp_stable_sz; /* must be a power of 2 */
739 739
740 740 /*
741 741 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
742 742 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
743 743 */
744 744 #if ((2*NCPU_P2) > 128)
745 745 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
746 746 #else
747 747 #define SPL_SHIFT 7U
748 748 #endif
749 749 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
750 750 #define SPL_MASK (SPL_TABLE_SIZE - 1)
751 751
752 752 /*
753 753 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
754 754 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
755 755 */
756 756 #define SPL_INDEX(pp) \
757 757 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
758 758 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
759 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
760 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
761 761 SPL_MASK)
762 762
763 763 #define SPL_HASH(pp) \
764 764 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
765 765
766 766 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
767 767
768 768 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
769 769
770 770 #define MML_TABLE_SIZE SPL_TABLE_SIZE
771 771 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
772 772
773 773 static pad_mutex_t mml_table[MML_TABLE_SIZE];
774 774
775 775 /*
776 776 * hat_unload_callback() will group together callbacks in order
777 777 * to avoid xt_sync() calls. This is the maximum size of the group.
778 778 */
779 779 #define MAX_CB_ADDR 32
780 780
781 781 tte_t hw_tte;
782 782 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
783 783
784 784 static char *mmu_ctx_kstat_names[] = {
785 785 "mmu_ctx_tsb_exceptions",
786 786 "mmu_ctx_tsb_raise_exception",
787 787 "mmu_ctx_wrap_around",
788 788 };
789 789
790 790 /*
791 791 * Wrapper for vmem_xalloc since vmem_create only allows limited
792 792 * parameters for vm_source_alloc functions. This function allows us
793 793 * to specify alignment consistent with the size of the object being
794 794 * allocated.
795 795 */
796 796 static void *
797 797 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
798 798 {
799 799 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
800 800 }
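
A brief usage sketch of the wrapper above, mirroring how it is passed to vmem_create for the TSB arenas later in this file:

	/*
	 * Using the wrapper as the source-alloc function makes every import
	 * aligned to its own size, so e.g. a 4M TSB slab is 4M aligned and
	 * can be mapped by a single TTE:
	 *
	 *	vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
	 *	    sfmmu_vmem_xalloc_aligned_wrapper, vmem_xfree,
	 *	    heap_arena, 0, VM_SLEEP);
	 */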
801 801
802 802 /* Common code for setting tsb_alloc_hiwater. */
803 803 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
804 804 ptob(pages) / tsb_alloc_hiwater_factor
805 805
806 806 /*
807 807 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
808 808 * a single TSB. physmem is the number of physical pages so we need physmem 8K
809 809 * TTEs to represent all those physical pages. We round this up by using
810 810 * 1<<highbit(). To figure out which size code to use, remember that the size
811 811 * code is just an amount to shift the smallest TSB size to get the size of
812 812 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
813 813 * highbit() - 1) to get the size code for the smallest TSB that can represent
814 814 * all of physical memory, while erring on the side of too much.
815 815 *
816 816 * Restrict tsb_max_growsize to make sure that:
817 817 * 1) TSBs can't grow larger than the TSB slab size
818 818 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
819 819 */
820 820 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
821 821 int _i, _szc, _slabszc, _tsbszc; \
822 822 \
823 823 _i = highbit(pages); \
824 824 if ((1 << (_i - 1)) == (pages)) \
825 825 _i--; /* 2^n case, round down */ \
826 826 _szc = _i - TSB_START_SIZE; \
827 827 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
828 828 _tsbszc = MIN(_szc, _slabszc); \
829 829 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
830 830 }
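
A hedged sketch of the rounding behavior in the macro above (the page counts are illustrative only):

	/*
	 * Rounding sketch: for pages = 0x180000, highbit() returns 21 and
	 * _i stays 21, erring toward a TSB that is larger than strictly
	 * needed; for an exact power of two such as pages = 0x100000,
	 * highbit() also returns 21 but the "2^n case" branch drops _i back
	 * to 20, since 2^20 entries already cover the 2^20 pages exactly.
	 */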
831 831
832 832 /*
833 833 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
834 834 * tsb_info which handles that TTE size.
835 835 */
836 836 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
837 837 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
838 838 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
839 839 sfmmu_hat_lock_held(sfmmup)); \
840 840 if ((tte_szc) >= TTE4M) { \
841 841 ASSERT((tsbinfop) != NULL); \
842 842 (tsbinfop) = (tsbinfop)->tsb_next; \
843 843 } \
844 844 }
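
A minimal usage sketch of the macro above (the calling context is hypothetical):

	/*
	 * Usage sketch: with the hat lock held,
	 *
	 *	struct tsb_info *tsbinfop;
	 *
	 *	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, TTE4M);
	 *
	 * leaves tsbinfop pointing at sfmmup->sfmmu_tsb->tsb_next, the
	 * second TSB that backs 4M and larger translations; any size code
	 * below TTE4M yields the first tsb_info on sfmmup->sfmmu_tsb.
	 */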
845 845
846 846 /*
847 847 * Macro to use to unload entries from the TSB.
848 848 * It has knowledge of which page sizes get replicated in the TSB
849 849 * and will call the appropriate unload routine for the appropriate size.
850 850 */
851 851 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
852 852 { \
853 853 int ttesz = get_hblk_ttesz(hmeblkp); \
854 854 if (ttesz == TTE8K || ttesz == TTE4M) { \
855 855 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
856 856 } else { \
857 857 caddr_t sva = ismhat ? addr : \
858 858 (caddr_t)get_hblk_base(hmeblkp); \
859 859 caddr_t eva = sva + get_hblk_span(hmeblkp); \
860 860 ASSERT(addr >= sva && addr < eva); \
861 861 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
862 862 } \
863 863 }
864 864
865 865
866 866 /* Update tsb_alloc_hiwater after memory is configured. */
867 867 /*ARGSUSED*/
868 868 static void
869 869 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
870 870 {
871 871 /* Assumes physmem has already been updated. */
872 872 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
873 873 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
874 874 }
875 875
876 876 /*
877 877 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
878 878 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
879 879 * deleted.
880 880 */
881 881 /*ARGSUSED*/
882 882 static int
883 883 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
884 884 {
885 885 return (0);
886 886 }
887 887
888 888 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
889 889 /*ARGSUSED*/
890 890 static void
891 891 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
892 892 {
893 893 /*
894 894 * Whether the delete was cancelled or not, just go ahead and update
895 895 * tsb_alloc_hiwater and tsb_max_growsize.
896 896 */
897 897 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
898 898 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
899 899 }
900 900
901 901 static kphysm_setup_vector_t sfmmu_update_vec = {
902 902 KPHYSM_SETUP_VECTOR_VERSION, /* version */
903 903 sfmmu_update_post_add, /* post_add */
904 904 sfmmu_update_pre_del, /* pre_del */
905 905 sfmmu_update_post_del /* post_del */
906 906 };
907 907
908 908
909 909 /*
910 910 * HME_BLK HASH PRIMITIVES
911 911 */
912 912
913 913 /*
914 914 * Enter a hme on the mapping list for page pp.
915 915 * When large pages are more prevalent in the system we might want to
916 916 * keep the mapping list in ascending order by the hment size. For now,
917 917 * small pages are more frequent, so don't slow it down.
918 918 */
919 919 #define HME_ADD(hme, pp) \
920 920 { \
921 921 ASSERT(sfmmu_mlist_held(pp)); \
922 922 \
923 923 hme->hme_prev = NULL; \
924 924 hme->hme_next = pp->p_mapping; \
925 925 hme->hme_page = pp; \
926 926 if (pp->p_mapping) { \
927 927 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
928 928 ASSERT(pp->p_share > 0); \
929 929 } else { \
930 930 /* EMPTY */ \
931 931 ASSERT(pp->p_share == 0); \
932 932 } \
933 933 pp->p_mapping = hme; \
934 934 pp->p_share++; \
935 935 }
936 936
937 937 /*
938 938  * Remove a hme from the mapping list for page pp.
939 939  * If we are unmapping a large translation, we need to make sure that the
940 940  * change is reflected in the corresponding bit of the p_index field.
941 941 */
942 942 #define HME_SUB(hme, pp) \
943 943 { \
944 944 ASSERT(sfmmu_mlist_held(pp)); \
945 945 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
946 946 \
947 947 if (pp->p_mapping == NULL) { \
948 948 panic("hme_remove - no mappings"); \
949 949 } \
950 950 \
951 951 membar_stst(); /* ensure previous stores finish */ \
952 952 \
953 953 ASSERT(pp->p_share > 0); \
954 954 pp->p_share--; \
955 955 \
956 956 if (hme->hme_prev) { \
957 957 ASSERT(pp->p_mapping != hme); \
958 958 ASSERT(hme->hme_prev->hme_page == pp || \
959 959 IS_PAHME(hme->hme_prev)); \
960 960 hme->hme_prev->hme_next = hme->hme_next; \
961 961 } else { \
962 962 ASSERT(pp->p_mapping == hme); \
963 963 pp->p_mapping = hme->hme_next; \
964 964 ASSERT((pp->p_mapping == NULL) ? \
965 965 (pp->p_share == 0) : 1); \
966 966 } \
967 967 \
968 968 if (hme->hme_next) { \
969 969 ASSERT(hme->hme_next->hme_page == pp || \
970 970 IS_PAHME(hme->hme_next)); \
971 971 hme->hme_next->hme_prev = hme->hme_prev; \
972 972 } \
973 973 \
974 974 /* zero out the entry */ \
975 975 hme->hme_next = NULL; \
976 976 hme->hme_prev = NULL; \
977 977 hme->hme_page = NULL; \
978 978 \
979 979 if (hme_size(hme) > TTE8K) { \
980 980 /* remove mappings for remainder of large pg */ \
981 981 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
982 982 } \
983 983 }
984 984
985 985 /*
986 986 * This function returns the hment given the hme_blk and a vaddr.
987 987 * It assumes addr has already been checked to belong to hme_blk's
988 988 * range.
989 989 */
990 990 #define HBLKTOHME(hment, hmeblkp, addr) \
991 991 { \
992 992 int index; \
993 993 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
994 994 }
995 995
996 996 /*
997 997 * Version of HBLKTOHME that also returns the index in hmeblkp
998 998 * of the hment.
999 999 */
1000 1000 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1001 1001 { \
1002 1002 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1003 1003 \
1004 1004 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1005 1005 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1006 1006 } else \
1007 1007 idx = 0; \
1008 1008 \
1009 1009 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1010 1010 }
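
A small sketch of the index computation above; the value of NHMENTS (assumed here to be 8, so an 8K hme_blk spans eight pages) is not stated in this file:

	/*
	 * Index sketch (assumes NHMENTS == 8): for an 8K hme_blk whose base
	 * B is aligned to NHMENTS pages, an address of B + 3 * MMU_PAGESIZE
	 * gives
	 *
	 *	idx = ((B + 3 * MMU_PAGESIZE) >> MMU_PAGESHIFT) & (NHMENTS - 1)
	 *	    == 3
	 *
	 * while any larger page size always uses hblk_hme[0].
	 */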
1011 1011
1012 1012 /*
1013 1013 * Disable any page sizes not supported by the CPU
1014 1014 */
1015 1015 void
1016 1016 hat_init_pagesizes()
1017 1017 {
1018 1018 int i;
1019 1019
1020 1020 mmu_exported_page_sizes = 0;
1021 1021 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1022 1022
1023 1023 szc_2_userszc[i] = (uint_t)-1;
1024 1024 userszc_2_szc[i] = (uint_t)-1;
1025 1025
1026 1026 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1027 1027 disable_large_pages |= (1 << i);
1028 1028 } else {
1029 1029 szc_2_userszc[i] = mmu_exported_page_sizes;
1030 1030 userszc_2_szc[mmu_exported_page_sizes] = i;
1031 1031 mmu_exported_page_sizes++;
1032 1032 }
1033 1033 }
1034 1034
1035 1035 disable_ism_large_pages |= disable_large_pages;
1036 1036 disable_auto_data_large_pages = disable_large_pages;
1037 1037 disable_auto_text_large_pages = disable_large_pages;
1038 1038
1039 1039 /*
1040 1040 * Initialize mmu-specific large page sizes.
1041 1041 */
1042 1042 if (&mmu_large_pages_disabled) {
1043 1043 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1044 1044 disable_ism_large_pages |=
1045 1045 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1046 1046 disable_auto_data_large_pages |=
1047 1047 mmu_large_pages_disabled(HAT_AUTO_DATA);
1048 1048 disable_auto_text_large_pages |=
1049 1049 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1050 1050 }
1051 1051 }
1052 1052
1053 1053 /*
1054 1054 * Initialize the hardware address translation structures.
1055 1055 */
1056 1056 void
1057 1057 hat_init(void)
1058 1058 {
1059 1059 int i;
1060 1060 uint_t sz;
1061 1061 size_t size;
1062 1062
1063 1063 hat_lock_init();
1064 1064 hat_kstat_init();
1065 1065
1066 1066 /*
1067 1067 * Hardware-only bits in a TTE
1068 1068 */
1069 1069 MAKE_TTE_MASK(&hw_tte);
1070 1070
1071 1071 hat_init_pagesizes();
1072 1072
1073 1073 /* Initialize the hash locks */
1074 1074 for (i = 0; i < khmehash_num; i++) {
1075 1075 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1076 1076 MUTEX_DEFAULT, NULL);
1077 1077 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1078 1078 }
1079 1079 for (i = 0; i < uhmehash_num; i++) {
1080 1080 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1081 1081 MUTEX_DEFAULT, NULL);
1082 1082 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1083 1083 }
1084 1084 khmehash_num--; /* make sure counter starts from 0 */
1085 1085 uhmehash_num--; /* make sure counter starts from 0 */
1086 1086
1087 1087 /*
1088 1088 * Allocate context domain structures.
1089 1089 *
1090 1090 * A platform may choose to modify max_mmu_ctxdoms in
1091 1091 * set_platform_defaults(). If a platform does not define
1092 1092 * a set_platform_defaults() or does not choose to modify
1093 1093 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1094 1094 *
1095 1095 * For all platforms that have CPUs sharing MMUs, this
1096 1096 * value must be defined.
1097 1097 */
1098 1098 if (max_mmu_ctxdoms == 0)
1099 1099 max_mmu_ctxdoms = max_ncpus;
1100 1100
1101 1101 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1102 1102 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1103 1103
1104 1104 /* mmu_ctx_t is 64 bytes aligned */
1105 1105 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1106 1106 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1107 1107 /*
1108 1108 * MMU context domain initialization for the Boot CPU.
1109 1109 * This needs the context domains array allocated above.
1110 1110 */
1111 1111 mutex_enter(&cpu_lock);
1112 1112 sfmmu_cpu_init(CPU);
1113 1113 mutex_exit(&cpu_lock);
1114 1114
1115 1115 /*
1116 1116 	 * Initialize the ism mapping list lock.
1117 1117 */
1118 1118
1119 1119 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1120 1120
1121 1121 /*
1122 1122 * Each sfmmu structure carries an array of MMU context info
1123 1123 * structures, one per context domain. The size of this array depends
1124 1124 * on the maximum number of context domains. So, the size of the
1125 1125 * sfmmu structure varies per platform.
1126 1126 *
1127 1127 * sfmmu is allocated from static arena, because trap
1128 1128 * handler at TL > 0 is not allowed to touch kernel relocatable
1129 1129 * memory. sfmmu's alignment is changed to 64 bytes from
1130 1130 * default 8 bytes, as the lower 6 bits will be used to pass
1131 1131 * pgcnt to vtag_flush_pgcnt_tl1.
1132 1132 */
1133 1133 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1134 1134
1135 1135 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1136 1136 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1137 1137 NULL, NULL, static_arena, 0);
1138 1138
1139 1139 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1140 1140 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1141 1141
1142 1142 /*
1143 1143 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1144 1144 * from the heap when low on memory or when TSB_FORCEALLOC is
1145 1145 * specified, don't use magazines to cache them--we want to return
1146 1146 * them to the system as quickly as possible.
1147 1147 */
1148 1148 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1149 1149 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1150 1150 static_arena, KMC_NOMAGAZINE);
1151 1151
1152 1152 /*
1153 1153 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1154 1154 * memory, which corresponds to the old static reserve for TSBs.
1155 1155 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1156 1156 * memory we'll allocate for TSB slabs; beyond this point TSB
1157 1157 * allocations will be taken from the kernel heap (via
1158 1158 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1159 1159 * consumer.
1160 1160 */
1161 1161 if (tsb_alloc_hiwater_factor == 0) {
1162 1162 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1163 1163 }
1164 1164 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1165 1165
1166 1166 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1167 1167 if (!(disable_large_pages & (1 << sz)))
1168 1168 break;
1169 1169 }
1170 1170
1171 1171 if (sz < tsb_slab_ttesz) {
1172 1172 tsb_slab_ttesz = sz;
1173 1173 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1174 1174 tsb_slab_size = 1 << tsb_slab_shift;
1175 1175 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1176 1176 use_bigtsb_arena = 0;
1177 1177 } else if (use_bigtsb_arena &&
1178 1178 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1179 1179 use_bigtsb_arena = 0;
1180 1180 }
1181 1181
1182 1182 if (!use_bigtsb_arena) {
1183 1183 bigtsb_slab_shift = tsb_slab_shift;
1184 1184 }
1185 1185 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1186 1186
1187 1187 /*
1188 1188 * On smaller memory systems, allocate TSB memory in smaller chunks
1189 1189 * than the default 4M slab size. We also honor disable_large_pages
1190 1190 * here.
1191 1191 *
1192 1192 * The trap handlers need to be patched with the final slab shift,
1193 1193 * since they need to be able to construct the TSB pointer at runtime.
1194 1194 */
1195 1195 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1196 1196 !(disable_large_pages & (1 << TTE512K))) {
1197 1197 tsb_slab_ttesz = TTE512K;
1198 1198 tsb_slab_shift = MMU_PAGESHIFT512K;
1199 1199 tsb_slab_size = MMU_PAGESIZE512K;
1200 1200 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1201 1201 use_bigtsb_arena = 0;
1202 1202 }
1203 1203
1204 1204 if (!use_bigtsb_arena) {
1205 1205 bigtsb_slab_ttesz = tsb_slab_ttesz;
1206 1206 bigtsb_slab_shift = tsb_slab_shift;
1207 1207 bigtsb_slab_size = tsb_slab_size;
1208 1208 bigtsb_slab_mask = tsb_slab_mask;
1209 1209 }
1210 1210
1211 1211
1212 1212 /*
1213 1213 * Set up memory callback to update tsb_alloc_hiwater and
1214 1214 * tsb_max_growsize.
1215 1215 */
1216 1216 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1217 1217 ASSERT(i == 0);
1218 1218
1219 1219 /*
1220 1220 * kmem_tsb_arena is the source from which large TSB slabs are
1221 1221 * drawn. The quantum of this arena corresponds to the largest
1222 1222 * TSB size we can dynamically allocate for user processes.
1223 1223 * Currently it must also be a supported page size since we
1224 1224 * use exactly one translation entry to map each slab page.
1225 1225 *
1226 1226 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1227 1227 * which most TSBs are allocated. Since most TSB allocations are
1228 1228 * typically 8K we have a kmem cache we stack on top of each
1229 1229 * kmem_tsb_default_arena to speed up those allocations.
1230 1230 *
1231 1231 * Note the two-level scheme of arenas is required only
1232 1232 * because vmem_create doesn't allow us to specify alignment
1233 1233 * requirements. If this ever changes the code could be
1234 1234 * simplified to use only one level of arenas.
1235 1235 *
1236 1236 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1237 1237 * will be provided in addition to the 4M kmem_tsb_arena.
1238 1238 */
1239 1239 if (use_bigtsb_arena) {
1240 1240 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1241 1241 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1242 1242 vmem_xfree, heap_arena, 0, VM_SLEEP);
1243 1243 }
1244 1244
1245 1245 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1246 1246 sfmmu_vmem_xalloc_aligned_wrapper,
1247 1247 vmem_xfree, heap_arena, 0, VM_SLEEP);
1248 1248
1249 1249 if (tsb_lgrp_affinity) {
1250 1250 char s[50];
1251 1251 for (i = 0; i < NLGRPS_MAX; i++) {
1252 1252 if (use_bigtsb_arena) {
1253 1253 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1254 1254 kmem_bigtsb_default_arena[i] = vmem_create(s,
1255 1255 NULL, 0, 2 * tsb_slab_size,
1256 1256 sfmmu_tsb_segkmem_alloc,
1257 1257 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1258 1258 0, VM_SLEEP | VM_BESTFIT);
1259 1259 }
1260 1260
1261 1261 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1262 1262 kmem_tsb_default_arena[i] = vmem_create(s,
1263 1263 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1264 1264 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1265 1265 VM_SLEEP | VM_BESTFIT);
1266 1266
1267 1267 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1268 1268 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1269 1269 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1270 1270 kmem_tsb_default_arena[i], 0);
1271 1271 }
1272 1272 } else {
1273 1273 if (use_bigtsb_arena) {
1274 1274 kmem_bigtsb_default_arena[0] =
1275 1275 vmem_create("kmem_bigtsb_default", NULL, 0,
1276 1276 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1277 1277 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1278 1278 VM_SLEEP | VM_BESTFIT);
1279 1279 }
1280 1280
1281 1281 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1282 1282 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1283 1283 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1284 1284 VM_SLEEP | VM_BESTFIT);
1285 1285 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1286 1286 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1287 1287 kmem_tsb_default_arena[0], 0);
1288 1288 }
1289 1289
1290 1290 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1291 1291 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1292 1292 sfmmu_hblkcache_destructor,
1293 1293 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1294 1294 hat_memload_arena, KMC_NOHASH);
1295 1295
1296 1296 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1297 1297 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1298 1298 VMC_DUMPSAFE | VM_SLEEP);
1299 1299
1300 1300 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1301 1301 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1302 1302 sfmmu_hblkcache_destructor,
1303 1303 NULL, (void *)HME1BLK_SZ,
1304 1304 hat_memload1_arena, KMC_NOHASH);
1305 1305
1306 1306 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1307 1307 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1308 1308
1309 1309 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1310 1310 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1311 1311 NULL, NULL, static_arena, KMC_NOHASH);
1312 1312
1313 1313 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1314 1314 sizeof (ism_ment_t), 0, NULL, NULL,
1315 1315 NULL, NULL, NULL, 0);
1316 1316
1317 1317 /*
1318 1318 	 * We grab the first hat for the kernel.
1319 1319 */
1320 1320 AS_LOCK_ENTER(&kas, RW_WRITER);
1321 1321 kas.a_hat = hat_alloc(&kas);
1322 1322 AS_LOCK_EXIT(&kas);
1323 1323
1324 1324 /*
1325 1325 * Initialize hblk_reserve.
1326 1326 */
1327 1327 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1328 1328 va_to_pa((caddr_t)hblk_reserve);
1329 1329
1330 1330 #ifndef UTSB_PHYS
1331 1331 /*
1332 1332 * Reserve some kernel virtual address space for the locked TTEs
1333 1333 * that allow us to probe the TSB from TL>0.
1334 1334 */
1335 1335 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1336 1336 0, 0, NULL, NULL, VM_SLEEP);
1337 1337 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1338 1338 0, 0, NULL, NULL, VM_SLEEP);
1339 1339 #endif
1340 1340
1341 1341 #ifdef VAC
1342 1342 /*
1343 1343 * The big page VAC handling code assumes VAC
1344 1344 * will not be bigger than the smallest big
1345 1345 * page- which is 64K.
1346 1346 */
1347 1347 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1348 1348 cmn_err(CE_PANIC, "VAC too big!");
1349 1349 }
1350 1350 #endif
1351 1351
1352 1352 uhme_hash_pa = va_to_pa(uhme_hash);
1353 1353 khme_hash_pa = va_to_pa(khme_hash);
1354 1354
1355 1355 /*
1356 1356 * Initialize relocation locks. kpr_suspendlock is held
1357 1357 * at PIL_MAX to prevent interrupts from pinning the holder
1358 1358 * of a suspended TTE which may access it leading to a
1359 1359 * deadlock condition.
1360 1360 */
1361 1361 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1362 1362 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1363 1363
1364 1364 /*
1365 1365 	 * If shared context support is disabled via /etc/system,
1366 1366 	 * set shctx_on to 0 here if it was set to 1 earlier in the
1367 1367 	 * boot sequence by the cpu module initialization code.
1368 1368 */
1369 1369 if (shctx_on && disable_shctx) {
1370 1370 shctx_on = 0;
1371 1371 }
1372 1372
1373 1373 if (shctx_on) {
1374 1374 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1375 1375 sizeof (srd_buckets[0]), KM_SLEEP);
1376 1376 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1377 1377 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1378 1378 MUTEX_DEFAULT, NULL);
1379 1379 }
1380 1380
1381 1381 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1382 1382 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1383 1383 NULL, NULL, NULL, 0);
1384 1384 region_cache = kmem_cache_create("region_cache",
1385 1385 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1386 1386 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1387 1387 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1388 1388 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1389 1389 NULL, NULL, NULL, 0);
1390 1390 }
1391 1391
1392 1392 /*
1393 1393 * Pre-allocate hrm_hashtab before enabling the collection of
1394 1394 	 * refmod statistics. Allocating it on the fly would run the
1395 1395 	 * risk of recursive mutex enters or
1396 1396 	 * deadlocks.
1397 1397 */
1398 1398 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1399 1399 KM_SLEEP);
1400 1400
1401 1401 /* Allocate per-cpu pending freelist of hmeblks */
1402 1402 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1403 1403 KM_SLEEP);
1404 1404 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1405 1405 (uintptr_t)cpu_hme_pend, 64);
1406 1406
1407 1407 for (i = 0; i < NCPU; i++) {
1408 1408 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1409 1409 NULL);
1410 1410 }
1411 1411
1412 1412 if (cpu_hme_pend_thresh == 0) {
1413 1413 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1414 1414 }
1415 1415 }
1416 1416
1417 1417 /*
1418 1418 * Initialize locking for the hat layer, called early during boot.
1419 1419 */
1420 1420 static void
1421 1421 hat_lock_init()
1422 1422 {
1423 1423 int i;
1424 1424
1425 1425 /*
1426 1426 * initialize the array of mutexes protecting a page's mapping
1427 1427 * list and p_nrm field.
1428 1428 */
1429 1429 for (i = 0; i < MML_TABLE_SIZE; i++)
1430 1430 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1431 1431
1432 1432 if (kpm_enable) {
1433 1433 for (i = 0; i < kpmp_table_sz; i++) {
1434 1434 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1435 1435 MUTEX_DEFAULT, NULL);
1436 1436 }
1437 1437 }
1438 1438
1439 1439 /*
1440 1440 * Initialize array of mutex locks that protects sfmmu fields and
1441 1441 * TSB lists.
1442 1442 */
1443 1443 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1444 1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1445 1445 NULL);
1446 1446 }
1447 1447
1448 1448 #define SFMMU_KERNEL_MAXVA \
1449 1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1450 1450
1451 1451 /*
1452 1452 * Allocate a hat structure.
1453 1453 * Called when an address space first uses a hat.
1454 1454 */
1455 1455 struct hat *
1456 1456 hat_alloc(struct as *as)
1457 1457 {
1458 1458 sfmmu_t *sfmmup;
1459 1459 int i;
1460 1460 uint64_t cnum;
1461 1461 extern uint_t get_color_start(struct as *);
1462 1462
1463 1463 ASSERT(AS_WRITE_HELD(as));
1464 1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1465 1465 sfmmup->sfmmu_as = as;
1466 1466 sfmmup->sfmmu_flags = 0;
1467 1467 sfmmup->sfmmu_tteflags = 0;
1468 1468 sfmmup->sfmmu_rtteflags = 0;
1469 1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1470 1470
1471 1471 if (as == &kas) {
1472 1472 ksfmmup = sfmmup;
1473 1473 sfmmup->sfmmu_cext = 0;
1474 1474 cnum = KCONTEXT;
1475 1475
1476 1476 sfmmup->sfmmu_clrstart = 0;
1477 1477 sfmmup->sfmmu_tsb = NULL;
1478 1478 /*
1479 1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1480 1480 * to setup tsb_info for ksfmmup.
1481 1481 */
1482 1482 } else {
1483 1483
1484 1484 /*
1485 1485 		 * Just set to invalid ctx. When it faults, it will
1486 1486 		 * get a valid ctx. This avoids the situation where
1487 1487 		 * we get a ctx, it gets stolen, and we then fault
1488 1488 		 * when we try to run and have to get yet another
1489 1489 		 * ctx.
1490 1490 */
1491 1491 sfmmup->sfmmu_cext = 0;
1492 1492 cnum = INVALID_CONTEXT;
1493 1493
1494 1494 /* initialize original physical page coloring bin */
1495 1495 sfmmup->sfmmu_clrstart = get_color_start(as);
1496 1496 #ifdef DEBUG
1497 1497 if (tsb_random_size) {
1498 1498 uint32_t randval = (uint32_t)gettick() >> 4;
1499 1499 int size = randval % (tsb_max_growsize + 1);
1500 1500
1501 1501 			/* choose a random tsb size for stress testing */
1502 1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1503 1503 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1504 1504 } else
1505 1505 #endif /* DEBUG */
1506 1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1507 1507 default_tsb_size,
1508 1508 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1509 1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1510 1510 ASSERT(sfmmup->sfmmu_tsb != NULL);
1511 1511 }
1512 1512
1513 1513 ASSERT(max_mmu_ctxdoms > 0);
1514 1514 for (i = 0; i < max_mmu_ctxdoms; i++) {
1515 1515 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1516 1516 sfmmup->sfmmu_ctxs[i].gnum = 0;
1517 1517 }
1518 1518
1519 1519 for (i = 0; i < max_mmu_page_sizes; i++) {
1520 1520 sfmmup->sfmmu_ttecnt[i] = 0;
1521 1521 sfmmup->sfmmu_scdrttecnt[i] = 0;
1522 1522 sfmmup->sfmmu_ismttecnt[i] = 0;
1523 1523 sfmmup->sfmmu_scdismttecnt[i] = 0;
1524 1524 sfmmup->sfmmu_pgsz[i] = TTE8K;
1525 1525 }
1526 1526 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1527 1527 sfmmup->sfmmu_iblk = NULL;
1528 1528 sfmmup->sfmmu_ismhat = 0;
1529 1529 sfmmup->sfmmu_scdhat = 0;
1530 1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1531 1531 if (sfmmup == ksfmmup) {
1532 1532 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1533 1533 } else {
1534 1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1535 1535 }
1536 1536 sfmmup->sfmmu_free = 0;
1537 1537 sfmmup->sfmmu_rmstat = 0;
1538 1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1539 1539 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1540 1540 sfmmup->sfmmu_srdp = NULL;
1541 1541 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1542 1542 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1543 1543 sfmmup->sfmmu_scdp = NULL;
1544 1544 sfmmup->sfmmu_scd_link.next = NULL;
1545 1545 sfmmup->sfmmu_scd_link.prev = NULL;
1546 1546 return (sfmmup);
1547 1547 }
1548 1548
1549 1549 /*
1550 1550 * Create per-MMU context domain kstats for a given MMU ctx.
1551 1551 */
1552 1552 static void
1553 1553 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1554 1554 {
1555 1555 mmu_ctx_stat_t stat;
1556 1556 kstat_t *mmu_kstat;
1557 1557
1558 1558 ASSERT(MUTEX_HELD(&cpu_lock));
1559 1559 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1560 1560
1561 1561 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1562 1562 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1563 1563
1564 1564 if (mmu_kstat == NULL) {
1565 1565 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1566 1566 mmu_ctxp->mmu_idx);
1567 1567 } else {
1568 1568 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1569 1569 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1570 1570 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1571 1571 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1572 1572 mmu_ctxp->mmu_kstat = mmu_kstat;
1573 1573 kstat_install(mmu_kstat);
1574 1574 }
1575 1575 }
1576 1576
1577 1577 /*
1578 1578 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1579 1579 * context domain information for a given CPU. If a platform does not
1580 1580 * specify that interface, then the function below is used instead to return
1581 1581 * default information. The defaults are as follows:
1582 1582 *
1583 1583 * - The number of MMU context IDs supported on any CPU in the
1584 1584 * system is 8K.
1585 1585 * - There is one MMU context domain per CPU.
1586 1586 */
1587 1587 /*ARGSUSED*/
1588 1588 static void
1589 1589 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1590 1590 {
1591 1591 infop->mmu_nctxs = nctxs;
1592 1592 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1593 1593 }
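
For contrast, a hedged sketch of a platform override: a platform that wants one
context domain per chip rather than the per-CPU default above could provide
plat_cpuid_to_mmu_ctx_info() along the following lines. The chip-id helper is
hypothetical; only nctxs, the mmu_ctx_info_t fields, and the requirement that
mmu_idx stay below max_mmu_ctxdoms come from this file.

void
plat_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	/* Keep the default number of context IDs per domain. */
	infop->mmu_nctxs = nctxs;
	/*
	 * Hypothetical helper: map a CPU to its chip so that all CPUs on
	 * one chip share a context domain.  Must be < max_mmu_ctxdoms.
	 */
	infop->mmu_idx = plat_cpuid_to_chipid(cpuid);
}
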
1594 1594
1595 1595 /*
1596 1596 * Called during CPU initialization to set the MMU context-related information
1597 1597 * for a CPU.
1598 1598 *
1599 1599 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1600 1600 */
1601 1601 void
1602 1602 sfmmu_cpu_init(cpu_t *cp)
1603 1603 {
1604 1604 mmu_ctx_info_t info;
1605 1605 mmu_ctx_t *mmu_ctxp;
1606 1606
1607 1607 ASSERT(MUTEX_HELD(&cpu_lock));
1608 1608
1609 1609 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1610 1610 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1611 1611 else
1612 1612 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1613 1613
1614 1614 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1615 1615
1616 1616 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1617 1617 /* Each mmu_ctx is cacheline aligned. */
1618 1618 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1619 1619 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1620 1620
1621 1621 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1622 1622 (void *)ipltospl(DISP_LEVEL));
1623 1623 mmu_ctxp->mmu_idx = info.mmu_idx;
1624 1624 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1625 1625 /*
1626 1626 		 * Globally, for the lifetime of a system,
1627 1627 * gnum must always increase.
1628 1628 * mmu_saved_gnum is protected by the cpu_lock.
1629 1629 */
1630 1630 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1631 1631 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1632 1632
1633 1633 sfmmu_mmu_kstat_create(mmu_ctxp);
1634 1634
1635 1635 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1636 1636 } else {
1637 1637 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1638 1638 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1639 1639 }
1640 1640
1641 1641 /*
1642 1642 * The mmu_lock is acquired here to prevent races with
1643 1643 * the wrap-around code.
1644 1644 */
1645 1645 mutex_enter(&mmu_ctxp->mmu_lock);
1646 1646
1647 1647
1648 1648 mmu_ctxp->mmu_ncpus++;
1649 1649 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1650 1650 CPU_MMU_IDX(cp) = info.mmu_idx;
1651 1651 CPU_MMU_CTXP(cp) = mmu_ctxp;
1652 1652
1653 1653 mutex_exit(&mmu_ctxp->mmu_lock);
1654 1654 }
1655 1655
1656 1656 static void
1657 1657 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1658 1658 {
1659 1659 ASSERT(MUTEX_HELD(&cpu_lock));
1660 1660 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1661 1661
1662 1662 mutex_destroy(&mmu_ctxp->mmu_lock);
1663 1663
1664 1664 if (mmu_ctxp->mmu_kstat)
1665 1665 kstat_delete(mmu_ctxp->mmu_kstat);
1666 1666
1667 1667 /* mmu_saved_gnum is protected by the cpu_lock. */
1668 1668 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1669 1669 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1670 1670
1671 1671 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1672 1672 }
1673 1673
1674 1674 /*
1675 1675 * Called to perform MMU context-related cleanup for a CPU.
1676 1676 */
1677 1677 void
1678 1678 sfmmu_cpu_cleanup(cpu_t *cp)
1679 1679 {
1680 1680 mmu_ctx_t *mmu_ctxp;
1681 1681
1682 1682 ASSERT(MUTEX_HELD(&cpu_lock));
1683 1683
1684 1684 mmu_ctxp = CPU_MMU_CTXP(cp);
1685 1685 ASSERT(mmu_ctxp != NULL);
1686 1686
1687 1687 /*
1688 1688 * The mmu_lock is acquired here to prevent races with
1689 1689 * the wrap-around code.
1690 1690 */
1691 1691 mutex_enter(&mmu_ctxp->mmu_lock);
1692 1692
1693 1693 CPU_MMU_CTXP(cp) = NULL;
1694 1694
1695 1695 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1696 1696 if (--mmu_ctxp->mmu_ncpus == 0) {
1697 1697 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1698 1698 mutex_exit(&mmu_ctxp->mmu_lock);
1699 1699 sfmmu_ctxdom_free(mmu_ctxp);
1700 1700 return;
1701 1701 }
1702 1702
1703 1703 mutex_exit(&mmu_ctxp->mmu_lock);
1704 1704 }
1705 1705
1706 1706 uint_t
1707 1707 sfmmu_ctxdom_nctxs(int idx)
1708 1708 {
1709 1709 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1710 1710 }
1711 1711
1712 1712 #ifdef sun4v
1713 1713 /*
1714 1714  * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1715 1715  * consistent across suspend/resume on a system that can resume on
1716 1716  * different hardware than it was suspended on.
1717 1717 *
1718 1718  * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1719 1719  * from being allocated. It acquires all hat_locks, which blocks most access to
1720 1720  * context data, except for a few cases that are handled separately or are
1721 1721  * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1722 1722  * contexts, and forces cnum to its max. As a result, all user threads that
1723 1723  * are running on CPUs trap and try to perform wrap-around but can't because
1724 1724  * hat_locks are taken. Threads that were not on CPUs but are started by the
1725 1725  * scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1726 1726  * hat_lock, but fail because cnum == nctxs, so they also trap and block on
1727 1727  * hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs
1728 1728  * are paused, else it could deadlock acquiring locks held by paused CPUs.
1729 1729 *
1730 1730  * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1731 1731 * the CPUs that had them. It must be called after CPUs have been paused. This
1732 1732 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1733 1733 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1734 1734 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1735 1735 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1736 1736 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1737 1737 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1738 1738 * accessing the old context domains.
1739 1739 *
1740 1740 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1741 1741 * allocates new context domains based on hardware layout. It initializes
1742 1742  * every CPU that had a context domain before migration to have one again.
1743 1743 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1744 1744 * could deadlock acquiring locks held by paused CPUs.
1745 1745 *
1746 1746 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1747 1747 * acquire new context ids and continue execution.
1748 1748 *
1749 1749  * Therefore these functions should be called in the following order:
1750 1750  *	suspend_routine()
1751 1751  *		sfmmu_ctxdoms_lock()
1752 1752  *		pause_cpus()
1753 1753  *		suspend()
1754 1754  *			if (suspend failed)
1755 1755  *				sfmmu_ctxdoms_unlock()
1756 1756  *		...
1757 1757  *		sfmmu_ctxdoms_remove()
1758 1758  *		resume_cpus()
1759 1759  *		sfmmu_ctxdoms_update()
1760 1760  *		sfmmu_ctxdoms_unlock()
1761 1761 */
1762 1762 static cpuset_t sfmmu_ctxdoms_pset;
1763 1763
1764 1764 void
1765 1765 sfmmu_ctxdoms_remove()
1766 1766 {
1767 1767 processorid_t id;
1768 1768 cpu_t *cp;
1769 1769
1770 1770 /*
1771 1771 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1772 1772 * be restored post-migration. A CPU may be powered off and not have a
1773 1773 * domain, for example.
1774 1774 */
1775 1775 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1776 1776
1777 1777 for (id = 0; id < NCPU; id++) {
1778 1778 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1779 1779 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1780 1780 CPU_MMU_CTXP(cp) = NULL;
1781 1781 }
1782 1782 }
1783 1783 }
1784 1784
1785 1785 void
1786 1786 sfmmu_ctxdoms_lock(void)
1787 1787 {
1788 1788 int idx;
1789 1789 mmu_ctx_t *mmu_ctxp;
1790 1790
1791 1791 sfmmu_hat_lock_all();
1792 1792
1793 1793 /*
1794 1794 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1795 1795 * hat_lock is always taken before calling it.
1796 1796 *
1797 1797 * For each domain, set mmu_cnum to max so no more contexts can be
1798 1798 * allocated, and wrap to flush on-CPU contexts and force threads to
1799 1799 * acquire a new context when we later drop hat_lock after migration.
1800 1800 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1801 1801 * but the latter uses CAS and will miscompare and not overwrite it.
1802 1802 */
1803 1803 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1804 1804 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1805 1805 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1806 1806 mutex_enter(&mmu_ctxp->mmu_lock);
1807 1807 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1808 1808 /* make sure updated cnum visible */
1809 1809 membar_enter();
1810 1810 mutex_exit(&mmu_ctxp->mmu_lock);
1811 1811 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1812 1812 }
1813 1813 }
1814 1814 kpreempt_enable();
1815 1815 }
1816 1816
1817 1817 void
1818 1818 sfmmu_ctxdoms_unlock(void)
1819 1819 {
1820 1820 sfmmu_hat_unlock_all();
1821 1821 }
1822 1822
1823 1823 void
1824 1824 sfmmu_ctxdoms_update(void)
1825 1825 {
1826 1826 processorid_t id;
1827 1827 cpu_t *cp;
1828 1828 uint_t idx;
1829 1829 mmu_ctx_t *mmu_ctxp;
1830 1830
1831 1831 /*
1832 1832 	 * Free all context domains. As a side effect, this increases
1833 1833 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1834 1834 * init gnum in the new domains, which therefore will be larger than the
1835 1835 * sfmmu gnum for any process, guaranteeing that every process will see
1836 1836 * a new generation and allocate a new context regardless of what new
1837 1837 * domain it runs in.
1838 1838 */
1839 1839 mutex_enter(&cpu_lock);
1840 1840
1841 1841 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1842 1842 if (mmu_ctxs_tbl[idx] != NULL) {
1843 1843 mmu_ctxp = mmu_ctxs_tbl[idx];
1844 1844 mmu_ctxs_tbl[idx] = NULL;
1845 1845 sfmmu_ctxdom_free(mmu_ctxp);
1846 1846 }
1847 1847 }
1848 1848
1849 1849 for (id = 0; id < NCPU; id++) {
1850 1850 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1851 1851 (cp = cpu[id]) != NULL)
1852 1852 sfmmu_cpu_init(cp);
1853 1853 }
1854 1854 mutex_exit(&cpu_lock);
1855 1855 }
1856 1856 #endif
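
A compact sketch of the suspend/resume ordering documented in the block comment
above. suspend_routine(), do_suspend(), and the pause/resume calls are
placeholders taken from that comment, not functions defined in this file; the
failure path is abbreviated just as the comment's "..." is.

static int
suspend_routine(void)
{
	int err;

	sfmmu_ctxdoms_lock();		/* block new context allocation */
	pause_cpus();			/* placeholder, per the comment above */
	err = do_suspend();		/* hypothetical platform suspend */
	if (err != 0) {
		sfmmu_ctxdoms_unlock();	/* abbreviated failure path ("...") */
		resume_cpus();		/* placeholder */
		return (err);
	}
	sfmmu_ctxdoms_remove();		/* strip domains from the paused CPUs */
	resume_cpus();			/* placeholder, per the comment above */
	sfmmu_ctxdoms_update();		/* rebuild domains for the new hardware */
	sfmmu_ctxdoms_unlock();
	return (0);
}
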
1857 1857
1858 1858 /*
1859 1859  * hat_setup() makes an address space context the currently active one.
1860 1860  * In sfmmu this translates to setting the secondary context register to
1861 1861  * the corresponding context number.
1862 1862 */
1863 1863 void
1864 1864 hat_setup(struct hat *sfmmup, int allocflag)
1865 1865 {
1866 1866 hatlock_t *hatlockp;
1867 1867
1868 1868 /* Init needs some special treatment. */
1869 1869 if (allocflag == HAT_INIT) {
1870 1870 /*
1871 1871 * Make sure that we have
1872 1872 * 1. a TSB
1873 1873 * 2. a valid ctx that doesn't get stolen after this point.
1874 1874 */
1875 1875 hatlockp = sfmmu_hat_enter(sfmmup);
1876 1876
1877 1877 /*
1878 1878 * Swap in the TSB. hat_init() allocates tsbinfos without
1879 1879 * TSBs, but we need one for init, since the kernel does some
1880 1880 * special things to set up its stack and needs the TSB to
1881 1881 * resolve page faults.
1882 1882 */
1883 1883 sfmmu_tsb_swapin(sfmmup, hatlockp);
1884 1884
1885 1885 sfmmu_get_ctx(sfmmup);
1886 1886
1887 1887 sfmmu_hat_exit(hatlockp);
1888 1888 } else {
1889 1889 ASSERT(allocflag == HAT_ALLOC);
1890 1890
1891 1891 hatlockp = sfmmu_hat_enter(sfmmup);
1892 1892 kpreempt_disable();
1893 1893
1894 1894 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1895 1895 /*
1896 1896 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter,
1897 1897 * pagesize bits don't matter in this case since we are passing
1898 1898 * INVALID_CONTEXT to it.
1899 1899 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1900 1900 */
1901 1901 sfmmu_setctx_sec(INVALID_CONTEXT);
1902 1902 sfmmu_clear_utsbinfo();
1903 1903
1904 1904 kpreempt_enable();
1905 1905 sfmmu_hat_exit(hatlockp);
1906 1906 }
1907 1907 }
1908 1908
1909 1909 /*
1910 1910 * Free all the translation resources for the specified address space.
1911 1911 * Called from as_free when an address space is being destroyed.
1912 1912 */
1913 1913 void
1914 1914 hat_free_start(struct hat *sfmmup)
1915 1915 {
1916 1916 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1917 1917 ASSERT(sfmmup != ksfmmup);
1918 1918
1919 1919 sfmmup->sfmmu_free = 1;
1920 1920 if (sfmmup->sfmmu_scdp != NULL) {
1921 1921 sfmmu_leave_scd(sfmmup, 0);
1922 1922 }
1923 1923
1924 1924 ASSERT(sfmmup->sfmmu_scdp == NULL);
1925 1925 }
1926 1926
1927 1927 void
1928 1928 hat_free_end(struct hat *sfmmup)
1929 1929 {
1930 1930 int i;
1931 1931
1932 1932 ASSERT(sfmmup->sfmmu_free == 1);
1933 1933 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1934 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1935 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1936 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1937 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1938 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1939 1939
1940 1940 if (sfmmup->sfmmu_rmstat) {
1941 1941 hat_freestat(sfmmup->sfmmu_as, NULL);
1942 1942 }
1943 1943
1944 1944 while (sfmmup->sfmmu_tsb != NULL) {
1945 1945 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1946 1946 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1947 1947 sfmmup->sfmmu_tsb = next;
1948 1948 }
1949 1949
1950 1950 if (sfmmup->sfmmu_srdp != NULL) {
1951 1951 sfmmu_leave_srd(sfmmup);
1952 1952 ASSERT(sfmmup->sfmmu_srdp == NULL);
1953 1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 1956 SFMMU_L2_HMERLINKS_SIZE);
1957 1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 1958 }
1959 1959 }
1960 1960 }
1961 1961 sfmmu_free_sfmmu(sfmmup);
1962 1962
1963 1963 #ifdef DEBUG
1964 1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 1966 }
1967 1967 #endif
1968 1968
1969 1969 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 1970 }
1971 1971
1972 1972 /*
1973 - * Set up any translation structures, for the specified address space,
1974 - * that are needed or preferred when the process is being swapped in.
1975 - */
1976 -/* ARGSUSED */
1977 -void
1978 -hat_swapin(struct hat *hat)
1979 -{
1980 -}
1981 -
1982 -/*
1983 - * Free all of the translation resources, for the specified address space,
1984 - * that can be freed while the process is swapped out. Called from as_swapout.
1985 - * Also, free up the ctx that this process was using.
1986 - */
1987 -void
1988 -hat_swapout(struct hat *sfmmup)
1989 -{
1990 - struct hmehash_bucket *hmebp;
1991 - struct hme_blk *hmeblkp;
1992 - struct hme_blk *pr_hblk = NULL;
1993 - struct hme_blk *nx_hblk;
1994 - int i;
1995 - struct hme_blk *list = NULL;
1996 - hatlock_t *hatlockp;
1997 - struct tsb_info *tsbinfop;
1998 - struct free_tsb {
1999 - struct free_tsb *next;
2000 - struct tsb_info *tsbinfop;
2001 - }; /* free list of TSBs */
2002 - struct free_tsb *freelist, *last, *next;
2003 -
2004 - SFMMU_STAT(sf_swapout);
2005 -
2006 - /*
2007 - * There is no way to go from an as to all its translations in sfmmu.
2008 - * Here is one of the times when we take the big hit and traverse
2009 - * the hash looking for hme_blks to free up. Not only do we free up
2010 - * this as hme_blks but all those that are free. We are obviously
2011 - * swapping because we need memory so let's free up as much
2012 - * as we can.
2013 - *
2014 - * Note that we don't flush TLB/TSB here -- it's not necessary
2015 - * because:
2016 - * 1) we free the ctx we're using and throw away the TSB(s);
2017 - * 2) processes aren't runnable while being swapped out.
2018 - */
2019 - ASSERT(sfmmup != KHATID);
2020 - for (i = 0; i <= UHMEHASH_SZ; i++) {
2021 - hmebp = &uhme_hash[i];
2022 - SFMMU_HASH_LOCK(hmebp);
2023 - hmeblkp = hmebp->hmeblkp;
2024 - pr_hblk = NULL;
2025 - while (hmeblkp) {
2026 -
2027 - if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2028 - !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2029 - ASSERT(!hmeblkp->hblk_shared);
2030 - (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2031 - (caddr_t)get_hblk_base(hmeblkp),
2032 - get_hblk_endaddr(hmeblkp),
2033 - NULL, HAT_UNLOAD);
2034 - }
2035 - nx_hblk = hmeblkp->hblk_next;
2036 - if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2037 - ASSERT(!hmeblkp->hblk_lckcnt);
2038 - sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2039 - &list, 0);
2040 - } else {
2041 - pr_hblk = hmeblkp;
2042 - }
2043 - hmeblkp = nx_hblk;
2044 - }
2045 - SFMMU_HASH_UNLOCK(hmebp);
2046 - }
2047 -
2048 - sfmmu_hblks_list_purge(&list, 0);
2049 -
2050 - /*
2051 - * Now free up the ctx so that others can reuse it.
2052 - */
2053 - hatlockp = sfmmu_hat_enter(sfmmup);
2054 -
2055 - sfmmu_invalidate_ctx(sfmmup);
2056 -
2057 - /*
2058 - * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2059 - * If TSBs were never swapped in, just return.
2060 - * This implies that we don't support partial swapping
2061 - * of TSBs -- either all are swapped out, or none are.
2062 - *
2063 - * We must hold the HAT lock here to prevent racing with another
2064 - * thread trying to unmap TTEs from the TSB or running the post-
2065 - * relocator after relocating the TSB's memory. Unfortunately, we
2066 - * can't free memory while holding the HAT lock or we could
2067 - * deadlock, so we build a list of TSBs to be freed after marking
2068 - * the tsbinfos as swapped out and free them after dropping the
2069 - * lock.
2070 - */
2071 - if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2072 - sfmmu_hat_exit(hatlockp);
2073 - return;
2074 - }
2075 -
2076 - SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2077 - last = freelist = NULL;
2078 - for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2079 - tsbinfop = tsbinfop->tsb_next) {
2080 - ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2081 -
2082 - /*
2083 - * Cast the TSB into a struct free_tsb and put it on the free
2084 - * list.
2085 - */
2086 - if (freelist == NULL) {
2087 - last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2088 - } else {
2089 - last->next = (struct free_tsb *)tsbinfop->tsb_va;
2090 - last = last->next;
2091 - }
2092 - last->next = NULL;
2093 - last->tsbinfop = tsbinfop;
2094 - tsbinfop->tsb_flags |= TSB_SWAPPED;
2095 - /*
2096 - * Zero out the TTE to clear the valid bit.
2097 - * Note we can't use a value like 0xbad because we want to
2098 - * ensure diagnostic bits are NEVER set on TTEs that might
2099 - * be loaded. The intent is to catch any invalid access
2100 - * to the swapped TSB, such as a thread running with a valid
2101 - * context without first calling sfmmu_tsb_swapin() to
2102 - * allocate TSB memory.
2103 - */
2104 - tsbinfop->tsb_tte.ll = 0;
2105 - }
2106 -
2107 - /* Now we can drop the lock and free the TSB memory. */
2108 - sfmmu_hat_exit(hatlockp);
2109 - for (; freelist != NULL; freelist = next) {
2110 - next = freelist->next;
2111 - sfmmu_tsb_free(freelist->tsbinfop);
2112 - }
2113 -}
2114 -
2115 -/*
2116 1973 * Duplicate the translations of an as into another newas
2117 1974 */
2118 1975 /* ARGSUSED */
2119 1976 int
2120 1977 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2121 1978 uint_t flag)
2122 1979 {
2123 1980 sf_srd_t *srdp;
2124 1981 sf_scd_t *scdp;
2125 1982 int i;
2126 1983 extern uint_t get_color_start(struct as *);
2127 1984
2128 1985 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2129 1986 (flag == HAT_DUP_SRD));
2130 1987 ASSERT(hat != ksfmmup);
2131 1988 ASSERT(newhat != ksfmmup);
2132 1989 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2133 1990
2134 1991 if (flag == HAT_DUP_COW) {
2135 1992 panic("hat_dup: HAT_DUP_COW not supported");
2136 1993 }
2137 1994
2138 1995 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2139 1996 ASSERT(srdp->srd_evp != NULL);
2140 1997 VN_HOLD(srdp->srd_evp);
2141 1998 ASSERT(srdp->srd_refcnt > 0);
2142 1999 newhat->sfmmu_srdp = srdp;
2143 2000 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2144 2001 }
2145 2002
2146 2003 /*
2147 2004 * HAT_DUP_ALL flag is used after as duplication is done.
2148 2005 */
2149 2006 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2150 2007 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2151 2008 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2152 2009 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2153 2010 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2154 2011 }
2155 2012
2156 2013 /* check if need to join scd */
2157 2014 if ((scdp = hat->sfmmu_scdp) != NULL &&
2158 2015 newhat->sfmmu_scdp != scdp) {
2159 2016 int ret;
2160 2017 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2161 2018 &scdp->scd_region_map, ret);
2162 2019 ASSERT(ret);
2163 2020 sfmmu_join_scd(scdp, newhat);
2164 2021 ASSERT(newhat->sfmmu_scdp == scdp &&
2165 2022 scdp->scd_refcnt >= 2);
2166 2023 for (i = 0; i < max_mmu_page_sizes; i++) {
2167 2024 newhat->sfmmu_ismttecnt[i] =
2168 2025 hat->sfmmu_ismttecnt[i];
2169 2026 newhat->sfmmu_scdismttecnt[i] =
2170 2027 hat->sfmmu_scdismttecnt[i];
2171 2028 }
2172 2029 }
2173 2030
2174 2031 sfmmu_check_page_sizes(newhat, 1);
2175 2032 }
2176 2033
2177 2034 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2178 2035 update_proc_pgcolorbase_after_fork != 0) {
2179 2036 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2180 2037 }
2181 2038 return (0);
2182 2039 }
2183 2040
2184 2041 void
2185 2042 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2186 2043 uint_t attr, uint_t flags)
2187 2044 {
2188 2045 hat_do_memload(hat, addr, pp, attr, flags,
2189 2046 SFMMU_INVALID_SHMERID);
2190 2047 }
2191 2048
2192 2049 void
2193 2050 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2194 2051 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2195 2052 {
2196 2053 uint_t rid;
2197 2054 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2198 2055 hat_do_memload(hat, addr, pp, attr, flags,
2199 2056 SFMMU_INVALID_SHMERID);
2200 2057 return;
2201 2058 }
2202 2059 rid = (uint_t)((uint64_t)rcookie);
2203 2060 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2204 2061 hat_do_memload(hat, addr, pp, attr, flags, rid);
2205 2062 }
2206 2063
2207 2064 /*
2208 2065 * Set up addr to map to page pp with protection prot.
2209 2066 * As an optimization we also load the TSB with the
2210 2067 * corresponding tte but it is no big deal if the tte gets kicked out.
2211 2068 */
2212 2069 static void
2213 2070 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2214 2071 uint_t attr, uint_t flags, uint_t rid)
2215 2072 {
2216 2073 tte_t tte;
2217 2074
2218 2075
2219 2076 ASSERT(hat != NULL);
2220 2077 ASSERT(PAGE_LOCKED(pp));
2221 2078 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2222 2079 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2223 2080 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2224 2081 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2225 2082
2226 2083 if (PP_ISFREE(pp)) {
2227 2084 panic("hat_memload: loading a mapping to free page %p",
2228 2085 (void *)pp);
2229 2086 }
2230 2087
2231 2088 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2232 2089
2233 2090 if (flags & ~SFMMU_LOAD_ALLFLAG)
2234 2091 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2235 2092 flags & ~SFMMU_LOAD_ALLFLAG);
2236 2093
2237 2094 if (hat->sfmmu_rmstat)
2238 2095 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2239 2096
2240 2097 #if defined(SF_ERRATA_57)
2241 2098 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2242 2099 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2243 2100 !(flags & HAT_LOAD_SHARE)) {
2244 2101 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2245 2102 " page executable");
2246 2103 attr &= ~PROT_EXEC;
2247 2104 }
2248 2105 #endif
2249 2106
2250 2107 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2251 2108 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2252 2109
2253 2110 /*
2254 2111 * Check TSB and TLB page sizes.
2255 2112 */
2256 2113 if ((flags & HAT_LOAD_SHARE) == 0) {
2257 2114 sfmmu_check_page_sizes(hat, 1);
2258 2115 }
2259 2116 }
2260 2117
2261 2118 /*
2262 2119 * hat_devload can be called to map real memory (e.g.
2263 2120 * /dev/kmem) and even though hat_devload will determine pf is
2264 2121 * for memory, it will be unable to get a shared lock on the
2265 2122 * page (because someone else has it exclusively) and will
2266 2123  * pass a NULL page pointer.  If tteload doesn't get a non-NULL
2267 2124 * page pointer it can't cache memory.
2268 2125 */
2269 2126 void
2270 2127 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2271 2128 uint_t attr, int flags)
2272 2129 {
2273 2130 tte_t tte;
2274 2131 struct page *pp = NULL;
2275 2132 int use_lgpg = 0;
2276 2133
2277 2134 ASSERT(hat != NULL);
2278 2135
2279 2136 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2280 2137 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2281 2138 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2282 2139 if (len == 0)
2283 2140 panic("hat_devload: zero len");
2284 2141 if (flags & ~SFMMU_LOAD_ALLFLAG)
2285 2142 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2286 2143 flags & ~SFMMU_LOAD_ALLFLAG);
2287 2144
2288 2145 #if defined(SF_ERRATA_57)
2289 2146 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2290 2147 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2291 2148 !(flags & HAT_LOAD_SHARE)) {
2292 2149 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2293 2150 " page executable");
2294 2151 attr &= ~PROT_EXEC;
2295 2152 }
2296 2153 #endif
2297 2154
2298 2155 /*
2299 2156 * If it's a memory page find its pp
2300 2157 */
2301 2158 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2302 2159 pp = page_numtopp_nolock(pfn);
2303 2160 if (pp == NULL) {
2304 2161 flags |= HAT_LOAD_NOCONSIST;
2305 2162 } else {
2306 2163 if (PP_ISFREE(pp)) {
2307 2164 				panic("hat_devload: loading "
2308 2165 "a mapping to free page %p",
2309 2166 (void *)pp);
2310 2167 }
2311 2168 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2312 2169 				panic("hat_devload: loading a mapping "
2313 2170 "to unlocked relocatable page %p",
2314 2171 (void *)pp);
2315 2172 }
2316 2173 ASSERT(len == MMU_PAGESIZE);
2317 2174 }
2318 2175 }
2319 2176
2320 2177 if (hat->sfmmu_rmstat)
2321 2178 hat_resvstat(len, hat->sfmmu_as, addr);
2322 2179
2323 2180 if (flags & HAT_LOAD_NOCONSIST) {
2324 2181 attr |= SFMMU_UNCACHEVTTE;
2325 2182 use_lgpg = 1;
2326 2183 }
2327 2184 if (!pf_is_memory(pfn)) {
2328 2185 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2329 2186 use_lgpg = 1;
2330 2187 switch (attr & HAT_ORDER_MASK) {
2331 2188 case HAT_STRICTORDER:
2332 2189 case HAT_UNORDERED_OK:
2333 2190 /*
2334 2191 			 * we set the side effect bit for all
2335 2192 			 * non-memory mappings unless merging is ok
2336 2193 */
2337 2194 attr |= SFMMU_SIDEFFECT;
2338 2195 break;
2339 2196 case HAT_MERGING_OK:
2340 2197 case HAT_LOADCACHING_OK:
2341 2198 case HAT_STORECACHING_OK:
2342 2199 break;
2343 2200 default:
2344 2201 panic("hat_devload: bad attr");
2345 2202 break;
2346 2203 }
2347 2204 }
2348 2205 while (len) {
2349 2206 if (!use_lgpg) {
2350 2207 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2351 2208 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2352 2209 flags, SFMMU_INVALID_SHMERID);
2353 2210 len -= MMU_PAGESIZE;
2354 2211 addr += MMU_PAGESIZE;
2355 2212 pfn++;
2356 2213 continue;
2357 2214 }
2358 2215 /*
2359 2216 * try to use large pages, check va/pa alignments
2360 2217 * Note that 32M/256M page sizes are not (yet) supported.
2361 2218 */
2362 2219 if ((len >= MMU_PAGESIZE4M) &&
2363 2220 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2364 2221 !(disable_large_pages & (1 << TTE4M)) &&
2365 2222 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2366 2223 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2367 2224 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2368 2225 flags, SFMMU_INVALID_SHMERID);
2369 2226 len -= MMU_PAGESIZE4M;
2370 2227 addr += MMU_PAGESIZE4M;
2371 2228 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2372 2229 } else if ((len >= MMU_PAGESIZE512K) &&
2373 2230 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2374 2231 !(disable_large_pages & (1 << TTE512K)) &&
2375 2232 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2376 2233 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2377 2234 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 2235 flags, SFMMU_INVALID_SHMERID);
2379 2236 len -= MMU_PAGESIZE512K;
2380 2237 addr += MMU_PAGESIZE512K;
2381 2238 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2382 2239 } else if ((len >= MMU_PAGESIZE64K) &&
2383 2240 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2384 2241 !(disable_large_pages & (1 << TTE64K)) &&
2385 2242 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2386 2243 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2387 2244 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2388 2245 flags, SFMMU_INVALID_SHMERID);
2389 2246 len -= MMU_PAGESIZE64K;
2390 2247 addr += MMU_PAGESIZE64K;
2391 2248 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2392 2249 } else {
2393 2250 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2394 2251 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2395 2252 flags, SFMMU_INVALID_SHMERID);
2396 2253 len -= MMU_PAGESIZE;
2397 2254 addr += MMU_PAGESIZE;
2398 2255 pfn++;
2399 2256 }
2400 2257 }
2401 2258
2402 2259 /*
2403 2260 * Check TSB and TLB page sizes.
2404 2261 */
2405 2262 if ((flags & HAT_LOAD_SHARE) == 0) {
2406 2263 sfmmu_check_page_sizes(hat, 1);
2407 2264 }
2408 2265 }
2409 2266
2410 2267 void
2411 2268 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2412 2269 struct page **pps, uint_t attr, uint_t flags)
2413 2270 {
2414 2271 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2415 2272 SFMMU_INVALID_SHMERID);
2416 2273 }
2417 2274
2418 2275 void
2419 2276 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2420 2277 struct page **pps, uint_t attr, uint_t flags,
2421 2278 hat_region_cookie_t rcookie)
2422 2279 {
2423 2280 uint_t rid;
2424 2281 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2425 2282 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2426 2283 SFMMU_INVALID_SHMERID);
2427 2284 return;
2428 2285 }
2429 2286 rid = (uint_t)((uint64_t)rcookie);
2430 2287 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2431 2288 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2432 2289 }
2433 2290
2434 2291 /*
2435 2292  * Map the largest extent possible out of the page array. The array may NOT
2436 2293  * be in order.  The largest possible mapping a page can have
2437 2294  * is specified in the p_szc field.  The p_szc field
2438 2295  * cannot change as long as there are any mappings (large or small)
2439 2296  * to any of the pages that make up the large page. (i.e. any
2440 2297  * promotion/demotion of page size is not up to the hat but up to
2441 2298  * the page free list manager).  The array
2442 2299  * should consist of properly aligned contiguous pages that are
2443 2300  * part of a big page for a large mapping to be created.
2444 2301 */
2445 2302 static void
2446 2303 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2447 2304 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2448 2305 {
2449 2306 int ttesz;
2450 2307 size_t mapsz;
2451 2308 pgcnt_t numpg, npgs;
2452 2309 tte_t tte;
2453 2310 page_t *pp;
2454 2311 uint_t large_pages_disable;
2455 2312
2456 2313 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2457 2314 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2458 2315
2459 2316 if (hat->sfmmu_rmstat)
2460 2317 hat_resvstat(len, hat->sfmmu_as, addr);
2461 2318
2462 2319 #if defined(SF_ERRATA_57)
2463 2320 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2464 2321 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2465 2322 !(flags & HAT_LOAD_SHARE)) {
2466 2323 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2467 2324 "user page executable");
2468 2325 attr &= ~PROT_EXEC;
2469 2326 }
2470 2327 #endif
2471 2328
2472 2329 /* Get number of pages */
2473 2330 npgs = len >> MMU_PAGESHIFT;
2474 2331
2475 2332 if (flags & HAT_LOAD_SHARE) {
2476 2333 large_pages_disable = disable_ism_large_pages;
2477 2334 } else {
2478 2335 large_pages_disable = disable_large_pages;
2479 2336 }
2480 2337
2481 2338 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2482 2339 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2483 2340 rid);
2484 2341 return;
2485 2342 }
2486 2343
2487 2344 while (npgs >= NHMENTS) {
2488 2345 pp = *pps;
2489 2346 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2490 2347 /*
2491 2348 * Check if this page size is disabled.
2492 2349 */
2493 2350 if (large_pages_disable & (1 << ttesz))
2494 2351 continue;
2495 2352
2496 2353 numpg = TTEPAGES(ttesz);
2497 2354 mapsz = numpg << MMU_PAGESHIFT;
2498 2355 if ((npgs >= numpg) &&
2499 2356 IS_P2ALIGNED(addr, mapsz) &&
2500 2357 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2501 2358 /*
2502 2359 * At this point we have enough pages and
2503 2360 * we know the virtual address and the pfn
2504 2361 * are properly aligned. We still need
2505 2362 * to check for physical contiguity but since
2506 2363 * it is very likely that this is the case
2507 2364 * we will assume they are so and undo
2508 2365 * the request if necessary. It would
2509 2366 * be great if we could get a hint flag
2510 2367 * like HAT_CONTIG which would tell us
2511 2368 			 * the pages are contiguous for sure.
2512 2369 */
2513 2370 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2514 2371 attr, ttesz);
2515 2372 if (!sfmmu_tteload_array(hat, &tte, addr,
2516 2373 pps, flags, rid)) {
2517 2374 break;
2518 2375 }
2519 2376 }
2520 2377 }
2521 2378 if (ttesz == TTE8K) {
2522 2379 /*
2523 2380 			 * We were not able to map the array using a large
2524 2381 			 * page, so batch an hmeblk or fraction at a time.
2525 2382 */
2526 2383 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2527 2384 & (NHMENTS-1);
2528 2385 numpg = NHMENTS - numpg;
2529 2386 ASSERT(numpg <= npgs);
2530 2387 mapsz = numpg * MMU_PAGESIZE;
2531 2388 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2532 2389 numpg, rid);
2533 2390 }
2534 2391 addr += mapsz;
2535 2392 npgs -= numpg;
2536 2393 pps += numpg;
2537 2394 }
2538 2395
2539 2396 if (npgs) {
2540 2397 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2541 2398 rid);
2542 2399 }
2543 2400
2544 2401 /*
2545 2402 * Check TSB and TLB page sizes.
2546 2403 */
2547 2404 if ((flags & HAT_LOAD_SHARE) == 0) {
2548 2405 sfmmu_check_page_sizes(hat, 1);
2549 2406 }
2550 2407 }
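
A worked example of the alignment test in hat_do_memload_array() above,
assuming the usual 8K base page size:

	numpg = TTEPAGES(TTE4M) = 4M / 8K = 512 pages
	mapsz = numpg << MMU_PAGESHIFT = 4M

So a 4M mapping is attempted only when at least 512 pages remain, addr is 4M
aligned, and the starting p_pagenum is a multiple of 512; otherwise the loop
falls back to the smaller page sizes (512K spans 64 pages, 64K spans 8) and
finally to the 8K batching path.
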
2551 2408
2552 2409 /*
2553 2410 * Function tries to batch 8K pages into the same hme blk.
2554 2411 */
2555 2412 static void
2556 2413 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2557 2414 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2558 2415 {
2559 2416 tte_t tte;
2560 2417 page_t *pp;
2561 2418 struct hmehash_bucket *hmebp;
2562 2419 struct hme_blk *hmeblkp;
2563 2420 int index;
2564 2421
2565 2422 while (npgs) {
2566 2423 /*
2567 2424 * Acquire the hash bucket.
2568 2425 */
2569 2426 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2570 2427 rid);
2571 2428 ASSERT(hmebp);
2572 2429
2573 2430 /*
2574 2431 * Find the hment block.
2575 2432 */
2576 2433 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2577 2434 TTE8K, flags, rid);
2578 2435 ASSERT(hmeblkp);
2579 2436
2580 2437 do {
2581 2438 /*
2582 2439 * Make the tte.
2583 2440 */
2584 2441 pp = *pps;
2585 2442 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2586 2443
2587 2444 /*
2588 2445 * Add the translation.
2589 2446 */
2590 2447 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2591 2448 vaddr, pps, flags, rid);
2592 2449
2593 2450 /*
2594 2451 * Goto next page.
2595 2452 */
2596 2453 pps++;
2597 2454 npgs--;
2598 2455
2599 2456 /*
2600 2457 * Goto next address.
2601 2458 */
2602 2459 vaddr += MMU_PAGESIZE;
2603 2460
2604 2461 /*
2605 2462 			 * Don't cross over into a different hme_blk.
2606 2463 */
2607 2464 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2608 2465 (NHMENTS-1));
2609 2466
2610 2467 } while (index != 0 && npgs != 0);
2611 2468
2612 2469 /*
2613 2470 * Release the hash bucket.
2614 2471 */
2615 2472
2616 2473 sfmmu_tteload_release_hashbucket(hmebp);
2617 2474 }
2618 2475 }
2619 2476
2620 2477 /*
2621 2478 * Construct a tte for a page:
2622 2479 *
2623 2480 * tte_valid = 1
2624 2481 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2625 2482 * tte_size = size
2626 2483 * tte_nfo = attr & HAT_NOFAULT
2627 2484 * tte_ie = attr & HAT_STRUCTURE_LE
2628 2485 * tte_hmenum = hmenum
2629 2486 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2630 2487 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2631 2488 * tte_ref = 1 (optimization)
2632 2489 * tte_wr_perm = attr & PROT_WRITE;
2633 2490 * tte_no_sync = attr & HAT_NOSYNC
2634 2491 * tte_lock = attr & SFMMU_LOCKTTE
2635 2492 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2636 2493 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2637 2494 * tte_e = attr & SFMMU_SIDEFFECT
2638 2495 * tte_priv = !(attr & PROT_USER)
2639 2496 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2640 2497 * tte_glb = 0
2641 2498 */
2642 2499 void
2643 2500 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2644 2501 {
2645 2502 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2646 2503
2647 2504 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2648 2505 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2649 2506
2650 2507 if (TTE_IS_NOSYNC(ttep)) {
2651 2508 TTE_SET_REF(ttep);
2652 2509 if (TTE_IS_WRITABLE(ttep)) {
2653 2510 TTE_SET_MOD(ttep);
2654 2511 }
2655 2512 }
2656 2513 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2657 2514 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2658 2515 }
2659 2516 }
2660 2517
2661 2518 /*
2662 2519 * This function will add a translation to the hme_blk and allocate the
2663 2520 * hme_blk if one does not exist.
2664 2521 * If a page structure is specified then it will add the
2665 2522 * corresponding hment to the mapping list.
2666 2523 * It will also update the hmenum field for the tte.
2667 2524 *
2668 2525 * Currently this function is only used for kernel mappings.
2669 2526 * So pass invalid region to sfmmu_tteload_array().
2670 2527 */
2671 2528 void
2672 2529 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2673 2530 uint_t flags)
2674 2531 {
2675 2532 ASSERT(sfmmup == ksfmmup);
2676 2533 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2677 2534 SFMMU_INVALID_SHMERID);
2678 2535 }
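
A hedged usage sketch of the kernel-mapping path just described; the attribute
and flag combination is only illustrative, and pfn, vaddr, and pp are assumed
to be supplied by the caller:

	tte_t tte;

	/* Build an 8K, writable, no-hw-sync tte for physical page pfn ... */
	sfmmu_memtte(&tte, pfn, PROT_READ | PROT_WRITE | HAT_NOSYNC, TTE8K);
	/* ... and install it in the kernel hat at vaddr, locked in place. */
	sfmmu_tteload(ksfmmup, &tte, vaddr, pp, HAT_LOAD_LOCK);
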
2679 2536
2680 2537 /*
2681 2538 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2682 2539 * Assumes that a particular page size may only be resident in one TSB.
2683 2540 */
2684 2541 static void
2685 2542 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2686 2543 {
2687 2544 struct tsb_info *tsbinfop = NULL;
2688 2545 uint64_t tag;
2689 2546 struct tsbe *tsbe_addr;
2690 2547 uint64_t tsb_base;
2691 2548 uint_t tsb_size;
2692 2549 int vpshift = MMU_PAGESHIFT;
2693 2550 int phys = 0;
2694 2551
2695 2552 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2696 2553 phys = ktsb_phys;
2697 2554 if (ttesz >= TTE4M) {
2698 2555 #ifndef sun4v
2699 2556 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2700 2557 #endif
2701 2558 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2702 2559 tsb_size = ktsb4m_szcode;
2703 2560 } else {
2704 2561 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2705 2562 tsb_size = ktsb_szcode;
2706 2563 }
2707 2564 } else {
2708 2565 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2709 2566
2710 2567 /*
2711 2568 * If there isn't a TSB for this page size, or the TSB is
2712 2569 * swapped out, there is nothing to do. Note that the latter
2713 2570 * case seems impossible but can occur if hat_pageunload()
2714 2571 * is called on an ISM mapping while the process is swapped
2715 2572 * out.
2716 2573 */
2717 2574 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2718 2575 return;
2719 2576
2720 2577 /*
2721 2578 * If another thread is in the middle of relocating a TSB
2722 2579 * we can't unload the entry so set a flag so that the
2723 2580 * TSB will be flushed before it can be accessed by the
2724 2581 * process.
2725 2582 */
2726 2583 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2727 2584 if (ttep == NULL)
2728 2585 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2729 2586 return;
2730 2587 }
2731 2588 #if defined(UTSB_PHYS)
2732 2589 phys = 1;
2733 2590 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2734 2591 #else
2735 2592 tsb_base = (uint64_t)tsbinfop->tsb_va;
2736 2593 #endif
2737 2594 tsb_size = tsbinfop->tsb_szc;
2738 2595 }
2739 2596 if (ttesz >= TTE4M)
2740 2597 vpshift = MMU_PAGESHIFT4M;
2741 2598
2742 2599 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2743 2600 tag = sfmmu_make_tsbtag(vaddr);
2744 2601
2745 2602 if (ttep == NULL) {
2746 2603 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2747 2604 } else {
2748 2605 if (ttesz >= TTE4M) {
2749 2606 SFMMU_STAT(sf_tsb_load4m);
2750 2607 } else {
2751 2608 SFMMU_STAT(sf_tsb_load8k);
2752 2609 }
2753 2610
2754 2611 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2755 2612 }
2756 2613 }
2757 2614
2758 2615 /*
2759 2616 * Unmap all entries from [start, end) matching the given page size.
2760 2617 *
2761 2618 * This function is used primarily to unmap replicated 64K or 512K entries
2762 2619 * from the TSB that are inserted using the base page size TSB pointer, but
2763 2620 * it may also be called to unmap a range of addresses from the TSB.
2764 2621 */
2765 2622 void
2766 2623 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2767 2624 {
2768 2625 struct tsb_info *tsbinfop;
2769 2626 uint64_t tag;
2770 2627 struct tsbe *tsbe_addr;
2771 2628 caddr_t vaddr;
2772 2629 uint64_t tsb_base;
2773 2630 int vpshift, vpgsz;
2774 2631 uint_t tsb_size;
2775 2632 int phys = 0;
2776 2633
2777 2634 /*
2778 2635 * Assumptions:
2779 2636 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2780 2637 * at a time shooting down any valid entries we encounter.
2781 2638 *
2782 2639 * If ttesz >= 4M we walk the range 4M at a time shooting
2783 2640 * down any valid mappings we find.
2784 2641 */
2785 2642 if (sfmmup == ksfmmup) {
2786 2643 phys = ktsb_phys;
2787 2644 if (ttesz >= TTE4M) {
2788 2645 #ifndef sun4v
2789 2646 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2790 2647 #endif
2791 2648 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2792 2649 tsb_size = ktsb4m_szcode;
2793 2650 } else {
2794 2651 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2795 2652 tsb_size = ktsb_szcode;
2796 2653 }
2797 2654 } else {
2798 2655 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2799 2656
2800 2657 /*
2801 2658 * If there isn't a TSB for this page size, or the TSB is
2802 2659 * swapped out, there is nothing to do. Note that the latter
2803 2660 * case seems impossible but can occur if hat_pageunload()
2804 2661 * is called on an ISM mapping while the process is swapped
2805 2662 * out.
2806 2663 */
2807 2664 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2808 2665 return;
2809 2666
2810 2667 /*
2811 2668 * If another thread is in the middle of relocating a TSB
2812 2669 * we can't unload the entry so set a flag so that the
2813 2670 * TSB will be flushed before it can be accessed by the
2814 2671 * process.
2815 2672 */
2816 2673 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2817 2674 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2818 2675 return;
2819 2676 }
2820 2677 #if defined(UTSB_PHYS)
2821 2678 phys = 1;
2822 2679 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2823 2680 #else
2824 2681 tsb_base = (uint64_t)tsbinfop->tsb_va;
2825 2682 #endif
2826 2683 tsb_size = tsbinfop->tsb_szc;
2827 2684 }
2828 2685 if (ttesz >= TTE4M) {
2829 2686 vpshift = MMU_PAGESHIFT4M;
2830 2687 vpgsz = MMU_PAGESIZE4M;
2831 2688 } else {
2832 2689 vpshift = MMU_PAGESHIFT;
2833 2690 vpgsz = MMU_PAGESIZE;
2834 2691 }
2835 2692
2836 2693 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2837 2694 tag = sfmmu_make_tsbtag(vaddr);
2838 2695 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2839 2696 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2840 2697 }
2841 2698 }
2842 2699
2843 2700 /*
2844 2701 * Select the optimum TSB size given the number of mappings
2845 2702 * that need to be cached.
2846 2703 */
2847 2704 static int
2848 2705 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2849 2706 {
2850 2707 int szc = 0;
2851 2708
2852 2709 #ifdef DEBUG
2853 2710 if (tsb_grow_stress) {
2854 2711 uint32_t randval = (uint32_t)gettick() >> 4;
2855 2712 return (randval % (tsb_max_growsize + 1));
2856 2713 }
2857 2714 #endif /* DEBUG */
2858 2715
2859 2716 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2860 2717 szc++;
2861 2718 return (szc);
2862 2719 }
2863 2720
2864 2721 /*
2865 2722 * This function will add a translation to the hme_blk and allocate the
2866 2723 * hme_blk if one does not exist.
2867 2724 * If a page structure is specified then it will add the
2868 2725 * corresponding hment to the mapping list.
2869 2726 * It will also update the hmenum field for the tte.
2870 2727 * Furthermore, it attempts to create a large page translation
2871 2728 * for <addr,hat> at page array pps. It assumes addr and first
2872 2729 * pp is correctly aligned. It returns 0 if successful and 1 otherwise.
2873 2730 */
2874 2731 static int
2875 2732 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2876 2733 page_t **pps, uint_t flags, uint_t rid)
2877 2734 {
2878 2735 struct hmehash_bucket *hmebp;
2879 2736 struct hme_blk *hmeblkp;
2880 2737 int ret;
2881 2738 uint_t size;
2882 2739
2883 2740 /*
2884 2741 * Get mapping size.
2885 2742 */
2886 2743 size = TTE_CSZ(ttep);
2887 2744 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2888 2745
2889 2746 /*
2890 2747 * Acquire the hash bucket.
2891 2748 */
2892 2749 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2893 2750 ASSERT(hmebp);
2894 2751
2895 2752 /*
2896 2753 * Find the hment block.
2897 2754 */
2898 2755 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2899 2756 rid);
2900 2757 ASSERT(hmeblkp);
2901 2758
2902 2759 /*
2903 2760 * Add the translation.
2904 2761 */
2905 2762 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2906 2763 rid);
2907 2764
2908 2765 /*
2909 2766 * Release the hash bucket.
2910 2767 */
2911 2768 sfmmu_tteload_release_hashbucket(hmebp);
2912 2769
2913 2770 return (ret);
2914 2771 }
2915 2772
2916 2773 /*
2917 2774 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2918 2775 */
2919 2776 static struct hmehash_bucket *
2920 2777 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2921 2778 uint_t rid)
2922 2779 {
2923 2780 struct hmehash_bucket *hmebp;
2924 2781 int hmeshift;
2925 2782 void *htagid = sfmmutohtagid(sfmmup, rid);
2926 2783
2927 2784 ASSERT(htagid != NULL);
2928 2785
2929 2786 hmeshift = HME_HASH_SHIFT(size);
2930 2787
2931 2788 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2932 2789
2933 2790 SFMMU_HASH_LOCK(hmebp);
2934 2791
2935 2792 return (hmebp);
2936 2793 }
2937 2794
2938 2795 /*
2939 2796 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2940 2797  * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2941 2798 * allocated.
2942 2799 */
2943 2800 static struct hme_blk *
2944 2801 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2945 2802 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2946 2803 {
2947 2804 hmeblk_tag hblktag;
2948 2805 int hmeshift;
2949 2806 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2950 2807
2951 2808 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2952 2809
2953 2810 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2954 2811 ASSERT(hblktag.htag_id != NULL);
2955 2812 hmeshift = HME_HASH_SHIFT(size);
2956 2813 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2957 2814 hblktag.htag_rehash = HME_HASH_REHASH(size);
2958 2815 hblktag.htag_rid = rid;
2959 2816
2960 2817 ttearray_realloc:
2961 2818
2962 2819 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2963 2820
2964 2821 /*
2965 2822 	 * We block until hblk_reserve_lock is released; it is held by the
2966 2823 	 * thread temporarily using hblk_reserve until hblk_reserve is
2967 2824 * replaced by a hblk from sfmmu8_cache.
2968 2825 */
2969 2826 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2970 2827 hblk_reserve_thread != curthread) {
2971 2828 SFMMU_HASH_UNLOCK(hmebp);
2972 2829 mutex_enter(&hblk_reserve_lock);
2973 2830 mutex_exit(&hblk_reserve_lock);
2974 2831 SFMMU_STAT(sf_hblk_reserve_hit);
2975 2832 SFMMU_HASH_LOCK(hmebp);
2976 2833 goto ttearray_realloc;
2977 2834 }
2978 2835
2979 2836 if (hmeblkp == NULL) {
2980 2837 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2981 2838 hblktag, flags, rid);
2982 2839 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2983 2840 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2984 2841 } else {
2985 2842 /*
2986 2843 * It is possible for 8k and 64k hblks to collide since they
2987 2844 * have the same rehash value. This is because we
2988 2845 * lazily free hblks and 8K/64K blks could be lingering.
2989 2846 		 * If we find a size mismatch we free the block and try again.
2990 2847 */
2991 2848 if (get_hblk_ttesz(hmeblkp) != size) {
2992 2849 ASSERT(!hmeblkp->hblk_vcnt);
2993 2850 ASSERT(!hmeblkp->hblk_hmecnt);
2994 2851 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2995 2852 &list, 0);
2996 2853 goto ttearray_realloc;
2997 2854 }
2998 2855 if (hmeblkp->hblk_shw_bit) {
2999 2856 /*
3000 2857 * if the hblk was previously used as a shadow hblk then
3001 2858 * we will change it to a normal hblk
3002 2859 */
3003 2860 ASSERT(!hmeblkp->hblk_shared);
3004 2861 if (hmeblkp->hblk_shw_mask) {
3005 2862 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3006 2863 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3007 2864 goto ttearray_realloc;
3008 2865 } else {
3009 2866 hmeblkp->hblk_shw_bit = 0;
3010 2867 }
3011 2868 }
3012 2869 SFMMU_STAT(sf_hblk_hit);
3013 2870 }
3014 2871
3015 2872 /*
3016 2873 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3017 2874 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3018 2875 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3019 2876 * just add these hmeblks to the per-cpu pending queue.
3020 2877 */
3021 2878 sfmmu_hblks_list_purge(&list, 1);
3022 2879
3023 2880 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3024 2881 ASSERT(!hmeblkp->hblk_shw_bit);
3025 2882 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3026 2883 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3027 2884 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3028 2885
3029 2886 return (hmeblkp);
3030 2887 }
3031 2888
3032 2889 /*
3033 2890 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3034 2891 * otherwise.
3035 2892 */
3036 2893 static int
3037 2894 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3038 2895 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3039 2896 {
3040 2897 page_t *pp = *pps;
3041 2898 int hmenum, size, remap;
3042 2899 tte_t tteold, flush_tte;
3043 2900 #ifdef DEBUG
3044 2901 tte_t orig_old;
3045 2902 #endif /* DEBUG */
3046 2903 struct sf_hment *sfhme;
3047 2904 kmutex_t *pml, *pmtx;
3048 2905 hatlock_t *hatlockp;
3049 2906 int myflt;
3050 2907
3051 2908 /*
3052 2909 * remove this panic when we decide to let user virtual address
3053 2910 * space be >= USERLIMIT.
3054 2911 */
3055 2912 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3056 2913 panic("user addr %p in kernel space", (void *)vaddr);
3057 2914 #if defined(TTE_IS_GLOBAL)
3058 2915 if (TTE_IS_GLOBAL(ttep))
3059 2916 panic("sfmmu_tteload: creating global tte");
3060 2917 #endif
3061 2918
3062 2919 #ifdef DEBUG
3063 2920 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3064 2921 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3065 2922 panic("sfmmu_tteload: non cacheable memory tte");
3066 2923 #endif /* DEBUG */
3067 2924
3068 2925 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3069 2926 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3070 2927 TTE_SET_REF(ttep);
3071 2928 TTE_SET_MOD(ttep);
3072 2929 }
3073 2930
3074 2931 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3075 2932 !TTE_IS_MOD(ttep)) {
3076 2933 /*
3077 2934 * Don't load TSB for dummy as in ISM. Also don't preload
3078 2935 * the TSB if the TTE isn't writable since we're likely to
3079 2936 * fault on it again -- preloading can be fairly expensive.
3080 2937 */
3081 2938 flags |= SFMMU_NO_TSBLOAD;
3082 2939 }
3083 2940
3084 2941 size = TTE_CSZ(ttep);
3085 2942 switch (size) {
3086 2943 case TTE8K:
3087 2944 SFMMU_STAT(sf_tteload8k);
3088 2945 break;
3089 2946 case TTE64K:
3090 2947 SFMMU_STAT(sf_tteload64k);
3091 2948 break;
3092 2949 case TTE512K:
3093 2950 SFMMU_STAT(sf_tteload512k);
3094 2951 break;
3095 2952 case TTE4M:
3096 2953 SFMMU_STAT(sf_tteload4m);
3097 2954 break;
3098 2955 case (TTE32M):
3099 2956 SFMMU_STAT(sf_tteload32m);
3100 2957 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3101 2958 break;
3102 2959 case (TTE256M):
3103 2960 SFMMU_STAT(sf_tteload256m);
3104 2961 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3105 2962 break;
3106 2963 }
3107 2964
3108 2965 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3109 2966 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3110 2967 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3111 2968 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3112 2969
3113 2970 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3114 2971
3115 2972 /*
3116 2973 * Need to grab mlist lock here so that pageunload
3117 2974 * will not change tte behind us.
3118 2975 */
3119 2976 if (pp) {
3120 2977 pml = sfmmu_mlist_enter(pp);
3121 2978 }
3122 2979
3123 2980 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3124 2981 /*
3125 2982 * Look for corresponding hment and if valid verify
3126 2983 * pfns are equal.
3127 2984 */
3128 2985 remap = TTE_IS_VALID(&tteold);
3129 2986 if (remap) {
3130 2987 pfn_t new_pfn, old_pfn;
3131 2988
3132 2989 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3133 2990 new_pfn = TTE_TO_PFN(vaddr, ttep);
3134 2991
3135 2992 if (flags & HAT_LOAD_REMAP) {
3136 2993 /* make sure we are remapping same type of pages */
3137 2994 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3138 2995 panic("sfmmu_tteload - tte remap io<->memory");
3139 2996 }
3140 2997 if (old_pfn != new_pfn &&
3141 2998 (pp != NULL || sfhme->hme_page != NULL)) {
3142 2999 panic("sfmmu_tteload - tte remap pp != NULL");
3143 3000 }
3144 3001 } else if (old_pfn != new_pfn) {
3145 3002 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3146 3003 (void *)hmeblkp);
3147 3004 }
3148 3005 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3149 3006 }
3150 3007
3151 3008 if (pp) {
3152 3009 if (size == TTE8K) {
3153 3010 #ifdef VAC
3154 3011 /*
3155 3012 * Handle VAC consistency
3156 3013 */
3157 3014 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3158 3015 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3159 3016 }
3160 3017 #endif
3161 3018
3162 3019 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3163 3020 pmtx = sfmmu_page_enter(pp);
3164 3021 PP_CLRRO(pp);
3165 3022 sfmmu_page_exit(pmtx);
3166 3023 } else if (!PP_ISMAPPED(pp) &&
3167 3024 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3168 3025 pmtx = sfmmu_page_enter(pp);
3169 3026 if (!(PP_ISMOD(pp))) {
3170 3027 PP_SETRO(pp);
3171 3028 }
3172 3029 sfmmu_page_exit(pmtx);
3173 3030 }
3174 3031
3175 3032 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3176 3033 /*
3177 3034 * sfmmu_pagearray_setup failed so return
3178 3035 */
3179 3036 sfmmu_mlist_exit(pml);
3180 3037 return (1);
3181 3038 }
3182 3039 }
3183 3040
3184 3041 /*
3185 3042 * Make sure hment is not on a mapping list.
3186 3043 */
3187 3044 ASSERT(remap || (sfhme->hme_page == NULL));
3188 3045
3189 3046 /* if it is not a remap then hme->next better be NULL */
3190 3047 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3191 3048
3192 3049 if (flags & HAT_LOAD_LOCK) {
3193 3050 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3194 3051 panic("too high lckcnt-hmeblk %p",
3195 3052 (void *)hmeblkp);
3196 3053 }
3197 3054 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3198 3055
3199 3056 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3200 3057 }
3201 3058
3202 3059 #ifdef VAC
3203 3060 if (pp && PP_ISNC(pp)) {
3204 3061 /*
3205 3062 * If the physical page is marked to be uncacheable, like
3206 3063 * by a vac conflict, make sure the new mapping is also
3207 3064 * uncacheable.
3208 3065 */
3209 3066 TTE_CLR_VCACHEABLE(ttep);
3210 3067 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3211 3068 }
3212 3069 #endif
3213 3070 ttep->tte_hmenum = hmenum;
3214 3071
3215 3072 #ifdef DEBUG
3216 3073 orig_old = tteold;
3217 3074 #endif /* DEBUG */
3218 3075
3219 3076 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3220 3077 if ((sfmmup == KHATID) &&
3221 3078 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3222 3079 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3223 3080 }
3224 3081 #ifdef DEBUG
3225 3082 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3226 3083 #endif /* DEBUG */
3227 3084 }
3228 3085 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3229 3086
3230 3087 if (!TTE_IS_VALID(&tteold)) {
3231 3088
3232 3089 atomic_inc_16(&hmeblkp->hblk_vcnt);
3233 3090 if (rid == SFMMU_INVALID_SHMERID) {
3234 3091 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3235 3092 } else {
3236 3093 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3237 3094 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3238 3095 /*
3239 3096 * We already accounted for region ttecnt's in sfmmu
3240 3097 * during hat_join_region() processing. Here we
3241 3098 			 * only update ttecnt's in region structure.
3242 3099 */
3243 3100 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3244 3101 }
3245 3102 }
3246 3103
3247 3104 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3248 3105 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3249 3106 sfmmup != ksfmmup) {
3250 3107 uchar_t tteflag = 1 << size;
3251 3108 if (rid == SFMMU_INVALID_SHMERID) {
3252 3109 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3253 3110 hatlockp = sfmmu_hat_enter(sfmmup);
3254 3111 sfmmup->sfmmu_tteflags |= tteflag;
3255 3112 sfmmu_hat_exit(hatlockp);
3256 3113 }
3257 3114 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3258 3115 hatlockp = sfmmu_hat_enter(sfmmup);
3259 3116 sfmmup->sfmmu_rtteflags |= tteflag;
3260 3117 sfmmu_hat_exit(hatlockp);
3261 3118 }
3262 3119 /*
3263 3120 * Update the current CPU tsbmiss area, so the current thread
3264 3121 * won't need to take the tsbmiss for the new pagesize.
3265 3122 * The other threads in the process will update their tsb
3266 3123 * miss area lazily in sfmmu_tsbmiss_exception() when they
3267 3124 * fail to find the translation for a newly added pagesize.
3268 3125 */
3269 3126 if (size > TTE64K && myflt) {
3270 3127 struct tsbmiss *tsbmp;
3271 3128 kpreempt_disable();
3272 3129 tsbmp = &tsbmiss_area[CPU->cpu_id];
3273 3130 if (rid == SFMMU_INVALID_SHMERID) {
3274 3131 if (!(tsbmp->uhat_tteflags & tteflag)) {
3275 3132 tsbmp->uhat_tteflags |= tteflag;
3276 3133 }
3277 3134 } else {
3278 3135 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3279 3136 tsbmp->uhat_rtteflags |= tteflag;
3280 3137 }
3281 3138 }
3282 3139 kpreempt_enable();
3283 3140 }
3284 3141 }
3285 3142
3286 3143 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3287 3144 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3288 3145 hatlockp = sfmmu_hat_enter(sfmmup);
3289 3146 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3290 3147 sfmmu_hat_exit(hatlockp);
3291 3148 }
3292 3149
3293 3150 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3294 3151 hw_tte.tte_intlo;
3295 3152 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3296 3153 hw_tte.tte_inthi;
3297 3154
3298 3155 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3299 3156 /*
3300 3157 * If remap and new tte differs from old tte we need
3301 3158 * to sync the mod bit and flush TLB/TSB. We don't
3302 3159 * need to sync ref bit because we currently always set
3303 3160 * ref bit in tteload.
3304 3161 */
3305 3162 ASSERT(TTE_IS_REF(ttep));
3306 3163 if (TTE_IS_MOD(&tteold)) {
3307 3164 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3308 3165 }
3309 3166 /*
3310 3167 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3311 3168 * hmes are only used for read only text. Adding this code for
3312 3169 * completeness and future use of shared hmeblks with writable
3313 3170 * mappings of VMODSORT vnodes.
3314 3171 */
3315 3172 if (hmeblkp->hblk_shared) {
3316 3173 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3317 3174 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3318 3175 xt_sync(cpuset);
3319 3176 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3320 3177 } else {
3321 3178 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3322 3179 xt_sync(sfmmup->sfmmu_cpusran);
3323 3180 }
3324 3181 }
3325 3182
3326 3183 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3327 3184 /*
3328 3185 * We only preload 8K and 4M mappings into the TSB, since
3329 3186 * 64K and 512K mappings are replicated and hence don't
3330 3187 * have a single, unique TSB entry. Ditto for 32M/256M.
3331 3188 */
3332 3189 if (size == TTE8K || size == TTE4M) {
3333 3190 sf_scd_t *scdp;
3334 3191 hatlockp = sfmmu_hat_enter(sfmmup);
3335 3192 /*
3336 3193 * Don't preload private TSB if the mapping is used
3337 3194 * by the shctx in the SCD.
3338 3195 */
3339 3196 scdp = sfmmup->sfmmu_scdp;
3340 3197 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3341 3198 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3342 3199 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3343 3200 size);
3344 3201 }
3345 3202 sfmmu_hat_exit(hatlockp);
3346 3203 }
3347 3204 }
3348 3205 if (pp) {
3349 3206 if (!remap) {
3350 3207 HME_ADD(sfhme, pp);
3351 3208 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3352 3209 ASSERT(hmeblkp->hblk_hmecnt > 0);
3353 3210
3354 3211 /*
3355 3212 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3356 3213 * see pageunload() for comment.
3357 3214 */
3358 3215 }
3359 3216 sfmmu_mlist_exit(pml);
3360 3217 }
3361 3218
3362 3219 return (0);
3363 3220 }
3364 3221 /*
3365 3222 * Function unlocks hash bucket.
3366 3223 */
3367 3224 static void
3368 3225 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3369 3226 {
3370 3227 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3371 3228 SFMMU_HASH_UNLOCK(hmebp);
3372 3229 }
3373 3230
3374 3231 /*
3375 3232 * function which checks and sets up page array for a large
3376 3233 * translation. Will set p_vcolor, p_index, p_ro fields.
3377 3234 * Assumes addr and pfnum of first page are properly aligned.
3378 3235  * Will check for physical contiguity. If the check fails it returns
3379 3236  * nonzero.
3380 3237 */
3381 3238 static int
3382 3239 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3383 3240 {
3384 3241 int i, index, ttesz;
3385 3242 pfn_t pfnum;
3386 3243 pgcnt_t npgs;
3387 3244 page_t *pp, *pp1;
3388 3245 kmutex_t *pmtx;
3389 3246 #ifdef VAC
3390 3247 int osz;
3391 3248 int cflags = 0;
3392 3249 int vac_err = 0;
3393 3250 #endif
3394 3251 int newidx = 0;
3395 3252
3396 3253 ttesz = TTE_CSZ(ttep);
3397 3254
3398 3255 ASSERT(ttesz > TTE8K);
3399 3256
3400 3257 npgs = TTEPAGES(ttesz);
3401 3258 index = PAGESZ_TO_INDEX(ttesz);
3402 3259
3403 3260 pfnum = (*pps)->p_pagenum;
3404 3261 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3405 3262
3406 3263 /*
3407 3264 * Save the first pp so we can do HAT_TMPNC at the end.
3408 3265 */
3409 3266 pp1 = *pps;
3410 3267 #ifdef VAC
3411 3268 osz = fnd_mapping_sz(pp1);
3412 3269 #endif
3413 3270
3414 3271 for (i = 0; i < npgs; i++, pps++) {
3415 3272 pp = *pps;
3416 3273 ASSERT(PAGE_LOCKED(pp));
3417 3274 ASSERT(pp->p_szc >= ttesz);
3418 3275 ASSERT(pp->p_szc == pp1->p_szc);
3419 3276 ASSERT(sfmmu_mlist_held(pp));
3420 3277
3421 3278 /*
3422 3279 * XXX is it possible to maintain P_RO on the root only?
3423 3280 */
3424 3281 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3425 3282 pmtx = sfmmu_page_enter(pp);
3426 3283 PP_CLRRO(pp);
3427 3284 sfmmu_page_exit(pmtx);
3428 3285 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3429 3286 !PP_ISMOD(pp)) {
3430 3287 pmtx = sfmmu_page_enter(pp);
3431 3288 if (!(PP_ISMOD(pp))) {
3432 3289 PP_SETRO(pp);
3433 3290 }
3434 3291 sfmmu_page_exit(pmtx);
3435 3292 }
3436 3293
3437 3294 /*
3438 3295 * If this is a remap we skip vac & contiguity checks.
3439 3296 */
3440 3297 if (remap)
3441 3298 continue;
3442 3299
3443 3300 /*
3444 3301 * set p_vcolor and detect any vac conflicts.
3445 3302 */
3446 3303 #ifdef VAC
3447 3304 if (vac_err == 0) {
3448 3305 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3449 3306
3450 3307 }
3451 3308 #endif
3452 3309
3453 3310 /*
3454 3311 * Save current index in case we need to undo it.
3455 3312 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3456 3313 * "SFMMU_INDEX_SHIFT 6"
3457 3314 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3458 3315 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3459 3316 *
3460 3317 * So: index = PAGESZ_TO_INDEX(ttesz);
3461 3318 * if ttesz == 1 then index = 0x2
3462 3319 * 2 then index = 0x4
3463 3320 * 3 then index = 0x8
3464 3321 * 4 then index = 0x10
3465 3322 * 5 then index = 0x20
3466 3323 * The code below checks if it's a new pagesize (ie, newidx)
3467 3324 * in case we need to take it back out of p_index,
3468 3325 * and then or's the new index into the existing index.
3469 3326 */
3470 3327 if ((PP_MAPINDEX(pp) & index) == 0)
3471 3328 newidx = 1;
3472 3329 pp->p_index = (PP_MAPINDEX(pp) | index);
3473 3330
3474 3331 /*
3475 3332 * contiguity check
3476 3333 */
3477 3334 if (pp->p_pagenum != pfnum) {
3478 3335 /*
3479 3336 * If we fail the contiguity test then
3480 3337 * the only thing we need to fix is the p_index field.
3481 3338 * We might get a few extra flushes but since this
3482 3339 * path is rare that is ok. The p_ro field will
3483 3340 * get automatically fixed on the next tteload to
3484 3341 * the page. NO TNC bit is set yet.
3485 3342 */
3486 3343 while (i >= 0) {
3487 3344 pp = *pps;
3488 3345 if (newidx)
3489 3346 pp->p_index = (PP_MAPINDEX(pp) &
3490 3347 ~index);
3491 3348 pps--;
3492 3349 i--;
3493 3350 }
3494 3351 return (1);
3495 3352 }
3496 3353 pfnum++;
3497 3354 addr += MMU_PAGESIZE;
3498 3355 }
3499 3356
3500 3357 #ifdef VAC
3501 3358 if (vac_err) {
3502 3359 if (ttesz > osz) {
3503 3360 /*
3504 3361 			 * There are some smaller mappings that cause vac
3505 3362 * conflicts. Convert all existing small mappings to
3506 3363 * TNC.
3507 3364 */
3508 3365 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3509 3366 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3510 3367 npgs);
3511 3368 } else {
3512 3369 /* EMPTY */
3513 3370 /*
3514 3371 			 * If there exists a big page mapping,
3515 3372 			 * that means the whole existing big page
3516 3373 			 * has the TNC setting already. No need to convert to
3517 3374 * TNC again.
3518 3375 */
3519 3376 ASSERT(PP_ISTNC(pp1));
3520 3377 }
3521 3378 }
3522 3379 #endif /* VAC */
3523 3380
3524 3381 return (0);
3525 3382 }
3526 3383
3527 3384 #ifdef VAC
3528 3385 /*
3529 3386  * Routine that checks vac consistency for a large page. It also
3530 3387 * sets virtual color for all pp's for this big mapping.
3531 3388 */
3532 3389 static int
3533 3390 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3534 3391 {
3535 3392 int vcolor, ocolor;
3536 3393
3537 3394 ASSERT(sfmmu_mlist_held(pp));
3538 3395
3539 3396 if (PP_ISNC(pp)) {
3540 3397 return (HAT_TMPNC);
3541 3398 }
3542 3399
3543 3400 vcolor = addr_to_vcolor(addr);
3544 3401 if (PP_NEWPAGE(pp)) {
3545 3402 PP_SET_VCOLOR(pp, vcolor);
3546 3403 return (0);
3547 3404 }
3548 3405
3549 3406 ocolor = PP_GET_VCOLOR(pp);
3550 3407 if (ocolor == vcolor) {
3551 3408 return (0);
3552 3409 }
3553 3410
3554 3411 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3555 3412 /*
3556 3413 		 * Previous user of page had a different color
3557 3414 * but since there are no current users
3558 3415 * we just flush the cache and change the color.
3559 3416 * As an optimization for large pages we flush the
3560 3417 * entire cache of that color and set a flag.
3561 3418 */
3562 3419 SFMMU_STAT(sf_pgcolor_conflict);
3563 3420 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3564 3421 CacheColor_SetFlushed(*cflags, ocolor);
3565 3422 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3566 3423 }
3567 3424 PP_SET_VCOLOR(pp, vcolor);
3568 3425 return (0);
3569 3426 }
3570 3427
3571 3428 /*
3572 3429 * We got a real conflict with a current mapping.
3573 3430 	 * Set flags to start uncaching all mappings
3574 3431 * and return failure so we restart looping
3575 3432 * the pp array from the beginning.
3576 3433 */
3577 3434 return (HAT_TMPNC);
3578 3435 }
3579 3436 #endif /* VAC */
3580 3437
3581 3438 /*
3582 3439 * creates a large page shadow hmeblk for a tte.
3583 3440 * The purpose of this routine is to allow us to do quick unloads because
3584 3441 * the vm layer can easily pass a very large but sparsely populated range.
3585 3442 */
3586 3443 static struct hme_blk *
3587 3444 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3588 3445 {
3589 3446 struct hmehash_bucket *hmebp;
3590 3447 hmeblk_tag hblktag;
3591 3448 int hmeshift, size, vshift;
3592 3449 uint_t shw_mask, newshw_mask;
3593 3450 struct hme_blk *hmeblkp;
3594 3451
3595 3452 ASSERT(sfmmup != KHATID);
3596 3453 if (mmu_page_sizes == max_mmu_page_sizes) {
3597 3454 ASSERT(ttesz < TTE256M);
3598 3455 } else {
3599 3456 ASSERT(ttesz < TTE4M);
3600 3457 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3601 3458 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3602 3459 }
3603 3460
3604 3461 if (ttesz == TTE8K) {
3605 3462 size = TTE512K;
3606 3463 } else {
3607 3464 size = ++ttesz;
3608 3465 }
3609 3466
3610 3467 hblktag.htag_id = sfmmup;
3611 3468 hmeshift = HME_HASH_SHIFT(size);
3612 3469 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3613 3470 hblktag.htag_rehash = HME_HASH_REHASH(size);
3614 3471 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3615 3472 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3616 3473
3617 3474 SFMMU_HASH_LOCK(hmebp);
3618 3475
3619 3476 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3620 3477 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3621 3478 if (hmeblkp == NULL) {
3622 3479 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3623 3480 hblktag, flags, SFMMU_INVALID_SHMERID);
3624 3481 }
3625 3482 ASSERT(hmeblkp);
3626 3483 if (!hmeblkp->hblk_shw_mask) {
3627 3484 /*
3628 3485 		 * if this is an unused hblk it was just allocated or could
3629 3486 * potentially be a previous large page hblk so we need to
3630 3487 * set the shadow bit.
3631 3488 */
3632 3489 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3633 3490 hmeblkp->hblk_shw_bit = 1;
3634 3491 } else if (hmeblkp->hblk_shw_bit == 0) {
3635 3492 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3636 3493 (void *)hmeblkp);
3637 3494 }
3638 3495 ASSERT(hmeblkp->hblk_shw_bit == 1);
3639 3496 ASSERT(!hmeblkp->hblk_shared);
3640 3497 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3641 3498 ASSERT(vshift < 8);
3642 3499 /*
3643 3500 * Atomically set shw mask bit
3644 3501 */
3645 3502 do {
3646 3503 shw_mask = hmeblkp->hblk_shw_mask;
3647 3504 newshw_mask = shw_mask | (1 << vshift);
3648 3505 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3649 3506 newshw_mask);
3650 3507 } while (newshw_mask != shw_mask);
3651 3508
3652 3509 SFMMU_HASH_UNLOCK(hmebp);
3653 3510
3654 3511 return (hmeblkp);
3655 3512 }
3656 3513
3657 3514 /*
3658 3515 * This routine cleanup a previous shadow hmeblk and changes it to
3659 3516 * a regular hblk. This happens rarely but it is possible
3660 3517 * when a process wants to use large pages and there are hblks still
3661 3518 * lying around from the previous as that used these hmeblks.
3662 3519 * The alternative was to cleanup the shadow hblks at unload time
3663 3520 * but since so few user processes actually use large pages, it is
3664 3521 * better to be lazy and cleanup at this time.
3665 3522 */
3666 3523 static void
3667 3524 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3668 3525 struct hmehash_bucket *hmebp)
3669 3526 {
3670 3527 caddr_t addr, endaddr;
3671 3528 int hashno, size;
3672 3529
3673 3530 ASSERT(hmeblkp->hblk_shw_bit);
3674 3531 ASSERT(!hmeblkp->hblk_shared);
3675 3532
3676 3533 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3677 3534
3678 3535 if (!hmeblkp->hblk_shw_mask) {
3679 3536 hmeblkp->hblk_shw_bit = 0;
3680 3537 return;
3681 3538 }
3682 3539 addr = (caddr_t)get_hblk_base(hmeblkp);
3683 3540 endaddr = get_hblk_endaddr(hmeblkp);
3684 3541 size = get_hblk_ttesz(hmeblkp);
3685 3542 hashno = size - 1;
3686 3543 ASSERT(hashno > 0);
3687 3544 SFMMU_HASH_UNLOCK(hmebp);
3688 3545
3689 3546 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3690 3547
3691 3548 SFMMU_HASH_LOCK(hmebp);
3692 3549 }
3693 3550
3694 3551 static void
3695 3552 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3696 3553 int hashno)
3697 3554 {
3698 3555 int hmeshift, shadow = 0;
3699 3556 hmeblk_tag hblktag;
3700 3557 struct hmehash_bucket *hmebp;
3701 3558 struct hme_blk *hmeblkp;
3702 3559 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3703 3560
3704 3561 ASSERT(hashno > 0);
3705 3562 hblktag.htag_id = sfmmup;
3706 3563 hblktag.htag_rehash = hashno;
3707 3564 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3708 3565
3709 3566 hmeshift = HME_HASH_SHIFT(hashno);
3710 3567
3711 3568 while (addr < endaddr) {
3712 3569 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3713 3570 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3714 3571 SFMMU_HASH_LOCK(hmebp);
3715 3572 /* inline HME_HASH_SEARCH */
3716 3573 hmeblkp = hmebp->hmeblkp;
3717 3574 pr_hblk = NULL;
3718 3575 while (hmeblkp) {
3719 3576 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3720 3577 /* found hme_blk */
3721 3578 ASSERT(!hmeblkp->hblk_shared);
3722 3579 if (hmeblkp->hblk_shw_bit) {
3723 3580 if (hmeblkp->hblk_shw_mask) {
3724 3581 shadow = 1;
3725 3582 sfmmu_shadow_hcleanup(sfmmup,
3726 3583 hmeblkp, hmebp);
3727 3584 break;
3728 3585 } else {
3729 3586 hmeblkp->hblk_shw_bit = 0;
3730 3587 }
3731 3588 }
3732 3589
3733 3590 /*
3734 3591 			 * Hblk_hmecnt and hblk_vcnt could be nonzero
3735 3592 			 * since hblk_unload() does not guarantee that.
3736 3593 *
3737 3594 * XXX - this could cause tteload() to spin
3738 3595 * where sfmmu_shadow_hcleanup() is called.
3739 3596 */
3740 3597 }
3741 3598
3742 3599 nx_hblk = hmeblkp->hblk_next;
3743 3600 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3744 3601 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3745 3602 &list, 0);
3746 3603 } else {
3747 3604 pr_hblk = hmeblkp;
3748 3605 }
3749 3606 hmeblkp = nx_hblk;
3750 3607 }
3751 3608
3752 3609 SFMMU_HASH_UNLOCK(hmebp);
3753 3610
3754 3611 if (shadow) {
3755 3612 /*
3756 3613 			 * We found another shadow hblk, so we cleaned up its
3757 3614 			 * children.  We need to go back and clean up
3758 3615 			 * the original hblk, so we don't advance
3759 3616 			 * addr.
3760 3617 */
3761 3618 shadow = 0;
3762 3619 } else {
3763 3620 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3764 3621 (1 << hmeshift));
3765 3622 }
3766 3623 }
3767 3624 sfmmu_hblks_list_purge(&list, 0);
3768 3625 }
3769 3626
3770 3627 /*
3771 3628  * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3772 3629 * may still linger on after pageunload.
3773 3630 */
3774 3631 static void
3775 3632 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3776 3633 {
3777 3634 int hmeshift;
3778 3635 hmeblk_tag hblktag;
3779 3636 struct hmehash_bucket *hmebp;
3780 3637 struct hme_blk *hmeblkp;
3781 3638 struct hme_blk *pr_hblk;
3782 3639 struct hme_blk *list = NULL;
3783 3640
3784 3641 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3785 3642 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3786 3643
3787 3644 hmeshift = HME_HASH_SHIFT(ttesz);
3788 3645 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3789 3646 hblktag.htag_rehash = ttesz;
3790 3647 hblktag.htag_rid = rid;
3791 3648 hblktag.htag_id = srdp;
3792 3649 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3793 3650
3794 3651 SFMMU_HASH_LOCK(hmebp);
3795 3652 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3796 3653 if (hmeblkp != NULL) {
3797 3654 ASSERT(hmeblkp->hblk_shared);
3798 3655 ASSERT(!hmeblkp->hblk_shw_bit);
3799 3656 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3800 3657 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3801 3658 }
3802 3659 ASSERT(!hmeblkp->hblk_lckcnt);
3803 3660 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3804 3661 &list, 0);
3805 3662 }
3806 3663 SFMMU_HASH_UNLOCK(hmebp);
3807 3664 sfmmu_hblks_list_purge(&list, 0);
3808 3665 }
3809 3666
3810 3667 /* ARGSUSED */
3811 3668 static void
3812 3669 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3813 3670 size_t r_size, void *r_obj, u_offset_t r_objoff)
3814 3671 {
3815 3672 }
3816 3673
3817 3674 /*
3818 3675 * Searches for an hmeblk which maps addr, then unloads this mapping
3819 3676 * and updates *eaddrp, if the hmeblk is found.
3820 3677 */
3821 3678 static void
3822 3679 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3823 3680 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3824 3681 {
3825 3682 int hmeshift;
3826 3683 hmeblk_tag hblktag;
3827 3684 struct hmehash_bucket *hmebp;
3828 3685 struct hme_blk *hmeblkp;
3829 3686 struct hme_blk *pr_hblk;
3830 3687 struct hme_blk *list = NULL;
3831 3688
3832 3689 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3833 3690 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3834 3691 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3835 3692
3836 3693 hmeshift = HME_HASH_SHIFT(ttesz);
3837 3694 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3838 3695 hblktag.htag_rehash = ttesz;
3839 3696 hblktag.htag_rid = rid;
3840 3697 hblktag.htag_id = srdp;
3841 3698 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3842 3699
3843 3700 SFMMU_HASH_LOCK(hmebp);
3844 3701 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3845 3702 if (hmeblkp != NULL) {
3846 3703 ASSERT(hmeblkp->hblk_shared);
3847 3704 ASSERT(!hmeblkp->hblk_lckcnt);
3848 3705 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3849 3706 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3850 3707 eaddr, NULL, HAT_UNLOAD);
3851 3708 ASSERT(*eaddrp > addr);
3852 3709 }
3853 3710 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3854 3711 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3855 3712 &list, 0);
3856 3713 }
3857 3714 SFMMU_HASH_UNLOCK(hmebp);
3858 3715 sfmmu_hblks_list_purge(&list, 0);
3859 3716 }
3860 3717
3861 3718 static void
3862 3719 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3863 3720 {
3864 3721 int ttesz = rgnp->rgn_pgszc;
3865 3722 size_t rsz = rgnp->rgn_size;
3866 3723 caddr_t rsaddr = rgnp->rgn_saddr;
3867 3724 caddr_t readdr = rsaddr + rsz;
3868 3725 caddr_t rhsaddr;
3869 3726 caddr_t va;
3870 3727 uint_t rid = rgnp->rgn_id;
3871 3728 caddr_t cbsaddr;
3872 3729 caddr_t cbeaddr;
3873 3730 hat_rgn_cb_func_t rcbfunc;
3874 3731 ulong_t cnt;
3875 3732
3876 3733 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3877 3734 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3878 3735
3879 3736 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3880 3737 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3881 3738 if (ttesz < HBLK_MIN_TTESZ) {
3882 3739 ttesz = HBLK_MIN_TTESZ;
3883 3740 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3884 3741 } else {
3885 3742 rhsaddr = rsaddr;
3886 3743 }
3887 3744
3888 3745 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3889 3746 rcbfunc = sfmmu_rgn_cb_noop;
3890 3747 }
3891 3748
3892 3749 while (ttesz >= HBLK_MIN_TTESZ) {
3893 3750 cbsaddr = rsaddr;
3894 3751 cbeaddr = rsaddr;
3895 3752 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3896 3753 ttesz--;
3897 3754 continue;
3898 3755 }
3899 3756 cnt = 0;
3900 3757 va = rsaddr;
3901 3758 while (va < readdr) {
3902 3759 ASSERT(va >= rhsaddr);
3903 3760 if (va != cbeaddr) {
3904 3761 if (cbeaddr != cbsaddr) {
3905 3762 ASSERT(cbeaddr > cbsaddr);
3906 3763 (*rcbfunc)(cbsaddr, cbeaddr,
3907 3764 rsaddr, rsz, rgnp->rgn_obj,
3908 3765 rgnp->rgn_objoff);
3909 3766 }
3910 3767 cbsaddr = va;
3911 3768 cbeaddr = va;
3912 3769 }
3913 3770 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3914 3771 ttesz, &cbeaddr);
3915 3772 cnt++;
3916 3773 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3917 3774 }
3918 3775 if (cbeaddr != cbsaddr) {
3919 3776 ASSERT(cbeaddr > cbsaddr);
3920 3777 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3921 3778 rsz, rgnp->rgn_obj,
3922 3779 rgnp->rgn_objoff);
3923 3780 }
3924 3781 ttesz--;
3925 3782 }
3926 3783 }
3927 3784
3928 3785 /*
3929 3786 * Release one hardware address translation lock on the given address range.
3930 3787 */
3931 3788 void
3932 3789 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3933 3790 {
3934 3791 struct hmehash_bucket *hmebp;
3935 3792 hmeblk_tag hblktag;
3936 3793 int hmeshift, hashno = 1;
3937 3794 struct hme_blk *hmeblkp, *list = NULL;
3938 3795 caddr_t endaddr;
3939 3796
3940 3797 ASSERT(sfmmup != NULL);
3941 3798
3942 3799 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3943 3800 ASSERT((len & MMU_PAGEOFFSET) == 0);
3944 3801 endaddr = addr + len;
3945 3802 hblktag.htag_id = sfmmup;
3946 3803 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3947 3804
3948 3805 /*
3949 3806 * Spitfire supports 4 page sizes.
3950 3807 * Most pages are expected to be of the smallest page size (8K) and
3951 3808 * these will not need to be rehashed. 64K pages also don't need to be
3952 3809 * rehashed because an hmeblk spans 64K of address space. 512K pages
3953 3810 	 * might need 1 rehash and 4M pages might need 2 rehashes.
3954 3811 */
3955 3812 while (addr < endaddr) {
3956 3813 hmeshift = HME_HASH_SHIFT(hashno);
3957 3814 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3958 3815 hblktag.htag_rehash = hashno;
3959 3816 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3960 3817
3961 3818 SFMMU_HASH_LOCK(hmebp);
3962 3819
3963 3820 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3964 3821 if (hmeblkp != NULL) {
3965 3822 ASSERT(!hmeblkp->hblk_shared);
3966 3823 /*
3967 3824 * If we encounter a shadow hmeblk then
3968 3825 * we know there are no valid hmeblks mapping
3969 3826 * this address at this size or larger.
3970 3827 * Just increment address by the smallest
3971 3828 * page size.
3972 3829 */
3973 3830 if (hmeblkp->hblk_shw_bit) {
3974 3831 addr += MMU_PAGESIZE;
3975 3832 } else {
3976 3833 addr = sfmmu_hblk_unlock(hmeblkp, addr,
3977 3834 endaddr);
3978 3835 }
3979 3836 SFMMU_HASH_UNLOCK(hmebp);
3980 3837 hashno = 1;
3981 3838 continue;
3982 3839 }
3983 3840 SFMMU_HASH_UNLOCK(hmebp);
3984 3841
3985 3842 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3986 3843 /*
3987 3844 * We have traversed the whole list and rehashed
3988 3845 * if necessary without finding the address to unlock
3989 3846 * which should never happen.
3990 3847 */
3991 3848 panic("sfmmu_unlock: addr not found. "
3992 3849 "addr %p hat %p", (void *)addr, (void *)sfmmup);
3993 3850 } else {
3994 3851 hashno++;
3995 3852 }
3996 3853 }
3997 3854
3998 3855 sfmmu_hblks_list_purge(&list, 0);
3999 3856 }
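/*
 * Illustrative sketch, not part of this change: pairing a locked load with
 * hat_unlock().  hat_memload() is the machine independent interface declared
 * in <vm/hat.h>; the wrapper function, its arguments and the PROT_* attributes
 * used here are assumptions for illustration only.
 */
#if 0	/* illustrative only, never compiled */
static void
example_lock_then_unlock(struct hat *hat, caddr_t addr, page_t *pp)
{
	/* Load one 8K translation and bump the hmeblk lock count. */
	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD_LOCK);

	/* ...the translation cannot be unloaded while it stays locked... */

	/* Drop the lock taken by HAT_LOAD_LOCK; len must be page aligned. */
	hat_unlock(hat, addr, MMU_PAGESIZE);
}
#endif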
4000 3857
4001 3858 void
4002 3859 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4003 3860 hat_region_cookie_t rcookie)
4004 3861 {
4005 3862 sf_srd_t *srdp;
4006 3863 sf_region_t *rgnp;
4007 3864 int ttesz;
4008 3865 uint_t rid;
4009 3866 caddr_t eaddr;
4010 3867 caddr_t va;
4011 3868 int hmeshift;
4012 3869 hmeblk_tag hblktag;
4013 3870 struct hmehash_bucket *hmebp;
4014 3871 struct hme_blk *hmeblkp;
4015 3872 struct hme_blk *pr_hblk;
4016 3873 struct hme_blk *list;
4017 3874
4018 3875 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4019 3876 hat_unlock(sfmmup, addr, len);
4020 3877 return;
4021 3878 }
4022 3879
4023 3880 ASSERT(sfmmup != NULL);
4024 3881 ASSERT(sfmmup != ksfmmup);
4025 3882
4026 3883 srdp = sfmmup->sfmmu_srdp;
4027 3884 rid = (uint_t)((uint64_t)rcookie);
4028 3885 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4029 3886 eaddr = addr + len;
4030 3887 va = addr;
4031 3888 list = NULL;
4032 3889 rgnp = srdp->srd_hmergnp[rid];
4033 3890 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4034 3891
4035 3892 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4036 3893 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4037 3894 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4038 3895 ttesz = HBLK_MIN_TTESZ;
4039 3896 } else {
4040 3897 ttesz = rgnp->rgn_pgszc;
4041 3898 }
4042 3899 while (va < eaddr) {
4043 3900 while (ttesz < rgnp->rgn_pgszc &&
4044 3901 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4045 3902 ttesz++;
4046 3903 }
4047 3904 while (ttesz >= HBLK_MIN_TTESZ) {
4048 3905 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4049 3906 ttesz--;
4050 3907 continue;
4051 3908 }
4052 3909 hmeshift = HME_HASH_SHIFT(ttesz);
4053 3910 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4054 3911 hblktag.htag_rehash = ttesz;
4055 3912 hblktag.htag_rid = rid;
4056 3913 hblktag.htag_id = srdp;
4057 3914 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4058 3915 SFMMU_HASH_LOCK(hmebp);
4059 3916 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4060 3917 &list);
4061 3918 if (hmeblkp == NULL) {
4062 3919 SFMMU_HASH_UNLOCK(hmebp);
4063 3920 ttesz--;
4064 3921 continue;
4065 3922 }
4066 3923 ASSERT(hmeblkp->hblk_shared);
4067 3924 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4068 3925 ASSERT(va >= eaddr ||
4069 3926 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4070 3927 SFMMU_HASH_UNLOCK(hmebp);
4071 3928 break;
4072 3929 }
4073 3930 if (ttesz < HBLK_MIN_TTESZ) {
4074 3931 panic("hat_unlock_region: addr not found "
4075 3932 "addr %p hat %p", (void *)va, (void *)sfmmup);
4076 3933 }
4077 3934 }
4078 3935 sfmmu_hblks_list_purge(&list, 0);
4079 3936 }
4080 3937
4081 3938 /*
4082 3939 * Function to unlock a range of addresses in an hmeblk. It returns the
4083 3940 * next address that needs to be unlocked.
4084 3941 * Should be called with the hash lock held.
4085 3942 */
4086 3943 static caddr_t
4087 3944 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4088 3945 {
4089 3946 struct sf_hment *sfhme;
4090 3947 tte_t tteold, ttemod;
4091 3948 int ttesz, ret;
4092 3949
4093 3950 ASSERT(in_hblk_range(hmeblkp, addr));
4094 3951 ASSERT(hmeblkp->hblk_shw_bit == 0);
4095 3952
4096 3953 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4097 3954 ttesz = get_hblk_ttesz(hmeblkp);
4098 3955
4099 3956 HBLKTOHME(sfhme, hmeblkp, addr);
4100 3957 while (addr < endaddr) {
4101 3958 readtte:
4102 3959 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4103 3960 if (TTE_IS_VALID(&tteold)) {
4104 3961
4105 3962 ttemod = tteold;
4106 3963
4107 3964 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4108 3965 &sfhme->hme_tte);
4109 3966
4110 3967 if (ret < 0)
4111 3968 goto readtte;
4112 3969
4113 3970 if (hmeblkp->hblk_lckcnt == 0)
4114 3971 panic("zero hblk lckcnt");
4115 3972
4116 3973 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4117 3974 (uintptr_t)endaddr)
4118 3975 panic("can't unlock large tte");
4119 3976
4120 3977 ASSERT(hmeblkp->hblk_lckcnt > 0);
4121 3978 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4122 3979 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4123 3980 } else {
4124 3981 panic("sfmmu_hblk_unlock: invalid tte");
4125 3982 }
4126 3983 addr += TTEBYTES(ttesz);
4127 3984 sfhme++;
4128 3985 }
4129 3986 return (addr);
4130 3987 }
4131 3988
4132 3989 /*
4133 3990 * Physical Address Mapping Framework
4134 3991 *
4135 3992 * General rules:
4136 3993 *
4137 3994 * (1) Applies only to seg_kmem memory pages. To make things easier,
4138 3995 * seg_kpm addresses are also accepted by the routines, but nothing
4139 3996 * is done with them since by definition their PA mappings are static.
4140 3997 * (2) hat_add_callback() may only be called while holding the page lock
4141 3998 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4142 3999 * or passing HAC_PAGELOCK flag.
4143 4000 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4144 4001 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4145 4002 * callbacks may not sleep or acquire adaptive mutex locks.
4146 4003 * (4) Either prehandler() or posthandler() (but not both) may be specified
4147 4004 * as being NULL. Specifying an errhandler() is optional.
4148 4005 *
4149 4006 * Details of using the framework:
4150 4007 *
4151 4008 * registering a callback (hat_register_callback())
4152 4009 *
4153 4010 * Pass prehandler, posthandler, errhandler addresses
4154 4011 * as described below. If capture_cpus argument is nonzero,
4155 4012 * suspend callback to the prehandler will occur with CPUs
4156 4013 * captured and executing xc_loop() and CPUs will remain
4157 4014 * captured until after the posthandler suspend callback
4158 4015 * occurs.
4159 4016 *
4160 4017 * adding a callback (hat_add_callback())
4161 4018 *
4162 4019 * as_pagelock();
4163 4020 * hat_add_callback();
4164 4021 * save returned pfn in private data structures or program registers;
4165 4022 * as_pageunlock();
4166 4023 *
4167 4024 * prehandler()
4168 4025 *
4169 4026 * Stop all accesses by physical address to this memory page.
4170 4027 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4171 4028 * adaptive locks. The second, SUSPEND, is called at high PIL with
4172 4029 * CPUs captured so adaptive locks may NOT be acquired (and all spin
4173 4030 * locks must be XCALL_PIL or higher locks).
4174 4031 *
4175 4032 * May return the following errors:
4176 4033 * EIO: A fatal error has occurred. This will result in panic.
4177 4034 * EAGAIN: The page cannot be suspended. This will fail the
4178 4035 * relocation.
4179 4036 * 0: Success.
4180 4037 *
4181 4038 * posthandler()
4182 4039 *
4183 4040 * Save new pfn in private data structures or program registers;
4184 4041 * not allowed to fail (non-zero return values will result in panic).
4185 4042 *
4186 4043 * errhandler()
4187 4044 *
4188 4045 * called when an error occurs related to the callback. Currently
4189 4046 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4190 4047 * a page is being freed, but there are still outstanding callback(s)
4191 4048 * registered on the page.
4192 4049 *
4193 4050 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4194 4051 *
4195 4052 * stop using physical address
4196 4053 * hat_delete_callback();
4197 4054 *
4198 4055 */
4199 4056
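/*
 * Illustrative sketch, not part of this change: a minimal, hypothetical user
 * of the framework described above.  Only hat_register_callback(),
 * hat_add_callback() and hat_delete_callback() come from this file; the
 * EXAMPLE_CB_KEY value, the handler functions and the example_* wrappers are
 * made-up illustrations.
 */
#if 0	/* illustrative only, never compiled */
#define	EXAMPLE_CB_KEY	0x58434230	/* hypothetical unique module key */

extern int example_prehandler(caddr_t, uint_t, uint_t, void *);
extern int example_posthandler(caddr_t, uint_t, uint_t, void *, pfn_t);

static id_t example_cb_id;

void
example_cb_init(void)
{
	/* Register once per subsystem and cache the id for later use. */
	example_cb_id = hat_register_callback(EXAMPLE_CB_KEY,
	    example_prehandler, example_posthandler, NULL, 0);
}

int
example_cb_add(caddr_t vaddr, uint_t len, void *pvt, pfn_t *rpfn, void **ckp)
{
	/*
	 * HAC_PAGELOCK asks the hat to lock the page itself; the callback
	 * may fire before this returns, so pvt must already be consistent.
	 */
	return (hat_add_callback(example_cb_id, vaddr, len,
	    HAC_PAGELOCK | HAC_SLEEP, pvt, rpfn, ckp));
}

void
example_cb_delete(caddr_t vaddr, uint_t len, void *pvt, void *cookie)
{
	/* Stop using the physical address before deleting the callback. */
	hat_delete_callback(vaddr, len, pvt, HAC_PAGELOCK, cookie);
}
#endif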
4200 4057 /*
4201 4058 * Register a callback class. Each subsystem should do this once and
4202 4059 * cache the id_t returned for use in setting up and tearing down callbacks.
4203 4060 *
4204 4061 * There is no facility for removing callback IDs once they are created;
4205 4062 * the "key" should be unique for each module, so in case a module is unloaded
4206 4063 * and subsequently re-loaded, we can recycle the module's previous entry.
4207 4064 */
4208 4065 id_t
4209 4066 hat_register_callback(int key,
4210 4067 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4211 4068 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4212 4069 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4213 4070 int capture_cpus)
4214 4071 {
4215 4072 id_t id;
4216 4073
4217 4074 /*
4218 4075 * Search the table for a pre-existing callback associated with
4219 4076 * the identifier "key". If one exists, we re-use that entry in
4220 4077 * the table for this instance, otherwise we assign the next
4221 4078 * available table slot.
4222 4079 */
4223 4080 for (id = 0; id < sfmmu_max_cb_id; id++) {
4224 4081 if (sfmmu_cb_table[id].key == key)
4225 4082 break;
4226 4083 }
4227 4084
4228 4085 if (id == sfmmu_max_cb_id) {
4229 4086 id = sfmmu_cb_nextid++;
4230 4087 if (id >= sfmmu_max_cb_id)
4231 4088 panic("hat_register_callback: out of callback IDs");
4232 4089 }
4233 4090
4234 4091 ASSERT(prehandler != NULL || posthandler != NULL);
4235 4092
4236 4093 sfmmu_cb_table[id].key = key;
4237 4094 sfmmu_cb_table[id].prehandler = prehandler;
4238 4095 sfmmu_cb_table[id].posthandler = posthandler;
4239 4096 sfmmu_cb_table[id].errhandler = errhandler;
4240 4097 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4241 4098
4242 4099 return (id);
4243 4100 }
4244 4101
4245 4102 #define HAC_COOKIE_NONE (void *)-1
4246 4103
4247 4104 /*
4248 4105 * Add relocation callbacks to the specified addr/len which will be called
4249 4106 * when relocating the associated page. See the description of pre and
4250 4107 * posthandler above for more details.
4251 4108 *
4252 4109 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4253 4110 * locked internally so the caller must be able to deal with the callback
4254 4111 * running even before this function has returned. If HAC_PAGELOCK is not
4255 4112 * set, it is assumed that the underlying memory pages are locked.
4256 4113 *
4257 4114 * Since the caller must track the individual page boundaries anyway,
4258 4115 * we only allow a callback to be added to a single page (large
4259 4116 * or small). Thus [addr, addr + len) MUST be contained within a single
4260 4117 * page.
4261 4118 *
4262 4119 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4263 4120 * _provided_that_ a unique parameter is specified for each callback.
4264 4121 * If multiple callbacks are registered on the same range the callback will
4265 4122 * be invoked with each unique parameter. Registering the same callback with
4266 4123 * the same argument more than once will result in corrupted kernel state.
4267 4124 *
4268 4125 * Returns the pfn of the underlying kernel page in *rpfn
4269 4126 * on success, or PFN_INVALID on failure.
4270 4127 *
4271 4128 * cookiep (if passed) provides storage space for an opaque cookie
4272 4129 * to return later to hat_delete_callback(). This cookie makes the callback
4273 4130 * deletion significantly quicker by avoiding a potentially lengthy hash
4274 4131 * search.
4275 4132 *
4276 4133  * Return values:
4277 4134 * 0: success
4278 4135 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4279 4136 * EINVAL: callback ID is not valid
4280 4137 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4281 4138 * space
4282 4139 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4283 4140 */
4284 4141 int
4285 4142 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4286 4143 void *pvt, pfn_t *rpfn, void **cookiep)
4287 4144 {
4288 4145 struct hmehash_bucket *hmebp;
4289 4146 hmeblk_tag hblktag;
4290 4147 struct hme_blk *hmeblkp;
4291 4148 int hmeshift, hashno;
4292 4149 caddr_t saddr, eaddr, baseaddr;
4293 4150 struct pa_hment *pahmep;
4294 4151 struct sf_hment *sfhmep, *osfhmep;
4295 4152 kmutex_t *pml;
4296 4153 tte_t tte;
4297 4154 page_t *pp;
4298 4155 vnode_t *vp;
4299 4156 u_offset_t off;
4300 4157 pfn_t pfn;
4301 4158 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4302 4159 int locked = 0;
4303 4160
4304 4161 /*
4305 4162 * For KPM mappings, just return the physical address since we
4306 4163 * don't need to register any callbacks.
4307 4164 */
4308 4165 if (IS_KPM_ADDR(vaddr)) {
4309 4166 uint64_t paddr;
4310 4167 SFMMU_KPM_VTOP(vaddr, paddr);
4311 4168 *rpfn = btop(paddr);
4312 4169 if (cookiep != NULL)
4313 4170 *cookiep = HAC_COOKIE_NONE;
4314 4171 return (0);
4315 4172 }
4316 4173
4317 4174 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4318 4175 *rpfn = PFN_INVALID;
4319 4176 return (EINVAL);
4320 4177 }
4321 4178
4322 4179 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4323 4180 *rpfn = PFN_INVALID;
4324 4181 return (ENOMEM);
4325 4182 }
4326 4183
4327 4184 sfhmep = &pahmep->sfment;
4328 4185
4329 4186 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4330 4187 eaddr = saddr + len;
4331 4188
4332 4189 rehash:
4333 4190 /* Find the mapping(s) for this page */
4334 4191 for (hashno = TTE64K, hmeblkp = NULL;
4335 4192 hmeblkp == NULL && hashno <= mmu_hashcnt;
4336 4193 hashno++) {
4337 4194 hmeshift = HME_HASH_SHIFT(hashno);
4338 4195 hblktag.htag_id = ksfmmup;
4339 4196 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4340 4197 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4341 4198 hblktag.htag_rehash = hashno;
4342 4199 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4343 4200
4344 4201 SFMMU_HASH_LOCK(hmebp);
4345 4202
4346 4203 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4347 4204
4348 4205 if (hmeblkp == NULL)
4349 4206 SFMMU_HASH_UNLOCK(hmebp);
4350 4207 }
4351 4208
4352 4209 if (hmeblkp == NULL) {
4353 4210 kmem_cache_free(pa_hment_cache, pahmep);
4354 4211 *rpfn = PFN_INVALID;
4355 4212 return (ENXIO);
4356 4213 }
4357 4214
4358 4215 ASSERT(!hmeblkp->hblk_shared);
4359 4216
4360 4217 HBLKTOHME(osfhmep, hmeblkp, saddr);
4361 4218 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4362 4219
4363 4220 if (!TTE_IS_VALID(&tte)) {
4364 4221 SFMMU_HASH_UNLOCK(hmebp);
4365 4222 kmem_cache_free(pa_hment_cache, pahmep);
4366 4223 *rpfn = PFN_INVALID;
4367 4224 return (ENXIO);
4368 4225 }
4369 4226
4370 4227 /*
4371 4228 * Make sure the boundaries for the callback fall within this
4372 4229 * single mapping.
4373 4230 */
4374 4231 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4375 4232 ASSERT(saddr >= baseaddr);
4376 4233 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4377 4234 SFMMU_HASH_UNLOCK(hmebp);
4378 4235 kmem_cache_free(pa_hment_cache, pahmep);
4379 4236 *rpfn = PFN_INVALID;
4380 4237 return (ERANGE);
4381 4238 }
4382 4239
4383 4240 pfn = sfmmu_ttetopfn(&tte, vaddr);
4384 4241
4385 4242 /*
4386 4243 * The pfn may not have a page_t underneath in which case we
4387 4244 * just return it. This can happen if we are doing I/O to a
4388 4245 * static portion of the kernel's address space, for instance.
4389 4246 */
4390 4247 pp = osfhmep->hme_page;
4391 4248 if (pp == NULL) {
4392 4249 SFMMU_HASH_UNLOCK(hmebp);
4393 4250 kmem_cache_free(pa_hment_cache, pahmep);
4394 4251 *rpfn = pfn;
4395 4252 if (cookiep)
4396 4253 *cookiep = HAC_COOKIE_NONE;
4397 4254 return (0);
4398 4255 }
4399 4256 ASSERT(pp == PP_PAGEROOT(pp));
4400 4257
4401 4258 vp = pp->p_vnode;
4402 4259 off = pp->p_offset;
4403 4260
4404 4261 pml = sfmmu_mlist_enter(pp);
4405 4262
4406 4263 if (flags & HAC_PAGELOCK) {
4407 4264 if (!page_trylock(pp, SE_SHARED)) {
4408 4265 /*
4409 4266 * Somebody is holding SE_EXCL lock. Might
4410 4267 * even be hat_page_relocate(). Drop all
4411 4268 * our locks, lookup the page in &kvp, and
4412 4269 * retry. If it doesn't exist in &kvp and &zvp,
4413 4270 * then we must be dealing with a kernel mapped
4414 4271 * page which doesn't actually belong to
4415 4272 * segkmem so we punt.
4416 4273 */
4417 4274 sfmmu_mlist_exit(pml);
4418 4275 SFMMU_HASH_UNLOCK(hmebp);
4419 4276 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4420 4277
4421 4278 /* check zvp before giving up */
4422 4279 if (pp == NULL)
4423 4280 pp = page_lookup(&zvp, (u_offset_t)saddr,
4424 4281 SE_SHARED);
4425 4282
4426 4283 /* Okay, we didn't find it, give up */
4427 4284 if (pp == NULL) {
4428 4285 kmem_cache_free(pa_hment_cache, pahmep);
4429 4286 *rpfn = pfn;
4430 4287 if (cookiep)
4431 4288 *cookiep = HAC_COOKIE_NONE;
4432 4289 return (0);
4433 4290 }
4434 4291 page_unlock(pp);
4435 4292 goto rehash;
4436 4293 }
4437 4294 locked = 1;
4438 4295 }
4439 4296
4440 4297 if (!PAGE_LOCKED(pp) && !panicstr)
4441 4298 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4442 4299
4443 4300 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4444 4301 pp->p_offset != off) {
4445 4302 /*
4446 4303 * The page moved before we got our hands on it. Drop
4447 4304 * all the locks and try again.
4448 4305 */
4449 4306 ASSERT((flags & HAC_PAGELOCK) != 0);
4450 4307 sfmmu_mlist_exit(pml);
4451 4308 SFMMU_HASH_UNLOCK(hmebp);
4452 4309 page_unlock(pp);
4453 4310 locked = 0;
4454 4311 goto rehash;
4455 4312 }
4456 4313
4457 4314 if (!VN_ISKAS(vp)) {
4458 4315 /*
4459 4316 * This is not a segkmem page but another page which
4460 4317 * has been kernel mapped. It had better have at least
4461 4318 * a share lock on it. Return the pfn.
4462 4319 */
4463 4320 sfmmu_mlist_exit(pml);
4464 4321 SFMMU_HASH_UNLOCK(hmebp);
4465 4322 if (locked)
4466 4323 page_unlock(pp);
4467 4324 kmem_cache_free(pa_hment_cache, pahmep);
4468 4325 ASSERT(PAGE_LOCKED(pp));
4469 4326 *rpfn = pfn;
4470 4327 if (cookiep)
4471 4328 *cookiep = HAC_COOKIE_NONE;
4472 4329 return (0);
4473 4330 }
4474 4331
4475 4332 /*
4476 4333 * Setup this pa_hment and link its embedded dummy sf_hment into
4477 4334 * the mapping list.
4478 4335 */
4479 4336 pp->p_share++;
4480 4337 pahmep->cb_id = callback_id;
4481 4338 pahmep->addr = vaddr;
4482 4339 pahmep->len = len;
4483 4340 pahmep->refcnt = 1;
4484 4341 pahmep->flags = 0;
4485 4342 pahmep->pvt = pvt;
4486 4343
4487 4344 sfhmep->hme_tte.ll = 0;
4488 4345 sfhmep->hme_data = pahmep;
4489 4346 sfhmep->hme_prev = osfhmep;
4490 4347 sfhmep->hme_next = osfhmep->hme_next;
4491 4348
4492 4349 if (osfhmep->hme_next)
4493 4350 osfhmep->hme_next->hme_prev = sfhmep;
4494 4351
4495 4352 osfhmep->hme_next = sfhmep;
4496 4353
4497 4354 sfmmu_mlist_exit(pml);
4498 4355 SFMMU_HASH_UNLOCK(hmebp);
4499 4356
4500 4357 if (locked)
4501 4358 page_unlock(pp);
4502 4359
4503 4360 *rpfn = pfn;
4504 4361 if (cookiep)
4505 4362 *cookiep = (void *)pahmep;
4506 4363
4507 4364 return (0);
4508 4365 }
4509 4366
4510 4367 /*
4511 4368 * Remove the relocation callbacks from the specified addr/len.
4512 4369 */
4513 4370 void
4514 4371 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4515 4372 void *cookie)
4516 4373 {
4517 4374 struct hmehash_bucket *hmebp;
4518 4375 hmeblk_tag hblktag;
4519 4376 struct hme_blk *hmeblkp;
4520 4377 int hmeshift, hashno;
4521 4378 caddr_t saddr;
4522 4379 struct pa_hment *pahmep;
4523 4380 struct sf_hment *sfhmep, *osfhmep;
4524 4381 kmutex_t *pml;
4525 4382 tte_t tte;
4526 4383 page_t *pp;
4527 4384 vnode_t *vp;
4528 4385 u_offset_t off;
4529 4386 int locked = 0;
4530 4387
4531 4388 /*
4532 4389 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4533 4390 * remove so just return.
4534 4391 */
4535 4392 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4536 4393 return;
4537 4394
4538 4395 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4539 4396
4540 4397 rehash:
4541 4398 /* Find the mapping(s) for this page */
4542 4399 for (hashno = TTE64K, hmeblkp = NULL;
4543 4400 hmeblkp == NULL && hashno <= mmu_hashcnt;
4544 4401 hashno++) {
4545 4402 hmeshift = HME_HASH_SHIFT(hashno);
4546 4403 hblktag.htag_id = ksfmmup;
4547 4404 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4548 4405 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4549 4406 hblktag.htag_rehash = hashno;
4550 4407 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4551 4408
4552 4409 SFMMU_HASH_LOCK(hmebp);
4553 4410
4554 4411 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4555 4412
4556 4413 if (hmeblkp == NULL)
4557 4414 SFMMU_HASH_UNLOCK(hmebp);
4558 4415 }
4559 4416
4560 4417 if (hmeblkp == NULL)
4561 4418 return;
4562 4419
4563 4420 ASSERT(!hmeblkp->hblk_shared);
4564 4421
4565 4422 HBLKTOHME(osfhmep, hmeblkp, saddr);
4566 4423
4567 4424 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4568 4425 if (!TTE_IS_VALID(&tte)) {
4569 4426 SFMMU_HASH_UNLOCK(hmebp);
4570 4427 return;
4571 4428 }
4572 4429
4573 4430 pp = osfhmep->hme_page;
4574 4431 if (pp == NULL) {
4575 4432 SFMMU_HASH_UNLOCK(hmebp);
4576 4433 ASSERT(cookie == NULL);
4577 4434 return;
4578 4435 }
4579 4436
4580 4437 vp = pp->p_vnode;
4581 4438 off = pp->p_offset;
4582 4439
4583 4440 pml = sfmmu_mlist_enter(pp);
4584 4441
4585 4442 if (flags & HAC_PAGELOCK) {
4586 4443 if (!page_trylock(pp, SE_SHARED)) {
4587 4444 /*
4588 4445 * Somebody is holding SE_EXCL lock. Might
4589 4446 * even be hat_page_relocate(). Drop all
4590 4447 * our locks, lookup the page in &kvp, and
4591 4448 * retry. If it doesn't exist in &kvp and &zvp,
4592 4449 * then we must be dealing with a kernel mapped
4593 4450 * page which doesn't actually belong to
4594 4451 * segkmem so we punt.
4595 4452 */
4596 4453 sfmmu_mlist_exit(pml);
4597 4454 SFMMU_HASH_UNLOCK(hmebp);
4598 4455 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4599 4456 /* check zvp before giving up */
4600 4457 if (pp == NULL)
4601 4458 pp = page_lookup(&zvp, (u_offset_t)saddr,
4602 4459 SE_SHARED);
4603 4460
4604 4461 if (pp == NULL) {
4605 4462 ASSERT(cookie == NULL);
4606 4463 return;
4607 4464 }
4608 4465 page_unlock(pp);
4609 4466 goto rehash;
4610 4467 }
4611 4468 locked = 1;
4612 4469 }
4613 4470
4614 4471 ASSERT(PAGE_LOCKED(pp));
4615 4472
4616 4473 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4617 4474 pp->p_offset != off) {
4618 4475 /*
4619 4476 * The page moved before we got our hands on it. Drop
4620 4477 * all the locks and try again.
4621 4478 */
4622 4479 ASSERT((flags & HAC_PAGELOCK) != 0);
4623 4480 sfmmu_mlist_exit(pml);
4624 4481 SFMMU_HASH_UNLOCK(hmebp);
4625 4482 page_unlock(pp);
4626 4483 locked = 0;
4627 4484 goto rehash;
4628 4485 }
4629 4486
4630 4487 if (!VN_ISKAS(vp)) {
4631 4488 /*
4632 4489 * This is not a segkmem page but another page which
4633 4490 * has been kernel mapped.
4634 4491 */
4635 4492 sfmmu_mlist_exit(pml);
4636 4493 SFMMU_HASH_UNLOCK(hmebp);
4637 4494 if (locked)
4638 4495 page_unlock(pp);
4639 4496 ASSERT(cookie == NULL);
4640 4497 return;
4641 4498 }
4642 4499
4643 4500 if (cookie != NULL) {
4644 4501 pahmep = (struct pa_hment *)cookie;
4645 4502 sfhmep = &pahmep->sfment;
4646 4503 } else {
4647 4504 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4648 4505 sfhmep = sfhmep->hme_next) {
4649 4506
4650 4507 /*
4651 4508 * skip va<->pa mappings
4652 4509 */
4653 4510 if (!IS_PAHME(sfhmep))
4654 4511 continue;
4655 4512
4656 4513 pahmep = sfhmep->hme_data;
4657 4514 ASSERT(pahmep != NULL);
4658 4515
4659 4516 /*
4660 4517 * if pa_hment matches, remove it
4661 4518 */
4662 4519 if ((pahmep->pvt == pvt) &&
4663 4520 (pahmep->addr == vaddr) &&
4664 4521 (pahmep->len == len)) {
4665 4522 break;
4666 4523 }
4667 4524 }
4668 4525 }
4669 4526
4670 4527 if (sfhmep == NULL) {
4671 4528 if (!panicstr) {
4672 4529 panic("hat_delete_callback: pa_hment not found, pp %p",
4673 4530 (void *)pp);
4674 4531 }
4675 4532 return;
4676 4533 }
4677 4534
4678 4535 /*
4679 4536 * Note: at this point a valid kernel mapping must still be
4680 4537 * present on this page.
4681 4538 */
4682 4539 pp->p_share--;
4683 4540 if (pp->p_share <= 0)
4684 4541 panic("hat_delete_callback: zero p_share");
4685 4542
4686 4543 if (--pahmep->refcnt == 0) {
4687 4544 if (pahmep->flags != 0)
4688 4545 panic("hat_delete_callback: pa_hment is busy");
4689 4546
4690 4547 /*
4691 4548 * Remove sfhmep from the mapping list for the page.
4692 4549 */
4693 4550 if (sfhmep->hme_prev) {
4694 4551 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4695 4552 } else {
4696 4553 pp->p_mapping = sfhmep->hme_next;
4697 4554 }
4698 4555
4699 4556 if (sfhmep->hme_next)
4700 4557 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4701 4558
4702 4559 sfmmu_mlist_exit(pml);
4703 4560 SFMMU_HASH_UNLOCK(hmebp);
4704 4561
4705 4562 if (locked)
4706 4563 page_unlock(pp);
4707 4564
4708 4565 kmem_cache_free(pa_hment_cache, pahmep);
4709 4566 return;
4710 4567 }
4711 4568
4712 4569 sfmmu_mlist_exit(pml);
4713 4570 SFMMU_HASH_UNLOCK(hmebp);
4714 4571 if (locked)
4715 4572 page_unlock(pp);
4716 4573 }
4717 4574
4718 4575 /*
4719 4576 * hat_probe returns 1 if the translation for the address 'addr' is
4720 4577 * loaded, zero otherwise.
4721 4578 *
4722 4579  * hat_probe should be used only for advisory purposes because it may
4723 4580 * occasionally return the wrong value. The implementation must guarantee that
4724 4581 * returning the wrong value is a very rare event. hat_probe is used
4725 4582 * to implement optimizations in the segment drivers.
4726 4583 *
4727 4584 */
4728 4585 int
4729 4586 hat_probe(struct hat *sfmmup, caddr_t addr)
4730 4587 {
4731 4588 pfn_t pfn;
4732 4589 tte_t tte;
4733 4590
4734 4591 ASSERT(sfmmup != NULL);
4735 4592
4736 4593 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4737 4594
4738 4595 if (sfmmup == ksfmmup) {
4739 4596 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4740 4597 == PFN_SUSPENDED) {
4741 4598 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4742 4599 }
4743 4600 } else {
4744 4601 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4745 4602 }
4746 4603
4747 4604 if (pfn != PFN_INVALID)
4748 4605 return (1);
4749 4606 else
4750 4607 return (0);
4751 4608 }
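/*
 * A minimal sketch of an advisory caller (hypothetical segment-driver
 * fragment; seg, s_as and addr here are assumptions, for illustration
 * only): skip redundant fault handling when a translation is very
 * likely already loaded.
 *
 *	if (hat_probe(seg->s_as->a_hat, addr))
 *		return (0);
 *
 * Because hat_probe() may occasionally return the wrong value, its
 * result may only be used as a hint, never for correctness.
 */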
4752 4609
4753 4610 ssize_t
4754 4611 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4755 4612 {
4756 4613 tte_t tte;
4757 4614
4758 4615 if (sfmmup == ksfmmup) {
4759 4616 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4760 4617 return (-1);
4761 4618 }
4762 4619 } else {
4763 4620 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4764 4621 return (-1);
4765 4622 }
4766 4623 }
4767 4624
4768 4625 ASSERT(TTE_IS_VALID(&tte));
4769 4626 return (TTEBYTES(TTE_CSZ(&tte)));
4770 4627 }
4771 4628
4772 4629 uint_t
4773 4630 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4774 4631 {
4775 4632 tte_t tte;
4776 4633
4777 4634 if (sfmmup == ksfmmup) {
4778 4635 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4779 4636 tte.ll = 0;
4780 4637 }
4781 4638 } else {
4782 4639 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4783 4640 tte.ll = 0;
4784 4641 }
4785 4642 }
4786 4643 if (TTE_IS_VALID(&tte)) {
4787 4644 *attr = sfmmu_ptov_attr(&tte);
4788 4645 return (0);
4789 4646 }
4790 4647 *attr = 0;
4791 4648 return ((uint_t)0xffffffff);
4792 4649 }
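/*
 * A minimal sketch (hypothetical debugging fragment; vaddr is an
 * assumption) showing how the two queries above combine to inspect a
 * kernel mapping:
 *
 *	ssize_t sz = hat_getpagesize(kas.a_hat, vaddr);
 *	uint_t attr;
 *
 *	if (sz != -1 && hat_getattr(kas.a_hat, vaddr, &attr) == 0)
 *		cmn_err(CE_CONT, "mapped with %ld-byte pages, attr 0x%x\n",
 *		    sz, attr);
 *
 * hat_getpagesize() returns -1 and hat_getattr() returns 0xffffffff
 * when no valid translation exists for the address.
 */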
4793 4650
4794 4651 /*
4795 4652 * Enables more attributes on specified address range (ie. logical OR)
4796 4653 */
4797 4654 void
4798 4655 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4799 4656 {
4800 4657 ASSERT(hat->sfmmu_as != NULL);
4801 4658
4802 4659 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4803 4660 }
4804 4661
4805 4662 /*
4806 4663 * Assigns attributes to the specified address range. All the attributes
4807 4664 * are specified.
4808 4665 */
4809 4666 void
4810 4667 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4811 4668 {
4812 4669 ASSERT(hat->sfmmu_as != NULL);
4813 4670
4814 4671 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4815 4672 }
4816 4673
4817 4674 /*
4818 4675  * Remove attributes on the specified address range (ie. logical NAND)
4819 4676 */
4820 4677 void
4821 4678 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4822 4679 {
4823 4680 ASSERT(hat->sfmmu_as != NULL);
4824 4681
4825 4682 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4826 4683 }
4827 4684
4828 4685 /*
4829 4686 * Change attributes on an address range to that specified by attr and mode.
4830 4687 */
4831 4688 static void
4832 4689 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4833 4690 int mode)
4834 4691 {
4835 4692 struct hmehash_bucket *hmebp;
4836 4693 hmeblk_tag hblktag;
4837 4694 int hmeshift, hashno = 1;
4838 4695 struct hme_blk *hmeblkp, *list = NULL;
4839 4696 caddr_t endaddr;
4840 4697 cpuset_t cpuset;
4841 4698 demap_range_t dmr;
4842 4699
4843 4700 CPUSET_ZERO(cpuset);
4844 4701
4845 4702 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4846 4703 ASSERT((len & MMU_PAGEOFFSET) == 0);
4847 4704 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4848 4705
4849 4706 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4850 4707 ((addr + len) > (caddr_t)USERLIMIT)) {
4851 4708 panic("user addr %p in kernel space",
4852 4709 (void *)addr);
4853 4710 }
4854 4711
4855 4712 endaddr = addr + len;
4856 4713 hblktag.htag_id = sfmmup;
4857 4714 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4858 4715 DEMAP_RANGE_INIT(sfmmup, &dmr);
4859 4716
4860 4717 while (addr < endaddr) {
4861 4718 hmeshift = HME_HASH_SHIFT(hashno);
4862 4719 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4863 4720 hblktag.htag_rehash = hashno;
4864 4721 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4865 4722
4866 4723 SFMMU_HASH_LOCK(hmebp);
4867 4724
4868 4725 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4869 4726 if (hmeblkp != NULL) {
4870 4727 ASSERT(!hmeblkp->hblk_shared);
4871 4728 /*
4872 4729 * We've encountered a shadow hmeblk so skip the range
4873 4730 * of the next smaller mapping size.
4874 4731 */
4875 4732 if (hmeblkp->hblk_shw_bit) {
4876 4733 ASSERT(sfmmup != ksfmmup);
4877 4734 ASSERT(hashno > 1);
4878 4735 addr = (caddr_t)P2END((uintptr_t)addr,
4879 4736 TTEBYTES(hashno - 1));
4880 4737 } else {
4881 4738 addr = sfmmu_hblk_chgattr(sfmmup,
4882 4739 hmeblkp, addr, endaddr, &dmr, attr, mode);
4883 4740 }
4884 4741 SFMMU_HASH_UNLOCK(hmebp);
4885 4742 hashno = 1;
4886 4743 continue;
4887 4744 }
4888 4745 SFMMU_HASH_UNLOCK(hmebp);
4889 4746
4890 4747 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4891 4748 /*
4892 4749 * We have traversed the whole list and rehashed
4893 4750 * if necessary without finding the address to chgattr.
4894 4751 * This is ok, so we increment the address by the
4895 4752 * smallest hmeblk range for kernel mappings or for
4896 4753 * user mappings with no large pages, and the largest
4897 4754 * hmeblk range, to account for shadow hmeblks, for
4898 4755 * user mappings with large pages and continue.
4899 4756 */
4900 4757 if (sfmmup == ksfmmup)
4901 4758 addr = (caddr_t)P2END((uintptr_t)addr,
4902 4759 TTEBYTES(1));
4903 4760 else
4904 4761 addr = (caddr_t)P2END((uintptr_t)addr,
4905 4762 TTEBYTES(hashno));
4906 4763 hashno = 1;
4907 4764 } else {
4908 4765 hashno++;
4909 4766 }
4910 4767 }
4911 4768
4912 4769 sfmmu_hblks_list_purge(&list, 0);
4913 4770 DEMAP_RANGE_FLUSH(&dmr);
4914 4771 cpuset = sfmmup->sfmmu_cpusran;
4915 4772 xt_sync(cpuset);
4916 4773 }
4917 4774
4918 4775 /*
4919 4776  * This function changes attributes on a range of addresses in an hmeblk. It
4920 4777  * returns the next address that needs its attributes changed.
4921 4778 * It should be called with the hash lock held.
4922 4779 * XXX It should be possible to optimize chgattr by not flushing every time but
4923 4780 * on the other hand:
4924 4781 * 1. do one flush crosscall.
4925 4782 * 2. only flush if we are increasing permissions (make sure this will work)
4926 4783 */
4927 4784 static caddr_t
4928 4785 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4929 4786 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4930 4787 {
4931 4788 tte_t tte, tteattr, tteflags, ttemod;
4932 4789 struct sf_hment *sfhmep;
4933 4790 int ttesz;
4934 4791 struct page *pp = NULL;
4935 4792 kmutex_t *pml, *pmtx;
4936 4793 int ret;
4937 4794 int use_demap_range;
4938 4795 #if defined(SF_ERRATA_57)
4939 4796 int check_exec;
4940 4797 #endif
4941 4798
4942 4799 ASSERT(in_hblk_range(hmeblkp, addr));
4943 4800 ASSERT(hmeblkp->hblk_shw_bit == 0);
4944 4801 ASSERT(!hmeblkp->hblk_shared);
4945 4802
4946 4803 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4947 4804 ttesz = get_hblk_ttesz(hmeblkp);
4948 4805
4949 4806 /*
4950 4807 * Flush the current demap region if addresses have been
4951 4808 * skipped or the page size doesn't match.
4952 4809 */
4953 4810 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4954 4811 if (use_demap_range) {
4955 4812 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4956 4813 } else if (dmrp != NULL) {
4957 4814 DEMAP_RANGE_FLUSH(dmrp);
4958 4815 }
4959 4816
4960 4817 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4961 4818 #if defined(SF_ERRATA_57)
4962 4819 check_exec = (sfmmup != ksfmmup) &&
4963 4820 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4964 4821 TTE_IS_EXECUTABLE(&tteattr);
4965 4822 #endif
4966 4823 HBLKTOHME(sfhmep, hmeblkp, addr);
4967 4824 while (addr < endaddr) {
4968 4825 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4969 4826 if (TTE_IS_VALID(&tte)) {
4970 4827 if ((tte.ll & tteflags.ll) == tteattr.ll) {
4971 4828 /*
4972 4829 * if the new attr is the same as old
4973 4830 * continue
4974 4831 */
4975 4832 goto next_addr;
4976 4833 }
4977 4834 if (!TTE_IS_WRITABLE(&tteattr)) {
4978 4835 /*
4979 4836 * make sure we clear hw modify bit if we
4980 4837  * make sure we clear the hw modify bit if we
4981 4838  * are removing write protection
4982 4839 tteflags.tte_intlo |= TTE_HWWR_INT;
4983 4840 }
4984 4841
4985 4842 pml = NULL;
4986 4843 pp = sfhmep->hme_page;
4987 4844 if (pp) {
4988 4845 pml = sfmmu_mlist_enter(pp);
4989 4846 }
4990 4847
4991 4848 if (pp != sfhmep->hme_page) {
4992 4849 /*
4993 4850 * tte must have been unloaded.
4994 4851 */
4995 4852 ASSERT(pml);
4996 4853 sfmmu_mlist_exit(pml);
4997 4854 continue;
4998 4855 }
4999 4856
5000 4857 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5001 4858
5002 4859 ttemod = tte;
5003 4860 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5004 4861 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5005 4862
5006 4863 #if defined(SF_ERRATA_57)
5007 4864 if (check_exec && addr < errata57_limit)
5008 4865 ttemod.tte_exec_perm = 0;
5009 4866 #endif
5010 4867 ret = sfmmu_modifytte_try(&tte, &ttemod,
5011 4868 &sfhmep->hme_tte);
5012 4869
5013 4870 if (ret < 0) {
5014 4871 /* tte changed underneath us */
5015 4872 if (pml) {
5016 4873 sfmmu_mlist_exit(pml);
5017 4874 }
5018 4875 continue;
5019 4876 }
5020 4877
5021 4878 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5022 4879 /*
5023 4880 * need to sync if we are clearing modify bit.
5024 4881 */
5025 4882 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5026 4883 }
5027 4884
5028 4885 if (pp && PP_ISRO(pp)) {
5029 4886 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5030 4887 pmtx = sfmmu_page_enter(pp);
5031 4888 PP_CLRRO(pp);
5032 4889 sfmmu_page_exit(pmtx);
5033 4890 }
5034 4891 }
5035 4892
5036 4893 if (ret > 0 && use_demap_range) {
5037 4894 DEMAP_RANGE_MARKPG(dmrp, addr);
5038 4895 } else if (ret > 0) {
5039 4896 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5040 4897 }
5041 4898
5042 4899 if (pml) {
5043 4900 sfmmu_mlist_exit(pml);
5044 4901 }
5045 4902 }
5046 4903 next_addr:
5047 4904 addr += TTEBYTES(ttesz);
5048 4905 sfhmep++;
5049 4906 DEMAP_RANGE_NEXTPG(dmrp);
5050 4907 }
5051 4908 return (addr);
5052 4909 }
5053 4910
5054 4911 /*
5055 4912 * This routine converts virtual attributes to physical ones. It will
5056 4913 * update the tteflags field with the tte mask corresponding to the attributes
5057 4914 * affected and it returns the new attributes. It will also clear the modify
5058 4915 * bit if we are taking away write permission. This is necessary since the
5059 4916 * modify bit is the hardware permission bit and we need to clear it in order
5060 4917 * to detect write faults.
5061 4918 */
5062 4919 static uint64_t
5063 4920 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5064 4921 {
5065 4922 tte_t ttevalue;
5066 4923
5067 4924 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5068 4925
5069 4926 switch (mode) {
5070 4927 case SFMMU_CHGATTR:
5071 4928 /* all attributes specified */
5072 4929 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5073 4930 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5074 4931 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5075 4932 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5076 4933 break;
5077 4934 case SFMMU_SETATTR:
5078 4935 ASSERT(!(attr & ~HAT_PROT_MASK));
5079 4936 ttemaskp->ll = 0;
5080 4937 ttevalue.ll = 0;
5081 4938 /*
5082 4939 * a valid tte implies exec and read for sfmmu
5083 4940 * so no need to do anything about them.
5084 4941  * since privileged access implies user access,
5085 4942 * PROT_USER doesn't make sense either.
5086 4943 */
5087 4944 if (attr & PROT_WRITE) {
5088 4945 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5089 4946 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5090 4947 }
5091 4948 break;
5092 4949 case SFMMU_CLRATTR:
5093 4950 /* attributes will be nand with current ones */
5094 4951 if (attr & ~(PROT_WRITE | PROT_USER)) {
5095 4952 panic("sfmmu: attr %x not supported", attr);
5096 4953 }
5097 4954 ttemaskp->ll = 0;
5098 4955 ttevalue.ll = 0;
5099 4956 if (attr & PROT_WRITE) {
5100 4957 /* clear both writable and modify bit */
5101 4958 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5102 4959 }
5103 4960 if (attr & PROT_USER) {
5104 4961 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5105 4962 ttevalue.tte_intlo |= TTE_PRIV_INT;
5106 4963 }
5107 4964 break;
5108 4965 default:
5109 4966 panic("sfmmu_vtop_attr: bad mode %x", mode);
5110 4967 }
5111 4968 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5112 4969 return (ttevalue.ll);
5113 4970 }
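/*
 * Worked example of the mask/value pairs produced above: for
 * SFMMU_SETATTR with PROT_WRITE, both the mask (*ttemaskp) and the
 * returned value are TTE_WRPRM_INT, so the caller's read-modify-write
 *
 *	ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
 *
 * turns on just the writable bit and leaves the rest of the tte alone.
 * For SFMMU_CLRATTR with PROT_WRITE the mask is
 * TTE_WRPRM_INT | TTE_HWWR_INT with a zero value, so the same update
 * clears both the writable bit and the hardware modify bit.
 */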
5114 4971
5115 4972 static uint_t
5116 4973 sfmmu_ptov_attr(tte_t *ttep)
5117 4974 {
5118 4975 uint_t attr;
5119 4976
5120 4977 ASSERT(TTE_IS_VALID(ttep));
5121 4978
5122 4979 attr = PROT_READ;
5123 4980
5124 4981 if (TTE_IS_WRITABLE(ttep)) {
5125 4982 attr |= PROT_WRITE;
5126 4983 }
5127 4984 if (TTE_IS_EXECUTABLE(ttep)) {
5128 4985 attr |= PROT_EXEC;
5129 4986 }
5130 4987 if (!TTE_IS_PRIVILEGED(ttep)) {
5131 4988 attr |= PROT_USER;
5132 4989 }
5133 4990 if (TTE_IS_NFO(ttep)) {
5134 4991 attr |= HAT_NOFAULT;
5135 4992 }
5136 4993 if (TTE_IS_NOSYNC(ttep)) {
5137 4994 attr |= HAT_NOSYNC;
5138 4995 }
5139 4996 if (TTE_IS_SIDEFFECT(ttep)) {
5140 4997 attr |= SFMMU_SIDEFFECT;
5141 4998 }
5142 4999 if (!TTE_IS_VCACHEABLE(ttep)) {
5143 5000 attr |= SFMMU_UNCACHEVTTE;
5144 5001 }
5145 5002 if (!TTE_IS_PCACHEABLE(ttep)) {
5146 5003 attr |= SFMMU_UNCACHEPTTE;
5147 5004 }
5148 5005 return (attr);
5149 5006 }
5150 5007
5151 5008 /*
5152 5009 * hat_chgprot is a deprecated hat call. New segment drivers
5153 5010 * should store all attributes and use hat_*attr calls.
5154 5011 *
5155 5012 * Change the protections in the virtual address range
5156 5013 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5157 5014 * then remove write permission, leaving the other
5158 5015 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5159 5016 *
5160 5017 */
5161 5018 void
5162 5019 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5163 5020 {
5164 5021 struct hmehash_bucket *hmebp;
5165 5022 hmeblk_tag hblktag;
5166 5023 int hmeshift, hashno = 1;
5167 5024 struct hme_blk *hmeblkp, *list = NULL;
5168 5025 caddr_t endaddr;
5169 5026 cpuset_t cpuset;
5170 5027 demap_range_t dmr;
5171 5028
5172 5029 ASSERT((len & MMU_PAGEOFFSET) == 0);
5173 5030 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5174 5031
5175 5032 ASSERT(sfmmup->sfmmu_as != NULL);
5176 5033
5177 5034 CPUSET_ZERO(cpuset);
5178 5035
5179 5036 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5180 5037 ((addr + len) > (caddr_t)USERLIMIT)) {
5181 5038 panic("user addr %p vprot %x in kernel space",
5182 5039 (void *)addr, vprot);
5183 5040 }
5184 5041 endaddr = addr + len;
5185 5042 hblktag.htag_id = sfmmup;
5186 5043 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5187 5044 DEMAP_RANGE_INIT(sfmmup, &dmr);
5188 5045
5189 5046 while (addr < endaddr) {
5190 5047 hmeshift = HME_HASH_SHIFT(hashno);
5191 5048 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5192 5049 hblktag.htag_rehash = hashno;
5193 5050 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5194 5051
5195 5052 SFMMU_HASH_LOCK(hmebp);
5196 5053
5197 5054 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5198 5055 if (hmeblkp != NULL) {
5199 5056 ASSERT(!hmeblkp->hblk_shared);
5200 5057 /*
5201 5058 * We've encountered a shadow hmeblk so skip the range
5202 5059 * of the next smaller mapping size.
5203 5060 */
5204 5061 if (hmeblkp->hblk_shw_bit) {
5205 5062 ASSERT(sfmmup != ksfmmup);
5206 5063 ASSERT(hashno > 1);
5207 5064 addr = (caddr_t)P2END((uintptr_t)addr,
5208 5065 TTEBYTES(hashno - 1));
5209 5066 } else {
5210 5067 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5211 5068 addr, endaddr, &dmr, vprot);
5212 5069 }
5213 5070 SFMMU_HASH_UNLOCK(hmebp);
5214 5071 hashno = 1;
5215 5072 continue;
5216 5073 }
5217 5074 SFMMU_HASH_UNLOCK(hmebp);
5218 5075
5219 5076 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5220 5077 /*
5221 5078 * We have traversed the whole list and rehashed
5222 5079 * if necessary without finding the address to chgprot.
5223 5080 * This is ok so we increment the address by the
5224 5081 * smallest hmeblk range for kernel mappings and the
5225 5082 * largest hmeblk range, to account for shadow hmeblks,
5226 5083 * for user mappings and continue.
5227 5084 */
5228 5085 if (sfmmup == ksfmmup)
5229 5086 addr = (caddr_t)P2END((uintptr_t)addr,
5230 5087 TTEBYTES(1));
5231 5088 else
5232 5089 addr = (caddr_t)P2END((uintptr_t)addr,
5233 5090 TTEBYTES(hashno));
5234 5091 hashno = 1;
5235 5092 } else {
5236 5093 hashno++;
5237 5094 }
5238 5095 }
5239 5096
5240 5097 sfmmu_hblks_list_purge(&list, 0);
5241 5098 DEMAP_RANGE_FLUSH(&dmr);
5242 5099 cpuset = sfmmup->sfmmu_cpusran;
5243 5100 xt_sync(cpuset);
5244 5101 }
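/*
 * A minimal sketch of the deprecated interface above (hypothetical
 * caller; seg and the range are assumptions): write-protect an
 * MMU_PAGESIZE-aligned range by passing the special ~PROT_WRITE value,
 * which removes write permission and leaves the other permissions
 * unchanged.
 *
 *	hat_chgprot(seg->s_as->a_hat, addr, len, (uint_t)~PROT_WRITE);
 */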
5245 5102
5246 5103 /*
5247 5104  * This function changes protections on a range of addresses in an hmeblk. It
5248 5105  * returns the next address whose protection needs to be changed.
5249 5106 * It should be called with the hash lock held.
5250 5107  * XXX It should be possible to optimize chgprot by not flushing every time but
5251 5108 * on the other hand:
5252 5109 * 1. do one flush crosscall.
5253 5110 * 2. only flush if we are increasing permissions (make sure this will work)
5254 5111 */
5255 5112 static caddr_t
5256 5113 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5257 5114 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5258 5115 {
5259 5116 uint_t pprot;
5260 5117 tte_t tte, ttemod;
5261 5118 struct sf_hment *sfhmep;
5262 5119 uint_t tteflags;
5263 5120 int ttesz;
5264 5121 struct page *pp = NULL;
5265 5122 kmutex_t *pml, *pmtx;
5266 5123 int ret;
5267 5124 int use_demap_range;
5268 5125 #if defined(SF_ERRATA_57)
5269 5126 int check_exec;
5270 5127 #endif
5271 5128
5272 5129 ASSERT(in_hblk_range(hmeblkp, addr));
5273 5130 ASSERT(hmeblkp->hblk_shw_bit == 0);
5274 5131 ASSERT(!hmeblkp->hblk_shared);
5275 5132
5276 5133 #ifdef DEBUG
5277 5134 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5278 5135 (endaddr < get_hblk_endaddr(hmeblkp))) {
5279 5136 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5280 5137 }
5281 5138 #endif /* DEBUG */
5282 5139
5283 5140 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5284 5141 ttesz = get_hblk_ttesz(hmeblkp);
5285 5142
5286 5143 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5287 5144 #if defined(SF_ERRATA_57)
5288 5145 check_exec = (sfmmup != ksfmmup) &&
5289 5146 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5290 5147 ((vprot & PROT_EXEC) == PROT_EXEC);
5291 5148 #endif
5292 5149 HBLKTOHME(sfhmep, hmeblkp, addr);
5293 5150
5294 5151 /*
5295 5152 * Flush the current demap region if addresses have been
5296 5153 * skipped or the page size doesn't match.
5297 5154 */
5298 5155 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5299 5156 if (use_demap_range) {
5300 5157 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5301 5158 } else if (dmrp != NULL) {
5302 5159 DEMAP_RANGE_FLUSH(dmrp);
5303 5160 }
5304 5161
5305 5162 while (addr < endaddr) {
5306 5163 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5307 5164 if (TTE_IS_VALID(&tte)) {
5308 5165 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5309 5166 /*
5310 5167 * if the new protection is the same as old
5311 5168 * continue
5312 5169 */
5313 5170 goto next_addr;
5314 5171 }
5315 5172 pml = NULL;
5316 5173 pp = sfhmep->hme_page;
5317 5174 if (pp) {
5318 5175 pml = sfmmu_mlist_enter(pp);
5319 5176 }
5320 5177 if (pp != sfhmep->hme_page) {
5321 5178 /*
5322 5179  * tte must have been unloaded
5323 5180  * underneath us. Recheck.
5324 5181 */
5325 5182 ASSERT(pml);
5326 5183 sfmmu_mlist_exit(pml);
5327 5184 continue;
5328 5185 }
5329 5186
5330 5187 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5331 5188
5332 5189 ttemod = tte;
5333 5190 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5334 5191 #if defined(SF_ERRATA_57)
5335 5192 if (check_exec && addr < errata57_limit)
5336 5193 ttemod.tte_exec_perm = 0;
5337 5194 #endif
5338 5195 ret = sfmmu_modifytte_try(&tte, &ttemod,
5339 5196 &sfhmep->hme_tte);
5340 5197
5341 5198 if (ret < 0) {
5342 5199 /* tte changed underneath us */
5343 5200 if (pml) {
5344 5201 sfmmu_mlist_exit(pml);
5345 5202 }
5346 5203 continue;
5347 5204 }
5348 5205
5349 5206 if (tteflags & TTE_HWWR_INT) {
5350 5207 /*
5351 5208 * need to sync if we are clearing modify bit.
5352 5209 */
5353 5210 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5354 5211 }
5355 5212
5356 5213 if (pp && PP_ISRO(pp)) {
5357 5214 if (pprot & TTE_WRPRM_INT) {
5358 5215 pmtx = sfmmu_page_enter(pp);
5359 5216 PP_CLRRO(pp);
5360 5217 sfmmu_page_exit(pmtx);
5361 5218 }
5362 5219 }
5363 5220
5364 5221 if (ret > 0 && use_demap_range) {
5365 5222 DEMAP_RANGE_MARKPG(dmrp, addr);
5366 5223 } else if (ret > 0) {
5367 5224 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5368 5225 }
5369 5226
5370 5227 if (pml) {
5371 5228 sfmmu_mlist_exit(pml);
5372 5229 }
5373 5230 }
5374 5231 next_addr:
5375 5232 addr += TTEBYTES(ttesz);
5376 5233 sfhmep++;
5377 5234 DEMAP_RANGE_NEXTPG(dmrp);
5378 5235 }
5379 5236 return (addr);
5380 5237 }
5381 5238
5382 5239 /*
5383 5240 * This routine is deprecated and should only be used by hat_chgprot.
5384 5241 * The correct routine is sfmmu_vtop_attr.
5385 5242 * This routine converts virtual page protections to physical ones. It will
5386 5243 * update the tteflags field with the tte mask corresponding to the protections
5387 5244 * affected and it returns the new protections. It will also clear the modify
5388 5245 * bit if we are taking away write permission. This is necessary since the
5389 5246 * modify bit is the hardware permission bit and we need to clear it in order
5390 5247 * to detect write faults.
5391 5248 * It accepts the following special protections:
5392 5249 * ~PROT_WRITE = remove write permissions.
5393 5250 * ~PROT_USER = remove user permissions.
5394 5251 */
5395 5252 static uint_t
5396 5253 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5397 5254 {
5398 5255 if (vprot == (uint_t)~PROT_WRITE) {
5399 5256 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5400 5257 return (0); /* will cause wrprm to be cleared */
5401 5258 }
5402 5259 if (vprot == (uint_t)~PROT_USER) {
5403 5260 *tteflagsp = TTE_PRIV_INT;
5404 5261 return (0); /* will cause privprm to be cleared */
5405 5262 }
5406 5263 if ((vprot == 0) || (vprot == PROT_USER) ||
5407 5264 ((vprot & PROT_ALL) != vprot)) {
5408 5265 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5409 5266 }
5410 5267
5411 5268 switch (vprot) {
5412 5269 case (PROT_READ):
5413 5270 case (PROT_EXEC):
5414 5271 case (PROT_EXEC | PROT_READ):
5415 5272 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5416 5273 return (TTE_PRIV_INT); /* set prv and clr wrt */
5417 5274 case (PROT_WRITE):
5418 5275 case (PROT_WRITE | PROT_READ):
5419 5276 case (PROT_EXEC | PROT_WRITE):
5420 5277 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5421 5278 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5422 5279 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5423 5280 case (PROT_USER | PROT_READ):
5424 5281 case (PROT_USER | PROT_EXEC):
5425 5282 case (PROT_USER | PROT_EXEC | PROT_READ):
5426 5283 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5427 5284 return (0); /* clr prv and wrt */
5428 5285 case (PROT_USER | PROT_WRITE):
5429 5286 case (PROT_USER | PROT_WRITE | PROT_READ):
5430 5287 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5431 5288 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5432 5289 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5433 5290 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5434 5291 default:
5435 5292 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5436 5293 }
5437 5294 return (0);
5438 5295 }
5439 5296
5440 5297 /*
5441 5298 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5442 5299 * the normal algorithm would take too long for a very large VA range with
5443 5300  * few real mappings. This routine just walks through all HMEs in the global
5444 5301 * hash table to find and remove mappings.
5445 5302 */
5446 5303 static void
5447 5304 hat_unload_large_virtual(
5448 5305 struct hat *sfmmup,
5449 5306 caddr_t startaddr,
5450 5307 size_t len,
5451 5308 uint_t flags,
5452 5309 hat_callback_t *callback)
5453 5310 {
5454 5311 struct hmehash_bucket *hmebp;
5455 5312 struct hme_blk *hmeblkp;
5456 5313 struct hme_blk *pr_hblk = NULL;
5457 5314 struct hme_blk *nx_hblk;
5458 5315 struct hme_blk *list = NULL;
5459 5316 int i;
5460 5317 demap_range_t dmr, *dmrp;
5461 5318 cpuset_t cpuset;
5462 5319 caddr_t endaddr = startaddr + len;
5463 5320 caddr_t sa;
5464 5321 caddr_t ea;
5465 5322 caddr_t cb_sa[MAX_CB_ADDR];
5466 5323 caddr_t cb_ea[MAX_CB_ADDR];
5467 5324 int addr_cnt = 0;
5468 5325 int a = 0;
5469 5326
5470 5327 if (sfmmup->sfmmu_free) {
5471 5328 dmrp = NULL;
5472 5329 } else {
5473 5330 dmrp = &dmr;
5474 5331 DEMAP_RANGE_INIT(sfmmup, dmrp);
5475 5332 }
5476 5333
5477 5334 /*
5478 5335 * Loop through all the hash buckets of HME blocks looking for matches.
5479 5336 */
5480 5337 for (i = 0; i <= UHMEHASH_SZ; i++) {
5481 5338 hmebp = &uhme_hash[i];
5482 5339 SFMMU_HASH_LOCK(hmebp);
5483 5340 hmeblkp = hmebp->hmeblkp;
5484 5341 pr_hblk = NULL;
5485 5342 while (hmeblkp) {
5486 5343 nx_hblk = hmeblkp->hblk_next;
5487 5344
5488 5345 /*
5489 5346 * skip if not this context, if a shadow block or
5490 5347 * if the mapping is not in the requested range
5491 5348 */
5492 5349 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5493 5350 hmeblkp->hblk_shw_bit ||
5494 5351 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5495 5352 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5496 5353 pr_hblk = hmeblkp;
5497 5354 goto next_block;
5498 5355 }
5499 5356
5500 5357 ASSERT(!hmeblkp->hblk_shared);
5501 5358 /*
5502 5359 * unload if there are any current valid mappings
5503 5360 */
5504 5361 if (hmeblkp->hblk_vcnt != 0 ||
5505 5362 hmeblkp->hblk_hmecnt != 0)
5506 5363 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5507 5364 sa, ea, dmrp, flags);
5508 5365
5509 5366 /*
5510 5367 * on unmap we also release the HME block itself, once
5511 5368 * all mappings are gone.
5512 5369 */
5513 5370 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5514 5371 !hmeblkp->hblk_vcnt &&
5515 5372 !hmeblkp->hblk_hmecnt) {
5516 5373 ASSERT(!hmeblkp->hblk_lckcnt);
5517 5374 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5518 5375 &list, 0);
5519 5376 } else {
5520 5377 pr_hblk = hmeblkp;
5521 5378 }
5522 5379
5523 5380 if (callback == NULL)
5524 5381 goto next_block;
5525 5382
5526 5383 /*
5527 5384 * HME blocks may span more than one page, but we may be
5528 5385 * unmapping only one page, so check for a smaller range
5529 5386 * for the callback
5530 5387 */
5531 5388 if (sa < startaddr)
5532 5389 sa = startaddr;
5533 5390 if (--ea > endaddr)
5534 5391 ea = endaddr - 1;
5535 5392
5536 5393 cb_sa[addr_cnt] = sa;
5537 5394 cb_ea[addr_cnt] = ea;
5538 5395 if (++addr_cnt == MAX_CB_ADDR) {
5539 5396 if (dmrp != NULL) {
5540 5397 DEMAP_RANGE_FLUSH(dmrp);
5541 5398 cpuset = sfmmup->sfmmu_cpusran;
5542 5399 xt_sync(cpuset);
5543 5400 }
5544 5401
5545 5402 for (a = 0; a < MAX_CB_ADDR; ++a) {
5546 5403 callback->hcb_start_addr = cb_sa[a];
5547 5404 callback->hcb_end_addr = cb_ea[a];
5548 5405 callback->hcb_function(callback);
5549 5406 }
5550 5407 addr_cnt = 0;
5551 5408 }
5552 5409
5553 5410 next_block:
5554 5411 hmeblkp = nx_hblk;
5555 5412 }
5556 5413 SFMMU_HASH_UNLOCK(hmebp);
5557 5414 }
5558 5415
5559 5416 sfmmu_hblks_list_purge(&list, 0);
5560 5417 if (dmrp != NULL) {
5561 5418 DEMAP_RANGE_FLUSH(dmrp);
5562 5419 cpuset = sfmmup->sfmmu_cpusran;
5563 5420 xt_sync(cpuset);
5564 5421 }
5565 5422
5566 5423 for (a = 0; a < addr_cnt; ++a) {
5567 5424 callback->hcb_start_addr = cb_sa[a];
5568 5425 callback->hcb_end_addr = cb_ea[a];
5569 5426 callback->hcb_function(callback);
5570 5427 }
5571 5428
5572 5429 /*
5573 5430 * Check TSB and TLB page sizes if the process isn't exiting.
5574 5431 */
5575 5432 if (!sfmmup->sfmmu_free)
5576 5433 sfmmu_check_page_sizes(sfmmup, 0);
5577 5434 }
5578 5435
5579 5436 /*
5580 5437 * Unload all the mappings in the range [addr..addr+len). addr and len must
5581 5438 * be MMU_PAGESIZE aligned.
5582 5439 */
5583 5440
5584 5441 extern struct seg *segkmap;
5585 5442 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5586 5443 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5587 5444
5588 5445
5589 5446 void
5590 5447 hat_unload_callback(
5591 5448 struct hat *sfmmup,
5592 5449 caddr_t addr,
5593 5450 size_t len,
5594 5451 uint_t flags,
5595 5452 hat_callback_t *callback)
5596 5453 {
5597 5454 struct hmehash_bucket *hmebp;
5598 5455 hmeblk_tag hblktag;
5599 5456 int hmeshift, hashno, iskernel;
5600 5457 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5601 5458 caddr_t endaddr;
5602 5459 cpuset_t cpuset;
5603 5460 int addr_count = 0;
5604 5461 int a;
5605 5462 caddr_t cb_start_addr[MAX_CB_ADDR];
5606 5463 caddr_t cb_end_addr[MAX_CB_ADDR];
5607 5464 int issegkmap = ISSEGKMAP(sfmmup, addr);
5608 5465 demap_range_t dmr, *dmrp;
5609 5466
5610 5467 ASSERT(sfmmup->sfmmu_as != NULL);
5611 5468
5612 5469 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5613 5470 AS_LOCK_HELD(sfmmup->sfmmu_as));
5614 5471
5615 5472 ASSERT(sfmmup != NULL);
5616 5473 ASSERT((len & MMU_PAGEOFFSET) == 0);
5617 5474 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5618 5475
5619 5476 /*
5620 5477 * Probing through a large VA range (say 63 bits) will be slow, even
5621 5478 * at 4 Meg steps between the probes. So, when the virtual address range
5622 5479 * is very large, search the HME entries for what to unload.
5623 5480 *
5624 5481 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5625 5482 *
5626 5483  * UHMEHASH_SZ is the number of hash buckets to examine
5627 5484 *
5628 5485 */
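	/*
	 * Worked example of that threshold: unloading a 1 TB (2^40 byte)
	 * range would take len >> TTE_PAGE_SHIFT(TTE4M) = 2^18 = 262144
	 * probes at 4M steps; once that count exceeds the UHMEHASH_SZ
	 * buckets it is cheaper to walk the hash once instead.
	 */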
5629 5486 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5630 5487 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5631 5488 return;
5632 5489 }
5633 5490
5634 5491 CPUSET_ZERO(cpuset);
5635 5492
5636 5493 /*
5637 5494 * If the process is exiting, we can save a lot of fuss since
5638 5495 * we'll flush the TLB when we free the ctx anyway.
5639 5496 */
5640 5497 if (sfmmup->sfmmu_free) {
5641 5498 dmrp = NULL;
5642 5499 } else {
5643 5500 dmrp = &dmr;
5644 5501 DEMAP_RANGE_INIT(sfmmup, dmrp);
5645 5502 }
5646 5503
5647 5504 endaddr = addr + len;
5648 5505 hblktag.htag_id = sfmmup;
5649 5506 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5650 5507
5651 5508 /*
5652 5509 * It is likely for the vm to call unload over a wide range of
5653 5510 * addresses that are actually very sparsely populated by
5654 5511 * translations. In order to speed this up the sfmmu hat supports
5655 5512 * the concept of shadow hmeblks. Dummy large page hmeblks that
5656 5513 * correspond to actual small translations are allocated at tteload
5657 5514 * time and are referred to as shadow hmeblks. Now, during unload
5658 5515 * time, we first check if we have a shadow hmeblk for that
5659 5516 * translation. The absence of one means the corresponding address
5660 5517 * range is empty and can be skipped.
5661 5518 *
5662 5519  * The kernel is an exception to the above statement and that is why
5663 5520 * we don't use shadow hmeblks and hash starting from the smallest
5664 5521 * page size.
5665 5522 */
5666 5523 if (sfmmup == KHATID) {
5667 5524 iskernel = 1;
5668 5525 hashno = TTE64K;
5669 5526 } else {
5670 5527 iskernel = 0;
5671 5528 if (mmu_page_sizes == max_mmu_page_sizes) {
5672 5529 hashno = TTE256M;
5673 5530 } else {
5674 5531 hashno = TTE4M;
5675 5532 }
5676 5533 }
5677 5534 while (addr < endaddr) {
5678 5535 hmeshift = HME_HASH_SHIFT(hashno);
5679 5536 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5680 5537 hblktag.htag_rehash = hashno;
5681 5538 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5682 5539
5683 5540 SFMMU_HASH_LOCK(hmebp);
5684 5541
5685 5542 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5686 5543 if (hmeblkp == NULL) {
5687 5544 /*
5688 5545  * didn't find an hmeblk. skip the appropriate
5689 5546 * address range.
5690 5547 */
5691 5548 SFMMU_HASH_UNLOCK(hmebp);
5692 5549 if (iskernel) {
5693 5550 if (hashno < mmu_hashcnt) {
5694 5551 hashno++;
5695 5552 continue;
5696 5553 } else {
5697 5554 hashno = TTE64K;
5698 5555 addr = (caddr_t)roundup((uintptr_t)addr
5699 5556 + 1, MMU_PAGESIZE64K);
5700 5557 continue;
5701 5558 }
5702 5559 }
5703 5560 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5704 5561 (1 << hmeshift));
5705 5562 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5706 5563 ASSERT(hashno == TTE64K);
5707 5564 continue;
5708 5565 }
5709 5566 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5710 5567 hashno = TTE512K;
5711 5568 continue;
5712 5569 }
5713 5570 if (mmu_page_sizes == max_mmu_page_sizes) {
5714 5571 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5715 5572 hashno = TTE4M;
5716 5573 continue;
5717 5574 }
5718 5575 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5719 5576 hashno = TTE32M;
5720 5577 continue;
5721 5578 }
5722 5579 hashno = TTE256M;
5723 5580 continue;
5724 5581 } else {
5725 5582 hashno = TTE4M;
5726 5583 continue;
5727 5584 }
5728 5585 }
5729 5586 ASSERT(hmeblkp);
5730 5587 ASSERT(!hmeblkp->hblk_shared);
5731 5588 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5732 5589 /*
5733 5590 * If the valid count is zero we can skip the range
5734 5591 * mapped by this hmeblk.
5735 5592 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP
5736 5593 * is used by segment drivers as a hint
5737 5594 * that the mapping resource won't be used any longer.
5738 5595 * The best example of this is during exit().
5739 5596 */
5740 5597 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5741 5598 get_hblk_span(hmeblkp));
5742 5599 if ((flags & HAT_UNLOAD_UNMAP) ||
5743 5600 (iskernel && !issegkmap)) {
5744 5601 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5745 5602 &list, 0);
5746 5603 }
5747 5604 SFMMU_HASH_UNLOCK(hmebp);
5748 5605
5749 5606 if (iskernel) {
5750 5607 hashno = TTE64K;
5751 5608 continue;
5752 5609 }
5753 5610 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5754 5611 ASSERT(hashno == TTE64K);
5755 5612 continue;
5756 5613 }
5757 5614 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5758 5615 hashno = TTE512K;
5759 5616 continue;
5760 5617 }
5761 5618 if (mmu_page_sizes == max_mmu_page_sizes) {
5762 5619 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5763 5620 hashno = TTE4M;
5764 5621 continue;
5765 5622 }
5766 5623 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5767 5624 hashno = TTE32M;
5768 5625 continue;
5769 5626 }
5770 5627 hashno = TTE256M;
5771 5628 continue;
5772 5629 } else {
5773 5630 hashno = TTE4M;
5774 5631 continue;
5775 5632 }
5776 5633 }
5777 5634 if (hmeblkp->hblk_shw_bit) {
5778 5635 /*
5779 5636  * If we encounter a shadow hmeblk we know there are
5780 5637 * smaller sized hmeblks mapping the same address space.
5781 5638 * Decrement the hash size and rehash.
5782 5639 */
5783 5640 ASSERT(sfmmup != KHATID);
5784 5641 hashno--;
5785 5642 SFMMU_HASH_UNLOCK(hmebp);
5786 5643 continue;
5787 5644 }
5788 5645
5789 5646 /*
5790 5647 * track callback address ranges.
5791 5648 * only start a new range when it's not contiguous
5792 5649 */
5793 5650 if (callback != NULL) {
5794 5651 if (addr_count > 0 &&
5795 5652 addr == cb_end_addr[addr_count - 1])
5796 5653 --addr_count;
5797 5654 else
5798 5655 cb_start_addr[addr_count] = addr;
5799 5656 }
5800 5657
5801 5658 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5802 5659 dmrp, flags);
5803 5660
5804 5661 if (callback != NULL)
5805 5662 cb_end_addr[addr_count++] = addr;
5806 5663
5807 5664 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5808 5665 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5809 5666 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5810 5667 }
5811 5668 SFMMU_HASH_UNLOCK(hmebp);
5812 5669
5813 5670 /*
5814 5671 * Notify our caller as to exactly which pages
5815 5672 * have been unloaded. We do these in clumps,
5816 5673 * to minimize the number of xt_sync()s that need to occur.
5817 5674 */
5818 5675 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5819 5676 if (dmrp != NULL) {
5820 5677 DEMAP_RANGE_FLUSH(dmrp);
5821 5678 cpuset = sfmmup->sfmmu_cpusran;
5822 5679 xt_sync(cpuset);
5823 5680 }
5824 5681
5825 5682 for (a = 0; a < MAX_CB_ADDR; ++a) {
5826 5683 callback->hcb_start_addr = cb_start_addr[a];
5827 5684 callback->hcb_end_addr = cb_end_addr[a];
5828 5685 callback->hcb_function(callback);
5829 5686 }
5830 5687 addr_count = 0;
5831 5688 }
5832 5689 if (iskernel) {
5833 5690 hashno = TTE64K;
5834 5691 continue;
5835 5692 }
5836 5693 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5837 5694 ASSERT(hashno == TTE64K);
5838 5695 continue;
5839 5696 }
5840 5697 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5841 5698 hashno = TTE512K;
5842 5699 continue;
5843 5700 }
5844 5701 if (mmu_page_sizes == max_mmu_page_sizes) {
5845 5702 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5846 5703 hashno = TTE4M;
5847 5704 continue;
5848 5705 }
5849 5706 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5850 5707 hashno = TTE32M;
5851 5708 continue;
5852 5709 }
5853 5710 hashno = TTE256M;
5854 5711 } else {
5855 5712 hashno = TTE4M;
5856 5713 }
5857 5714 }
5858 5715
5859 5716 sfmmu_hblks_list_purge(&list, 0);
5860 5717 if (dmrp != NULL) {
5861 5718 DEMAP_RANGE_FLUSH(dmrp);
5862 5719 cpuset = sfmmup->sfmmu_cpusran;
5863 5720 xt_sync(cpuset);
5864 5721 }
5865 5722 if (callback && addr_count != 0) {
5866 5723 for (a = 0; a < addr_count; ++a) {
5867 5724 callback->hcb_start_addr = cb_start_addr[a];
5868 5725 callback->hcb_end_addr = cb_end_addr[a];
5869 5726 callback->hcb_function(callback);
5870 5727 }
5871 5728 }
5872 5729
5873 5730 /*
5874 5731 * Check TSB and TLB page sizes if the process isn't exiting.
5875 5732 */
5876 5733 if (!sfmmup->sfmmu_free)
5877 5734 sfmmu_check_page_sizes(sfmmup, 0);
5878 5735 }
5879 5736
5880 5737 /*
5881 5738 * Unload all the mappings in the range [addr..addr+len). addr and len must
5882 5739 * be MMU_PAGESIZE aligned.
5883 5740 */
5884 5741 void
5885 5742 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5886 5743 {
5887 5744 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5888 5745 }
5889 5746
5890 5747
5891 5748 /*
5892 5749 * Find the largest mapping size for this page.
5893 5750 */
5894 5751 int
5895 5752 fnd_mapping_sz(page_t *pp)
5896 5753 {
5897 5754 int sz;
5898 5755 int p_index;
5899 5756
5900 5757 p_index = PP_MAPINDEX(pp);
5901 5758
5902 5759 sz = 0;
5903 5760 p_index >>= 1; /* don't care about 8K bit */
5904 5761 for (; p_index; p_index >>= 1) {
5905 5762 sz++;
5906 5763 }
5907 5764
5908 5765 return (sz);
5909 5766 }
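/*
 * Worked example: PP_MAPINDEX() is a bit vector with one bit per page
 * size, bit 0 being the 8K bit.  For p_index = 0x5 (8K and 512K
 * mappings present) the loop above sees 0x2 after the initial shift,
 * iterates twice before reaching zero, and returns sz == 2, i.e.
 * TTE512K, the largest mapping size covering the page.
 */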
5910 5767
5911 5768 /*
5912 5769 * This function unloads a range of addresses for an hmeblk.
5913 5770 * It returns the next address to be unloaded.
5914 5771 * It should be called with the hash lock held.
5915 5772 */
5916 5773 static caddr_t
5917 5774 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5918 5775 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5919 5776 {
5920 5777 tte_t tte, ttemod;
5921 5778 struct sf_hment *sfhmep;
5922 5779 int ttesz;
5923 5780 long ttecnt;
5924 5781 page_t *pp;
5925 5782 kmutex_t *pml;
5926 5783 int ret;
5927 5784 int use_demap_range;
5928 5785
5929 5786 ASSERT(in_hblk_range(hmeblkp, addr));
5930 5787 ASSERT(!hmeblkp->hblk_shw_bit);
5931 5788 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5932 5789 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5933 5790 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5934 5791
5935 5792 #ifdef DEBUG
5936 5793 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5937 5794 (endaddr < get_hblk_endaddr(hmeblkp))) {
5938 5795 panic("sfmmu_hblk_unload: partial unload of large page");
5939 5796 }
5940 5797 #endif /* DEBUG */
5941 5798
5942 5799 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5943 5800 ttesz = get_hblk_ttesz(hmeblkp);
5944 5801
5945 5802 use_demap_range = ((dmrp == NULL) ||
5946 5803 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5947 5804
5948 5805 if (use_demap_range) {
5949 5806 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5950 5807 } else if (dmrp != NULL) {
5951 5808 DEMAP_RANGE_FLUSH(dmrp);
5952 5809 }
5953 5810 ttecnt = 0;
5954 5811 HBLKTOHME(sfhmep, hmeblkp, addr);
5955 5812
5956 5813 while (addr < endaddr) {
5957 5814 pml = NULL;
5958 5815 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5959 5816 if (TTE_IS_VALID(&tte)) {
5960 5817 pp = sfhmep->hme_page;
5961 5818 if (pp != NULL) {
5962 5819 pml = sfmmu_mlist_enter(pp);
5963 5820 }
5964 5821
5965 5822 /*
5966 5823 * Verify if hme still points to 'pp' now that
5967 5824 * we have p_mapping lock.
5968 5825 */
5969 5826 if (sfhmep->hme_page != pp) {
5970 5827 if (pp != NULL && sfhmep->hme_page != NULL) {
5971 5828 ASSERT(pml != NULL);
5972 5829 sfmmu_mlist_exit(pml);
5973 5830 /* Re-start this iteration. */
5974 5831 continue;
5975 5832 }
5976 5833 ASSERT((pp != NULL) &&
5977 5834 (sfhmep->hme_page == NULL));
5978 5835 goto tte_unloaded;
5979 5836 }
5980 5837
5981 5838 /*
5982 5839 * This point on we have both HASH and p_mapping
5983 5840 * lock.
5984 5841 */
5985 5842 ASSERT(pp == sfhmep->hme_page);
5986 5843 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5987 5844
5988 5845 /*
5989 5846 * We need to loop on modify tte because it is
5990 5847 * possible for pagesync to come along and
5991 5848 * change the software bits beneath us.
5992 5849 *
5993 5850 * Page_unload can also invalidate the tte after
5994 5851 * we read tte outside of p_mapping lock.
5995 5852 */
5996 5853 again:
5997 5854 ttemod = tte;
5998 5855
5999 5856 TTE_SET_INVALID(&ttemod);
6000 5857 ret = sfmmu_modifytte_try(&tte, &ttemod,
6001 5858 &sfhmep->hme_tte);
6002 5859
6003 5860 if (ret <= 0) {
6004 5861 if (TTE_IS_VALID(&tte)) {
6005 5862 ASSERT(ret < 0);
6006 5863 goto again;
6007 5864 }
6008 5865 if (pp != NULL) {
6009 5866 panic("sfmmu_hblk_unload: pp = 0x%p "
6010 5867 "tte became invalid under mlist"
6011 5868 " lock = 0x%p", (void *)pp,
6012 5869 (void *)pml);
6013 5870 }
6014 5871 continue;
6015 5872 }
6016 5873
6017 5874 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6018 5875 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6019 5876 }
6020 5877
6021 5878 /*
6022 5879 * Ok- we invalidated the tte. Do the rest of the job.
6023 5880 */
6024 5881 ttecnt++;
6025 5882
6026 5883 if (flags & HAT_UNLOAD_UNLOCK) {
6027 5884 ASSERT(hmeblkp->hblk_lckcnt > 0);
6028 5885 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6029 5886 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6030 5887 }
6031 5888
6032 5889 /*
6033 5890 * Normally we would need to flush the page
6034 5891 * from the virtual cache at this point in
6035 5892 * order to prevent a potential cache alias
6036 5893 * inconsistency.
6037 5894 * The particular scenario we need to worry
6038 5895 * about is:
6039 5896 * Given: va1 and va2 are two virtual address
6040 5897 * that alias and map the same physical
6041 5898 * address.
6042 5899 * 1. mapping exists from va1 to pa and data
6043 5900 * has been read into the cache.
6044 5901 * 2. unload va1.
6045 5902 * 3. load va2 and modify data using va2.
6046 5903  * 4. unload va2.
6047 5904 * 5. load va1 and reference data. Unless we
6048 5905 * flush the data cache when we unload we will
6049 5906 * get stale data.
6050 5907 * Fortunately, page coloring eliminates the
6051 5908 * above scenario by remembering the color a
6052 5909 * physical page was last or is currently
6053 5910 * mapped to. Now, we delay the flush until
6054 5911 * the loading of translations. Only when the
6055 5912 * new translation is of a different color
6056 5913 * are we forced to flush.
6057 5914 */
6058 5915 if (use_demap_range) {
6059 5916 /*
6060 5917 * Mark this page as needing a demap.
6061 5918 */
6062 5919 DEMAP_RANGE_MARKPG(dmrp, addr);
6063 5920 } else {
6064 5921 ASSERT(sfmmup != NULL);
6065 5922 ASSERT(!hmeblkp->hblk_shared);
6066 5923 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6067 5924 sfmmup->sfmmu_free, 0);
6068 5925 }
6069 5926
6070 5927 if (pp) {
6071 5928 /*
6072 5929 * Remove the hment from the mapping list
6073 5930 */
6074 5931 ASSERT(hmeblkp->hblk_hmecnt > 0);
6075 5932
6076 5933 /*
6077 5934 * Again, we cannot
6078 5935 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6079 5936 */
6080 5937 HME_SUB(sfhmep, pp);
6081 5938 membar_stst();
6082 5939 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6083 5940 }
6084 5941
6085 5942 ASSERT(hmeblkp->hblk_vcnt > 0);
6086 5943 atomic_dec_16(&hmeblkp->hblk_vcnt);
6087 5944
6088 5945 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6089 5946 !hmeblkp->hblk_lckcnt);
6090 5947
6091 5948 #ifdef VAC
6092 5949 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6093 5950 if (PP_ISTNC(pp)) {
6094 5951 /*
6095 5952 * If page was temporary
6096 5953 * uncached, try to recache
6097 5954 * it. Note that HME_SUB() was
6098 5955 * called above so p_index and
6099 5956 * mlist had been updated.
6100 5957 */
6101 5958 conv_tnc(pp, ttesz);
6102 5959 } else if (pp->p_mapping == NULL) {
6103 5960 ASSERT(kpm_enable);
6104 5961 /*
6105 5962 * Page is marked to be in VAC conflict
6106 5963 * to an existing kpm mapping and/or is
6107 5964 * kpm mapped using only the regular
6108 5965 * pagesize.
6109 5966 */
6110 5967 sfmmu_kpm_hme_unload(pp);
6111 5968 }
6112 5969 }
6113 5970 #endif /* VAC */
6114 5971 } else if ((pp = sfhmep->hme_page) != NULL) {
6115 5972 /*
6116 5973 * TTE is invalid but the hme
6117 5974 * still exists. let pageunload
6118 5975 * complete its job.
6119 5976 */
6120 5977 ASSERT(pml == NULL);
6121 5978 pml = sfmmu_mlist_enter(pp);
6122 5979 if (sfhmep->hme_page != NULL) {
6123 5980 sfmmu_mlist_exit(pml);
6124 5981 continue;
6125 5982 }
6126 5983 ASSERT(sfhmep->hme_page == NULL);
6127 5984 } else if (hmeblkp->hblk_hmecnt != 0) {
6128 5985 /*
6129 5986 * pageunload may have not finished decrementing
6130 5987  * pageunload may not have finished decrementing
6131 5988 * wait for pageunload to finish. Rely on pageunload
6132 5989 * to decrement hblk_hmecnt after hblk_vcnt.
6133 5990 */
6134 5991 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6135 5992 ASSERT(pml == NULL);
6136 5993 if (pf_is_memory(pfn)) {
6137 5994 pp = page_numtopp_nolock(pfn);
6138 5995 if (pp != NULL) {
6139 5996 pml = sfmmu_mlist_enter(pp);
6140 5997 sfmmu_mlist_exit(pml);
6141 5998 pml = NULL;
6142 5999 }
6143 6000 }
6144 6001 }
6145 6002
6146 6003 tte_unloaded:
6147 6004 /*
6148 6005 * At this point, the tte we are looking at
6149 6006 * should be unloaded, and hme has been unlinked
6150 6007 * from page too. This is important because in
6151 6008 * pageunload, it does ttesync() then HME_SUB.
6152 6009 * We need to make sure HME_SUB has been completed
6153 6010 * so we know ttesync() has been completed. Otherwise,
6154 6011  * at exit time, after return from the hat layer, VM will
6155 6012  * release the as structure, which hat_setstat() (called
6156 6013  * by ttesync()) needs.
6157 6014 */
6158 6015 #ifdef DEBUG
6159 6016 {
6160 6017 tte_t dtte;
6161 6018
6162 6019 ASSERT(sfhmep->hme_page == NULL);
6163 6020
6164 6021 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6165 6022 ASSERT(!TTE_IS_VALID(&dtte));
6166 6023 }
6167 6024 #endif
6168 6025
6169 6026 if (pml) {
6170 6027 sfmmu_mlist_exit(pml);
6171 6028 }
6172 6029
6173 6030 addr += TTEBYTES(ttesz);
6174 6031 sfhmep++;
6175 6032 DEMAP_RANGE_NEXTPG(dmrp);
6176 6033 }
6177 6034 /*
6178 6035 * For shared hmeblks this routine is only called when region is freed
6179 6036 * and no longer referenced. So no need to decrement ttecnt
6180 6037 * in the region structure here.
6181 6038 */
6182 6039 if (ttecnt > 0 && sfmmup != NULL) {
6183 6040 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6184 6041 }
6185 6042 return (addr);
6186 6043 }
6187 6044
6188 6045 /*
6189 6046 * Invalidate a virtual address range for the local CPU.
6190 6047 * For best performance ensure that the va range is completely
6191 6048 * mapped, otherwise the entire TLB will be flushed.
6192 6049 */
6193 6050 void
6194 6051 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6195 6052 {
6196 6053 ssize_t sz;
6197 6054 caddr_t endva = va + size;
6198 6055
6199 6056 while (va < endva) {
6200 6057 sz = hat_getpagesize(sfmmup, va);
6201 6058 if (sz < 0) {
6202 6059 vtag_flushall();
6203 6060 break;
6204 6061 }
6205 6062 vtag_flushpage(va, (uint64_t)sfmmup);
6206 6063 va += sz;
6207 6064 }
6208 6065 }
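/*
 * A minimal sketch of a caller (hypothetical fragment; buf and bufsz are
 * assumptions): flush the local CPU's TLB entries for a buffer that was
 * just remapped.
 *
 *	hat_flush_range(kas.a_hat, buf, bufsz);
 *
 * If any part of [buf, buf + bufsz) is unmapped, hat_getpagesize()
 * returns -1 inside the loop above and the entire local TLB is flushed
 * instead.
 */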
6209 6066
6210 6067 /*
6211 6068 * Synchronize all the mappings in the range [addr..addr+len).
6212 6069 * Can be called with clearflag having two states:
6213 6070 * HAT_SYNC_DONTZERO means just return the rm stats
6214 6071 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6215 6072 */
6216 6073 void
6217 6074 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6218 6075 {
6219 6076 struct hmehash_bucket *hmebp;
6220 6077 hmeblk_tag hblktag;
6221 6078 int hmeshift, hashno = 1;
6222 6079 struct hme_blk *hmeblkp, *list = NULL;
6223 6080 caddr_t endaddr;
6224 6081 cpuset_t cpuset;
6225 6082
6226 6083 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6227 6084 ASSERT((len & MMU_PAGEOFFSET) == 0);
6228 6085 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6229 6086 (clearflag == HAT_SYNC_ZERORM));
6230 6087
6231 6088 CPUSET_ZERO(cpuset);
6232 6089
6233 6090 endaddr = addr + len;
6234 6091 hblktag.htag_id = sfmmup;
6235 6092 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6236 6093
6237 6094 /*
6238 6095 * Spitfire supports 4 page sizes.
6239 6096 * Most pages are expected to be of the smallest page
6240 6097 * size (8K) and these will not need to be rehashed. 64K
6241 6098 * pages also don't need to be rehashed because the an hmeblk
6242 6099  * pages also don't need to be rehashed because an hmeblk
6243 6100  * spans 64K of address space. 512K pages might need 1 rehash
6244 6101  * and 4M pages 2 rehashes.
6245 6102 while (addr < endaddr) {
6246 6103 hmeshift = HME_HASH_SHIFT(hashno);
6247 6104 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6248 6105 hblktag.htag_rehash = hashno;
6249 6106 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6250 6107
6251 6108 SFMMU_HASH_LOCK(hmebp);
6252 6109
6253 6110 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6254 6111 if (hmeblkp != NULL) {
6255 6112 ASSERT(!hmeblkp->hblk_shared);
6256 6113 /*
6257 6114 * We've encountered a shadow hmeblk so skip the range
6258 6115 * of the next smaller mapping size.
6259 6116 */
6260 6117 if (hmeblkp->hblk_shw_bit) {
6261 6118 ASSERT(sfmmup != ksfmmup);
6262 6119 ASSERT(hashno > 1);
6263 6120 addr = (caddr_t)P2END((uintptr_t)addr,
6264 6121 TTEBYTES(hashno - 1));
6265 6122 } else {
6266 6123 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6267 6124 addr, endaddr, clearflag);
6268 6125 }
6269 6126 SFMMU_HASH_UNLOCK(hmebp);
6270 6127 hashno = 1;
6271 6128 continue;
6272 6129 }
6273 6130 SFMMU_HASH_UNLOCK(hmebp);
6274 6131
6275 6132 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6276 6133 /*
6277 6134 * We have traversed the whole list and rehashed
6278 6135 * if necessary without finding the address to sync.
6279 6136 * This is ok so we increment the address by the
6280 6137 * smallest hmeblk range for kernel mappings and the
6281 6138 * largest hmeblk range, to account for shadow hmeblks,
6282 6139 * for user mappings and continue.
6283 6140 */
6284 6141 if (sfmmup == ksfmmup)
6285 6142 addr = (caddr_t)P2END((uintptr_t)addr,
6286 6143 TTEBYTES(1));
6287 6144 else
6288 6145 addr = (caddr_t)P2END((uintptr_t)addr,
6289 6146 TTEBYTES(hashno));
6290 6147 hashno = 1;
6291 6148 } else {
6292 6149 hashno++;
6293 6150 }
6294 6151 }
6295 6152 sfmmu_hblks_list_purge(&list, 0);
6296 6153 cpuset = sfmmup->sfmmu_cpusran;
6297 6154 xt_sync(cpuset);
6298 6155 }
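/*
 * A minimal sketch of a caller (hypothetical fragment; the range is an
 * assumption): push the ref/mod state for an MMU_PAGESIZE-aligned range
 * into the page structures and clear the rm bits in the ttes.
 *
 *	hat_sync(seg->s_as->a_hat, addr, len, HAT_SYNC_ZERORM);
 */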
6299 6156
6300 6157 static caddr_t
6301 6158 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6302 6159 caddr_t endaddr, int clearflag)
6303 6160 {
6304 6161 tte_t tte, ttemod;
6305 6162 struct sf_hment *sfhmep;
6306 6163 int ttesz;
6307 6164 struct page *pp;
6308 6165 kmutex_t *pml;
6309 6166 int ret;
6310 6167
6311 6168 ASSERT(hmeblkp->hblk_shw_bit == 0);
6312 6169 ASSERT(!hmeblkp->hblk_shared);
6313 6170
6314 6171 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6315 6172
6316 6173 ttesz = get_hblk_ttesz(hmeblkp);
6317 6174 HBLKTOHME(sfhmep, hmeblkp, addr);
6318 6175
6319 6176 while (addr < endaddr) {
6320 6177 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6321 6178 if (TTE_IS_VALID(&tte)) {
6322 6179 pml = NULL;
6323 6180 pp = sfhmep->hme_page;
6324 6181 if (pp) {
6325 6182 pml = sfmmu_mlist_enter(pp);
6326 6183 }
6327 6184 if (pp != sfhmep->hme_page) {
6328 6185 /*
6329 6186  * tte must have been unloaded
6330 6187  * underneath us. Recheck.
6331 6188 */
6332 6189 ASSERT(pml);
6333 6190 sfmmu_mlist_exit(pml);
6334 6191 continue;
6335 6192 }
6336 6193
6337 6194 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6338 6195
6339 6196 if (clearflag == HAT_SYNC_ZERORM) {
6340 6197 ttemod = tte;
6341 6198 TTE_CLR_RM(&ttemod);
6342 6199 ret = sfmmu_modifytte_try(&tte, &ttemod,
6343 6200 &sfhmep->hme_tte);
6344 6201 if (ret < 0) {
6345 6202 if (pml) {
6346 6203 sfmmu_mlist_exit(pml);
6347 6204 }
6348 6205 continue;
6349 6206 }
6350 6207
6351 6208 if (ret > 0) {
6352 6209 sfmmu_tlb_demap(addr, sfmmup,
6353 6210 hmeblkp, 0, 0);
6354 6211 }
6355 6212 }
6356 6213 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6357 6214 if (pml) {
6358 6215 sfmmu_mlist_exit(pml);
6359 6216 }
6360 6217 }
6361 6218 addr += TTEBYTES(ttesz);
6362 6219 sfhmep++;
6363 6220 }
6364 6221 return (addr);
6365 6222 }
6366 6223
6367 6224 /*
6368 6225 * This function will sync a tte to the page struct and it will
6369 6226 * update the hat stats. Currently it allows us to pass a NULL pp
6370 6227 * and we will simply update the stats. We may want to change this
6371 6228 * so we only keep stats for pages backed by pp's.
6372 6229 */
6373 6230 static void
6374 6231 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6375 6232 {
6376 6233 uint_t rm = 0;
6377 6234 int sz;
6378 6235 pgcnt_t npgs;
6379 6236
6380 6237 ASSERT(TTE_IS_VALID(ttep));
6381 6238
6382 6239 if (TTE_IS_NOSYNC(ttep)) {
6383 6240 return;
6384 6241 }
6385 6242
6386 6243 if (TTE_IS_REF(ttep)) {
6387 6244 rm = P_REF;
6388 6245 }
6389 6246 if (TTE_IS_MOD(ttep)) {
6390 6247 rm |= P_MOD;
6391 6248 }
6392 6249
6393 6250 if (rm == 0) {
6394 6251 return;
6395 6252 }
6396 6253
6397 6254 sz = TTE_CSZ(ttep);
6398 6255 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6399 6256 int i;
6400 6257 caddr_t vaddr = addr;
6401 6258
6402 6259 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6403 6260 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6404 6261 }
6405 6262
6406 6263 }
6407 6264
6408 6265 /*
6409 6266 * XXX I want to use cas to update nrm bits but they
6410 6267 * currently belong in common/vm and not in hat where
6411 6268 * they should be.
6412 6269 * The nrm bits are protected by the same mutex as
6413 6270 * the one that protects the page's mapping list.
6414 6271 */
6415 6272 if (!pp)
6416 6273 return;
6417 6274 ASSERT(sfmmu_mlist_held(pp));
6418 6275 /*
6419 6276 * If the tte is for a large page, we need to sync all the
6420 6277 * pages covered by the tte.
6421 6278 */
6422 6279 if (sz != TTE8K) {
6423 6280 ASSERT(pp->p_szc != 0);
6424 6281 pp = PP_GROUPLEADER(pp, sz);
6425 6282 ASSERT(sfmmu_mlist_held(pp));
6426 6283 }
6427 6284
6428 6285 /* Get number of pages from tte size. */
6429 6286 npgs = TTEPAGES(sz);
6430 6287
6431 6288 do {
6432 6289 ASSERT(pp);
6433 6290 ASSERT(sfmmu_mlist_held(pp));
6434 6291 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6435 6292 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6436 6293 hat_page_setattr(pp, rm);
6437 6294
6438 6295 /*
6439 6296 * Are we done? If not, we must have a large mapping.
6440 6297 * For large mappings we need to sync the rest of the pages
6441 6298 * covered by this tte; goto the next page.
6442 6299 */
6443 6300 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6444 6301 }
6445 6302
6446 6303 /*
6447 6304 * Execute pre-callback handler of each pa_hment linked to pp
6448 6305 *
6449 6306 * Inputs:
6450 6307 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6451 6308 * capture_cpus: pointer to return value (below)
6452 6309 *
6453 6310 * Returns:
6454 6311 * Propagates the subsystem callback return values back to the caller;
6455 6312 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6456 6313 * is zero if all of the pa_hments are of a type that do not require
6457 6314 * capturing CPUs prior to suspending the mapping, else it is 1.
6458 6315 */
6459 6316 static int
6460 6317 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6461 6318 {
6462 6319 struct sf_hment *sfhmep;
6463 6320 struct pa_hment *pahmep;
6464 6321 int (*f)(caddr_t, uint_t, uint_t, void *);
6465 6322 int ret;
6466 6323 id_t id;
6467 6324 int locked = 0;
6468 6325 kmutex_t *pml;
6469 6326
6470 6327 ASSERT(PAGE_EXCL(pp));
6471 6328 if (!sfmmu_mlist_held(pp)) {
6472 6329 pml = sfmmu_mlist_enter(pp);
6473 6330 locked = 1;
6474 6331 }
6475 6332
6476 6333 if (capture_cpus)
6477 6334 *capture_cpus = 0;
6478 6335
6479 6336 top:
6480 6337 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6481 6338 /*
6482 6339 * skip sf_hments corresponding to VA<->PA mappings;
6483 6340 * for pa_hment's, hme_tte.ll is zero
6484 6341 */
6485 6342 if (!IS_PAHME(sfhmep))
6486 6343 continue;
6487 6344
6488 6345 pahmep = sfhmep->hme_data;
6489 6346 ASSERT(pahmep != NULL);
6490 6347
6491 6348 /*
6492 6349 * skip if pre-handler has been called earlier in this loop
6493 6350 */
6494 6351 if (pahmep->flags & flag)
6495 6352 continue;
6496 6353
6497 6354 id = pahmep->cb_id;
6498 6355 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6499 6356 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6500 6357 *capture_cpus = 1;
6501 6358 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6502 6359 pahmep->flags |= flag;
6503 6360 continue;
6504 6361 }
6505 6362
6506 6363 /*
6507 6364 * Drop the mapping list lock to avoid locking order issues.
6508 6365 */
6509 6366 if (locked)
6510 6367 sfmmu_mlist_exit(pml);
6511 6368
6512 6369 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6513 6370 if (ret != 0)
6514 6371 return (ret); /* caller must do the cleanup */
6515 6372
6516 6373 if (locked) {
6517 6374 pml = sfmmu_mlist_enter(pp);
6518 6375 pahmep->flags |= flag;
6519 6376 goto top;
6520 6377 }
6521 6378
6522 6379 pahmep->flags |= flag;
6523 6380 }
6524 6381
6525 6382 if (locked)
6526 6383 sfmmu_mlist_exit(pml);
6527 6384
6528 6385 return (0);
6529 6386 }
6530 6387
6531 6388 /*
6532 6389 * Execute post-callback handler of each pa_hment linked to pp
6533 6390 *
6534 6391 * Same overall assumptions and restrictions apply as for
6535 6392 * hat_pageprocess_precallbacks().
6536 6393 */
6537 6394 static void
6538 6395 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6539 6396 {
6540 6397 pfn_t pgpfn = pp->p_pagenum;
6541 6398 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6542 6399 pfn_t newpfn;
6543 6400 struct sf_hment *sfhmep;
6544 6401 struct pa_hment *pahmep;
6545 6402 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6546 6403 id_t id;
6547 6404 int locked = 0;
6548 6405 kmutex_t *pml;
6549 6406
6550 6407 ASSERT(PAGE_EXCL(pp));
6551 6408 if (!sfmmu_mlist_held(pp)) {
6552 6409 pml = sfmmu_mlist_enter(pp);
6553 6410 locked = 1;
6554 6411 }
6555 6412
6556 6413 top:
6557 6414 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6558 6415 /*
6559 6416 * skip sf_hments corresponding to VA<->PA mappings;
6560 6417 * for pa_hment's, hme_tte.ll is zero
6561 6418 */
6562 6419 if (!IS_PAHME(sfhmep))
6563 6420 continue;
6564 6421
6565 6422 pahmep = sfhmep->hme_data;
6566 6423 ASSERT(pahmep != NULL);
6567 6424
6568 6425 if ((pahmep->flags & flag) == 0)
6569 6426 continue;
6570 6427
6571 6428 pahmep->flags &= ~flag;
6572 6429
6573 6430 id = pahmep->cb_id;
6574 6431 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6575 6432 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6576 6433 continue;
6577 6434
6578 6435 /*
6579 6436 * Convert the base page PFN into the constituent PFN
6580 6437 * which is needed by the callback handler.
6581 6438 */
6582 6439 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6583 6440
6584 6441 /*
6585 6442 * Drop the mapping list lock to avoid locking order issues.
6586 6443 */
6587 6444 if (locked)
6588 6445 sfmmu_mlist_exit(pml);
6589 6446
6590 6447 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6591 6448 != 0)
6592 6449 panic("sfmmu: posthandler failed");
6593 6450
6594 6451 if (locked) {
6595 6452 pml = sfmmu_mlist_enter(pp);
6596 6453 goto top;
6597 6454 }
6598 6455 }
6599 6456
6600 6457 if (locked)
6601 6458 sfmmu_mlist_exit(pml);
6602 6459 }
6603 6460
6604 6461 /*
6605 6462 * Suspend locked kernel mapping
6606 6463 */
6607 6464 void
6608 6465 hat_pagesuspend(struct page *pp)
6609 6466 {
6610 6467 struct sf_hment *sfhmep;
6611 6468 sfmmu_t *sfmmup;
6612 6469 tte_t tte, ttemod;
6613 6470 struct hme_blk *hmeblkp;
6614 6471 caddr_t addr;
6615 6472 int index, cons;
6616 6473 cpuset_t cpuset;
6617 6474
6618 6475 ASSERT(PAGE_EXCL(pp));
6619 6476 ASSERT(sfmmu_mlist_held(pp));
6620 6477
6621 6478 mutex_enter(&kpr_suspendlock);
6622 6479
6623 6480 /*
6624 6481 * We're about to suspend a kernel mapping so mark this thread as
6625 6482 * non-traceable by DTrace. This prevents us from running into issues
6626 6483 * with probe context trying to touch a suspended page
6627 6484 * in the relocation codepath itself.
6628 6485 */
6629 6486 curthread->t_flag |= T_DONTDTRACE;
6630 6487
6631 6488 index = PP_MAPINDEX(pp);
6632 6489 cons = TTE8K;
6633 6490
6634 6491 retry:
6635 6492 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6636 6493
6637 6494 if (IS_PAHME(sfhmep))
6638 6495 continue;
6639 6496
6640 6497 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6641 6498 continue;
6642 6499
6643 6500 /*
6644 6501 * Loop until we successfully set the suspend bit in
6645 6502 * the TTE.
6646 6503 */
6647 6504 again:
6648 6505 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6649 6506 ASSERT(TTE_IS_VALID(&tte));
6650 6507
6651 6508 ttemod = tte;
6652 6509 TTE_SET_SUSPEND(&ttemod);
6653 6510 if (sfmmu_modifytte_try(&tte, &ttemod,
6654 6511 &sfhmep->hme_tte) < 0)
6655 6512 goto again;
6656 6513
6657 6514 /*
6658 6515 * Invalidate TSB entry
6659 6516 */
6660 6517 hmeblkp = sfmmu_hmetohblk(sfhmep);
6661 6518
6662 6519 sfmmup = hblktosfmmu(hmeblkp);
6663 6520 ASSERT(sfmmup == ksfmmup);
6664 6521 ASSERT(!hmeblkp->hblk_shared);
6665 6522
6666 6523 addr = tte_to_vaddr(hmeblkp, tte);
6667 6524
6668 6525 /*
6669 6526 * No need to make sure that the TSB for this sfmmu is
6670 6527 * not being relocated since it is ksfmmup and thus it
6671 6528 * will never be relocated.
6672 6529 */
6673 6530 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6674 6531
6675 6532 /*
6676 6533 * Update xcall stats
6677 6534 */
6678 6535 cpuset = cpu_ready_set;
6679 6536 CPUSET_DEL(cpuset, CPU->cpu_id);
6680 6537
6681 6538 /* LINTED: constant in conditional context */
6682 6539 SFMMU_XCALL_STATS(ksfmmup);
6683 6540
6684 6541 /*
6685 6542 		 * Flush TLB entry on remote CPUs
6686 6543 */
6687 6544 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6688 6545 (uint64_t)ksfmmup);
6689 6546 xt_sync(cpuset);
6690 6547
6691 6548 /*
6692 6549 * Flush TLB entry on local CPU
6693 6550 */
6694 6551 vtag_flushpage(addr, (uint64_t)ksfmmup);
6695 6552 }
6696 6553
6697 6554 while (index != 0) {
6698 6555 index = index >> 1;
6699 6556 if (index != 0)
6700 6557 cons++;
6701 6558 if (index & 0x1) {
6702 6559 pp = PP_GROUPLEADER(pp, cons);
6703 6560 goto retry;
6704 6561 }
6705 6562 }
6706 6563 }
6707 6564
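/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * hat_pagesuspend() above, and hat_pageunload(), hat_pagesync() and
 * hat_page_demote() below, all repeat the same idiom for visiting every
 * mapping size present on a page.  As those loops suggest, PP_MAPINDEX(pp)
 * is treated as a bitmask in which each bit above bit 0 stands for the
 * next larger mapping size (TTE64K, TTE512K, TTE4M, ...); a set bit means
 * the page participates in a mapping of that size, so the walk restarts
 * from that size's group leader.  The same walk over a plain bitmask,
 * flattened to avoid the goto and using hypothetical names:
 */
static void
walk_mapping_sizes(uint_t index)
{
	int cons = 0;			/* 0 plays the role of TTE8K */

	/* ... visit the 8K-sized view of the page here ... */

	while (index != 0) {
		index >>= 1;
		if (index != 0)
			cons++;
		if (index & 0x1) {
			/*
			 * ... visit the group-leader view at size 'cons'
			 * (the real code does this via "goto retry") ...
			 */
		}
	}
}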
6708 6565 #ifdef DEBUG
6709 6566
6710 6567 #define N_PRLE 1024
6711 6568 struct prle {
6712 6569 page_t *targ;
6713 6570 page_t *repl;
6714 6571 int status;
6715 6572 int pausecpus;
6716 6573 hrtime_t whence;
6717 6574 };
6718 6575
6719 6576 static struct prle page_relocate_log[N_PRLE];
6720 6577 static int prl_entry;
6721 6578 static kmutex_t prl_mutex;
6722 6579
6723 6580 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6724 6581 mutex_enter(&prl_mutex); \
6725 6582 page_relocate_log[prl_entry].targ = *(t); \
6726 6583 page_relocate_log[prl_entry].repl = *(r); \
6727 6584 page_relocate_log[prl_entry].status = (s); \
6728 6585 page_relocate_log[prl_entry].pausecpus = (p); \
6729 6586 page_relocate_log[prl_entry].whence = gethrtime(); \
6730 6587 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6731 6588 mutex_exit(&prl_mutex);
6732 6589
6733 6590 #else /* !DEBUG */
6734 6591 #define PAGE_RELOCATE_LOG(t, r, s, p)
6735 6592 #endif
6736 6593
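/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * on DEBUG kernels the PAGE_RELOCATE_LOG() macro above records every
 * relocation attempt in the page_relocate_log[] ring buffer; prl_entry
 * always indexes the slot that will be written next, so the newest record
 * sits immediately behind it.  A hypothetical helper that dumps the log
 * newest-first might look like this:
 */
#ifdef DEBUG
static void
page_relocate_log_dump(void)
{
	int i, slot;

	mutex_enter(&prl_mutex);
	for (i = 1; i <= N_PRLE; i++) {
		slot = (prl_entry - i + N_PRLE) % N_PRLE;
		if (page_relocate_log[slot].whence == 0)
			break;	/* slot never written; log not yet full */
		cmn_err(CE_CONT, "targ %p repl %p status %d pause %d t %lld\n",
		    (void *)page_relocate_log[slot].targ,
		    (void *)page_relocate_log[slot].repl,
		    page_relocate_log[slot].status,
		    page_relocate_log[slot].pausecpus,
		    (long long)page_relocate_log[slot].whence);
	}
	mutex_exit(&prl_mutex);
}
#endif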
6737 6594 /*
6738 6595 * Core Kernel Page Relocation Algorithm
6739 6596 *
6740 6597 * Input:
6741 6598 *
6742 6599 * target : constituent pages are SE_EXCL locked.
6743 6600 * replacement: constituent pages are SE_EXCL locked.
6744 6601 *
6745 6602 * Output:
6746 6603 *
6747 6604 * nrelocp: number of pages relocated
6748 6605 */
6749 6606 int
6750 6607 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6751 6608 {
6752 6609 page_t *targ, *repl;
6753 6610 page_t *tpp, *rpp;
6754 6611 kmutex_t *low, *high;
6755 6612 spgcnt_t npages, i;
6756 6613 page_t *pl = NULL;
6757 6614 int old_pil;
6758 6615 cpuset_t cpuset;
6759 6616 int cap_cpus;
6760 6617 int ret;
6761 6618 #ifdef VAC
6762 6619 int cflags = 0;
6763 6620 #endif
6764 6621
6765 6622 if (!kcage_on || PP_ISNORELOC(*target)) {
6766 6623 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6767 6624 return (EAGAIN);
6768 6625 }
6769 6626
6770 6627 mutex_enter(&kpr_mutex);
6771 6628 kreloc_thread = curthread;
6772 6629
6773 6630 targ = *target;
6774 6631 repl = *replacement;
6775 6632 ASSERT(repl != NULL);
6776 6633 ASSERT(targ->p_szc == repl->p_szc);
6777 6634
6778 6635 npages = page_get_pagecnt(targ->p_szc);
6779 6636
6780 6637 /*
6781 6638 * unload VA<->PA mappings that are not locked
6782 6639 */
6783 6640 tpp = targ;
6784 6641 for (i = 0; i < npages; i++) {
6785 6642 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6786 6643 tpp++;
6787 6644 }
6788 6645
6789 6646 /*
6790 6647 * Do "presuspend" callbacks, in a context from which we can still
6791 6648 * block as needed. Note that we don't hold the mapping list lock
6792 6649 * of "targ" at this point due to potential locking order issues;
6793 6650 	 * we assume that, between the hat_pageunload() above and our holding
6794 6651 	 * the SE_EXCL lock, the mapping list *cannot* change at this
6795 6652 	 * point.
6796 6653 */
6797 6654 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6798 6655 if (ret != 0) {
6799 6656 /*
6800 6657 * EIO translates to fatal error, for all others cleanup
6801 6658 * and return EAGAIN.
6802 6659 */
6803 6660 ASSERT(ret != EIO);
6804 6661 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6805 6662 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6806 6663 kreloc_thread = NULL;
6807 6664 mutex_exit(&kpr_mutex);
6808 6665 return (EAGAIN);
6809 6666 }
6810 6667
6811 6668 /*
6812 6669 * acquire p_mapping list lock for both the target and replacement
6813 6670 * root pages.
6814 6671 *
6815 6672 * low and high refer to the need to grab the mlist locks in a
6816 6673 	 * specific order so as to prevent race conditions. Thus the
6817 6674 * lower lock must be grabbed before the higher lock.
6818 6675 *
6819 6676 * This will block hat_unload's accessing p_mapping list. Since
6820 6677 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6821 6678 * blocked. Thus, no one else will be accessing the p_mapping list
6822 6679 * while we suspend and reload the locked mapping below.
6823 6680 */
6824 6681 tpp = targ;
6825 6682 rpp = repl;
6826 6683 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6827 6684
6828 6685 kpreempt_disable();
6829 6686
6830 6687 /*
6831 6688 * We raise our PIL to 13 so that we don't get captured by
6832 6689 * another CPU or pinned by an interrupt thread. We can't go to
6833 6690 * PIL 14 since the nexus driver(s) may need to interrupt at
6834 6691 * that level in the case of IOMMU pseudo mappings.
6835 6692 */
6836 6693 cpuset = cpu_ready_set;
6837 6694 CPUSET_DEL(cpuset, CPU->cpu_id);
6838 6695 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6839 6696 old_pil = splr(XCALL_PIL);
6840 6697 } else {
6841 6698 old_pil = -1;
6842 6699 xc_attention(cpuset);
6843 6700 }
6844 6701 ASSERT(getpil() == XCALL_PIL);
6845 6702
6846 6703 /*
6847 6704 * Now do suspend callbacks. In the case of an IOMMU mapping
6848 6705 * this will suspend all DMA activity to the page while it is
6849 6706 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6850 6707 * may be captured at this point we should have acquired any needed
6851 6708 * locks in the presuspend callback.
6852 6709 */
6853 6710 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6854 6711 if (ret != 0) {
6855 6712 repl = targ;
6856 6713 goto suspend_fail;
6857 6714 }
6858 6715
6859 6716 /*
6860 6717 * Raise the PIL yet again, this time to block all high-level
6861 6718 * interrupts on this CPU. This is necessary to prevent an
6862 6719 * interrupt routine from pinning the thread which holds the
6863 6720 * mapping suspended and then touching the suspended page.
6864 6721 *
6865 6722 * Once the page is suspended we also need to be careful to
6866 6723 * avoid calling any functions which touch any seg_kmem memory
6867 6724 * since that memory may be backed by the very page we are
6868 6725 * relocating in here!
6869 6726 */
6870 6727 hat_pagesuspend(targ);
6871 6728
6872 6729 /*
6873 6730 * Now that we are confident everybody has stopped using this page,
6874 6731 * copy the page contents. Note we use a physical copy to prevent
6875 6732 * locking issues and to avoid fpRAS because we can't handle it in
6876 6733 * this context.
6877 6734 */
6878 6735 for (i = 0; i < npages; i++, tpp++, rpp++) {
6879 6736 #ifdef VAC
6880 6737 /*
6881 6738 * If the replacement has a different vcolor than
6882 6739 		 * the one being replaced, we need to handle VAC
6883 6740 * consistency for it just as we were setting up
6884 6741 * a new mapping to it.
6885 6742 */
6886 6743 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6887 6744 (tpp->p_vcolor != rpp->p_vcolor) &&
6888 6745 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6889 6746 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6890 6747 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6891 6748 rpp->p_pagenum);
6892 6749 }
6893 6750 #endif
6894 6751 /*
6895 6752 * Copy the contents of the page.
6896 6753 */
6897 6754 ppcopy_kernel(tpp, rpp);
6898 6755 }
6899 6756
6900 6757 tpp = targ;
6901 6758 rpp = repl;
6902 6759 for (i = 0; i < npages; i++, tpp++, rpp++) {
6903 6760 /*
6904 6761 * Copy attributes. VAC consistency was handled above,
6905 6762 * if required.
6906 6763 */
6907 6764 rpp->p_nrm = tpp->p_nrm;
6908 6765 tpp->p_nrm = 0;
6909 6766 rpp->p_index = tpp->p_index;
6910 6767 tpp->p_index = 0;
6911 6768 #ifdef VAC
6912 6769 rpp->p_vcolor = tpp->p_vcolor;
6913 6770 #endif
6914 6771 }
6915 6772
6916 6773 /*
6917 6774 * First, unsuspend the page, if we set the suspend bit, and transfer
6918 6775 * the mapping list from the target page to the replacement page.
6919 6776 * Next process postcallbacks; since pa_hment's are linked only to the
6920 6777 * p_mapping list of root page, we don't iterate over the constituent
6921 6778 	 * p_mapping list of the root page, we don't iterate over the constituent
6922 6779 */
6923 6780 hat_pagereload(targ, repl);
6924 6781
6925 6782 suspend_fail:
6926 6783 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6927 6784
6928 6785 /*
6929 6786 * Now lower our PIL and release any captured CPUs since we
6930 6787 * are out of the "danger zone". After this it will again be
6931 6788 * safe to acquire adaptive mutex locks, or to drop them...
6932 6789 */
6933 6790 if (old_pil != -1) {
6934 6791 splx(old_pil);
6935 6792 } else {
6936 6793 xc_dismissed(cpuset);
6937 6794 }
6938 6795
6939 6796 kpreempt_enable();
6940 6797
6941 6798 sfmmu_mlist_reloc_exit(low, high);
6942 6799
6943 6800 /*
6944 6801 * Postsuspend callbacks should drop any locks held across
6945 6802 * the suspend callbacks. As before, we don't hold the mapping
6946 6803 	 * list lock at this point; our assumption is that the mapping
6947 6804 * list still can't change due to our holding SE_EXCL lock and
6948 6805 * there being no unlocked mappings left. Hence the restriction
6949 6806 * on calling context to hat_delete_callback()
6950 6807 */
6951 6808 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6952 6809 if (ret != 0) {
6953 6810 /*
6954 6811 * The second presuspend call failed: we got here through
6955 6812 * the suspend_fail label above.
6956 6813 */
6957 6814 ASSERT(ret != EIO);
6958 6815 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6959 6816 kreloc_thread = NULL;
6960 6817 mutex_exit(&kpr_mutex);
6961 6818 return (EAGAIN);
6962 6819 }
6963 6820
6964 6821 /*
6965 6822 * Now that we're out of the performance critical section we can
6966 6823 * take care of updating the hash table, since we still
6967 6824 * hold all the pages locked SE_EXCL at this point we
6968 6825 * needn't worry about things changing out from under us.
6969 6826 */
6970 6827 tpp = targ;
6971 6828 rpp = repl;
6972 6829 for (i = 0; i < npages; i++, tpp++, rpp++) {
6973 6830
6974 6831 /*
6975 6832 * replace targ with replacement in page_hash table
6976 6833 */
6977 6834 targ = tpp;
6978 6835 page_relocate_hash(rpp, targ);
6979 6836
6980 6837 /*
6981 6838 * concatenate target; caller of platform_page_relocate()
6982 6839 * expects target to be concatenated after returning.
6983 6840 */
6984 6841 ASSERT(targ->p_next == targ);
6985 6842 ASSERT(targ->p_prev == targ);
6986 6843 page_list_concat(&pl, &targ);
6987 6844 }
6988 6845
6989 6846 ASSERT(*target == pl);
6990 6847 *nrelocp = npages;
6991 6848 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6992 6849 kreloc_thread = NULL;
6993 6850 mutex_exit(&kpr_mutex);
6994 6851 return (0);
6995 6852 }
6996 6853
6997 6854 /*
6998 6855 * Called when stray pa_hments are found attached to a page which is
6999 6856 * being freed. Notify the subsystem which attached the pa_hment of
7000 6857 * the error if it registered a suitable handler, else panic.
7001 6858 */
7002 6859 static void
7003 6860 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7004 6861 {
7005 6862 id_t cb_id = pahmep->cb_id;
7006 6863
7007 6864 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7008 6865 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7009 6866 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7010 6867 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7011 6868 return; /* non-fatal */
7012 6869 }
7013 6870 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7014 6871 }
7015 6872
7016 6873 /*
7017 6874 * Remove all mappings to page 'pp'.
7018 6875 */
7019 6876 int
7020 6877 hat_pageunload(struct page *pp, uint_t forceflag)
7021 6878 {
7022 6879 struct page *origpp = pp;
7023 6880 struct sf_hment *sfhme, *tmphme;
7024 6881 struct hme_blk *hmeblkp;
7025 6882 kmutex_t *pml;
7026 6883 #ifdef VAC
7027 6884 kmutex_t *pmtx;
7028 6885 #endif
7029 6886 cpuset_t cpuset, tset;
7030 6887 int index, cons;
7031 6888 int pa_hments;
7032 6889
7033 6890 ASSERT(PAGE_EXCL(pp));
7034 6891
7035 6892 tmphme = NULL;
7036 6893 pa_hments = 0;
7037 6894 CPUSET_ZERO(cpuset);
7038 6895
7039 6896 pml = sfmmu_mlist_enter(pp);
7040 6897
7041 6898 #ifdef VAC
7042 6899 if (pp->p_kpmref)
7043 6900 sfmmu_kpm_pageunload(pp);
7044 6901 ASSERT(!PP_ISMAPPED_KPM(pp));
7045 6902 #endif
7046 6903 /*
7047 6904 * Clear vpm reference. Since the page is exclusively locked
7048 6905 * vpm cannot be referencing it.
7049 6906 */
7050 6907 if (vpm_enable) {
7051 6908 pp->p_vpmref = 0;
7052 6909 }
7053 6910
7054 6911 index = PP_MAPINDEX(pp);
7055 6912 cons = TTE8K;
7056 6913 retry:
7057 6914 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7058 6915 tmphme = sfhme->hme_next;
7059 6916
7060 6917 if (IS_PAHME(sfhme)) {
7061 6918 ASSERT(sfhme->hme_data != NULL);
7062 6919 pa_hments++;
7063 6920 continue;
7064 6921 }
7065 6922
7066 6923 hmeblkp = sfmmu_hmetohblk(sfhme);
7067 6924
7068 6925 /*
7069 6926 * If there are kernel mappings don't unload them, they will
7070 6927 * be suspended.
7071 6928 */
7072 6929 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7073 6930 hmeblkp->hblk_tag.htag_id == ksfmmup)
7074 6931 continue;
7075 6932
7076 6933 tset = sfmmu_pageunload(pp, sfhme, cons);
7077 6934 CPUSET_OR(cpuset, tset);
7078 6935 }
7079 6936
7080 6937 while (index != 0) {
7081 6938 index = index >> 1;
7082 6939 if (index != 0)
7083 6940 cons++;
7084 6941 if (index & 0x1) {
7085 6942 /* Go to leading page */
7086 6943 pp = PP_GROUPLEADER(pp, cons);
7087 6944 ASSERT(sfmmu_mlist_held(pp));
7088 6945 goto retry;
7089 6946 }
7090 6947 }
7091 6948
7092 6949 /*
7093 6950 * cpuset may be empty if the page was only mapped by segkpm,
7094 6951 * in which case we won't actually cross-trap.
7095 6952 */
7096 6953 xt_sync(cpuset);
7097 6954
7098 6955 /*
7099 6956 * The page should have no mappings at this point, unless
7100 6957 * we were called from hat_page_relocate() in which case we
7101 6958 * leave the locked mappings which will be suspended later.
7102 6959 */
7103 6960 ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7104 6961 (forceflag == SFMMU_KERNEL_RELOC));
7105 6962
7106 6963 #ifdef VAC
7107 6964 if (PP_ISTNC(pp)) {
7108 6965 if (cons == TTE8K) {
7109 6966 pmtx = sfmmu_page_enter(pp);
7110 6967 PP_CLRTNC(pp);
7111 6968 sfmmu_page_exit(pmtx);
7112 6969 } else {
7113 6970 conv_tnc(pp, cons);
7114 6971 }
7115 6972 }
7116 6973 #endif /* VAC */
7117 6974
7118 6975 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7119 6976 /*
7120 6977 * Unlink any pa_hments and free them, calling back
7121 6978 * the responsible subsystem to notify it of the error.
7122 6979 * This can occur in situations such as drivers leaking
7123 6980 * DMA handles: naughty, but common enough that we'd like
7124 6981 * to keep the system running rather than bringing it
7125 6982 * down with an obscure error like "pa_hment leaked"
7126 6983 * which doesn't aid the user in debugging their driver.
7127 6984 */
7128 6985 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7129 6986 tmphme = sfhme->hme_next;
7130 6987 if (IS_PAHME(sfhme)) {
7131 6988 struct pa_hment *pahmep = sfhme->hme_data;
7132 6989 sfmmu_pahment_leaked(pahmep);
7133 6990 HME_SUB(sfhme, pp);
7134 6991 kmem_cache_free(pa_hment_cache, pahmep);
7135 6992 }
7136 6993 }
7137 6994
7138 6995 ASSERT(!PP_ISMAPPED(origpp));
7139 6996 }
7140 6997
7141 6998 sfmmu_mlist_exit(pml);
7142 6999
7143 7000 return (0);
7144 7001 }
7145 7002
7146 7003 cpuset_t
7147 7004 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7148 7005 {
7149 7006 struct hme_blk *hmeblkp;
7150 7007 sfmmu_t *sfmmup;
7151 7008 tte_t tte, ttemod;
7152 7009 #ifdef DEBUG
7153 7010 tte_t orig_old;
7154 7011 #endif /* DEBUG */
7155 7012 caddr_t addr;
7156 7013 int ttesz;
7157 7014 int ret;
7158 7015 cpuset_t cpuset;
7159 7016
7160 7017 ASSERT(pp != NULL);
7161 7018 ASSERT(sfmmu_mlist_held(pp));
7162 7019 ASSERT(!PP_ISKAS(pp));
7163 7020
7164 7021 CPUSET_ZERO(cpuset);
7165 7022
7166 7023 hmeblkp = sfmmu_hmetohblk(sfhme);
7167 7024
7168 7025 readtte:
7169 7026 sfmmu_copytte(&sfhme->hme_tte, &tte);
7170 7027 if (TTE_IS_VALID(&tte)) {
7171 7028 sfmmup = hblktosfmmu(hmeblkp);
7172 7029 ttesz = get_hblk_ttesz(hmeblkp);
7173 7030 /*
7174 7031 * Only unload mappings of 'cons' size.
7175 7032 */
7176 7033 if (ttesz != cons)
7177 7034 return (cpuset);
7178 7035
7179 7036 /*
7180 7037 * Note that we have p_mapping lock, but no hash lock here.
7181 7038 * hblk_unload() has to have both hash lock AND p_mapping
7182 7039 * lock before it tries to modify tte. So, the tte could
7183 7040 * not become invalid in the sfmmu_modifytte_try() below.
7184 7041 */
7185 7042 ttemod = tte;
7186 7043 #ifdef DEBUG
7187 7044 orig_old = tte;
7188 7045 #endif /* DEBUG */
7189 7046
7190 7047 TTE_SET_INVALID(&ttemod);
7191 7048 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7192 7049 if (ret < 0) {
7193 7050 #ifdef DEBUG
7194 7051 /* only R/M bits can change. */
7195 7052 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7196 7053 #endif /* DEBUG */
7197 7054 goto readtte;
7198 7055 }
7199 7056
7200 7057 if (ret == 0) {
7201 7058 panic("pageunload: cas failed?");
7202 7059 }
7203 7060
7204 7061 addr = tte_to_vaddr(hmeblkp, tte);
7205 7062
7206 7063 if (hmeblkp->hblk_shared) {
7207 7064 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7208 7065 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7209 7066 sf_region_t *rgnp;
7210 7067 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7211 7068 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7212 7069 ASSERT(srdp != NULL);
7213 7070 rgnp = srdp->srd_hmergnp[rid];
7214 7071 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7215 7072 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7216 7073 sfmmu_ttesync(NULL, addr, &tte, pp);
7217 7074 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7218 7075 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7219 7076 } else {
7220 7077 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7221 7078 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7222 7079
7223 7080 /*
7224 7081 * We need to flush the page from the virtual cache
7225 7082 * in order to prevent a virtual cache alias
7226 7083 * inconsistency. The particular scenario we need
7227 7084 * to worry about is:
7228 7085 			 * Given: va1 and va2 are two virtual addresses that
7229 7086 * alias and will map the same physical address.
7230 7087 * 1. mapping exists from va1 to pa and data has
7231 7088 * been read into the cache.
7232 7089 * 2. unload va1.
7233 7090 * 3. load va2 and modify data using va2.
7234 7091 			 * 4. unload va2.
7235 7092 * 5. load va1 and reference data. Unless we flush
7236 7093 * the data cache when we unload we will get
7237 7094 * stale data.
7238 7095 * This scenario is taken care of by using virtual
7239 7096 * page coloring.
7240 7097 */
7241 7098 if (sfmmup->sfmmu_ismhat) {
7242 7099 /*
7243 7100 * Flush TSBs, TLBs and caches
7244 7101 * of every process
7245 7102 * sharing this ism segment.
7246 7103 */
7247 7104 sfmmu_hat_lock_all();
7248 7105 mutex_enter(&ism_mlist_lock);
7249 7106 kpreempt_disable();
7250 7107 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7251 7108 pp->p_pagenum, CACHE_NO_FLUSH);
7252 7109 kpreempt_enable();
7253 7110 mutex_exit(&ism_mlist_lock);
7254 7111 sfmmu_hat_unlock_all();
7255 7112 cpuset = cpu_ready_set;
7256 7113 } else {
7257 7114 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7258 7115 cpuset = sfmmup->sfmmu_cpusran;
7259 7116 }
7260 7117 }
7261 7118
7262 7119 /*
7263 7120 * Hme_sub has to run after ttesync() and a_rss update.
7264 7121 * See hblk_unload().
7265 7122 */
7266 7123 HME_SUB(sfhme, pp);
7267 7124 membar_stst();
7268 7125
7269 7126 /*
7270 7127 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7271 7128 * since pteload may have done a HME_ADD() right after
7272 7129 * we did the HME_SUB() above. Hmecnt is now maintained
7273 7130 		 * by cas only; no lock guarantees its value. The only
7274 7131 		 * guarantee we have is that the hmecnt should not be less than
7275 7132 		 * what it should be, so the hblk will not be taken away.
7276 7133 		 * It's also important that we decrement the hmecnt only after
7277 7134 		 * we are done with hmeblkp so that this hmeblk won't be
7278 7135 		 * stolen.
7279 7136 */
7280 7137 ASSERT(hmeblkp->hblk_hmecnt > 0);
7281 7138 ASSERT(hmeblkp->hblk_vcnt > 0);
7282 7139 atomic_dec_16(&hmeblkp->hblk_vcnt);
7283 7140 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7284 7141 /*
7285 7142 * This is bug 4063182.
7286 7143 * XXX: fixme
7287 7144 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7288 7145 * !hmeblkp->hblk_lckcnt);
7289 7146 */
7290 7147 } else {
7291 7148 panic("invalid tte? pp %p &tte %p",
7292 7149 (void *)pp, (void *)&tte);
7293 7150 }
7294 7151
7295 7152 return (cpuset);
7296 7153 }
7297 7154
7298 7155 /*
7299 7156 * While relocating a kernel page, this function will move the mappings
7300 7157 * from tpp to dpp and modify any associated data with these mappings.
7301 7158 * It also unsuspends the suspended kernel mapping.
7302 7159 */
7303 7160 static void
7304 7161 hat_pagereload(struct page *tpp, struct page *dpp)
7305 7162 {
7306 7163 struct sf_hment *sfhme;
7307 7164 tte_t tte, ttemod;
7308 7165 int index, cons;
7309 7166
7310 7167 ASSERT(getpil() == PIL_MAX);
7311 7168 ASSERT(sfmmu_mlist_held(tpp));
7312 7169 ASSERT(sfmmu_mlist_held(dpp));
7313 7170
7314 7171 index = PP_MAPINDEX(tpp);
7315 7172 cons = TTE8K;
7316 7173
7317 7174 /* Update real mappings to the page */
7318 7175 retry:
7319 7176 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7320 7177 if (IS_PAHME(sfhme))
7321 7178 continue;
7322 7179 sfmmu_copytte(&sfhme->hme_tte, &tte);
7323 7180 ttemod = tte;
7324 7181
7325 7182 /*
7326 7183 * replace old pfn with new pfn in TTE
7327 7184 */
7328 7185 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7329 7186
7330 7187 /*
7331 7188 * clear suspend bit
7332 7189 */
7333 7190 ASSERT(TTE_IS_SUSPEND(&ttemod));
7334 7191 TTE_CLR_SUSPEND(&ttemod);
7335 7192
7336 7193 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7337 7194 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7338 7195
7339 7196 /*
7340 7197 * set hme_page point to new page
7341 7198 */
7342 7199 sfhme->hme_page = dpp;
7343 7200 }
7344 7201
7345 7202 /*
7346 7203 * move p_mapping list from old page to new page
7347 7204 */
7348 7205 dpp->p_mapping = tpp->p_mapping;
7349 7206 tpp->p_mapping = NULL;
7350 7207 dpp->p_share = tpp->p_share;
7351 7208 tpp->p_share = 0;
7352 7209
7353 7210 while (index != 0) {
7354 7211 index = index >> 1;
7355 7212 if (index != 0)
7356 7213 cons++;
7357 7214 if (index & 0x1) {
7358 7215 tpp = PP_GROUPLEADER(tpp, cons);
7359 7216 dpp = PP_GROUPLEADER(dpp, cons);
7360 7217 goto retry;
7361 7218 }
7362 7219 }
7363 7220
7364 7221 curthread->t_flag &= ~T_DONTDTRACE;
7365 7222 mutex_exit(&kpr_suspendlock);
7366 7223 }
7367 7224
7368 7225 uint_t
7369 7226 hat_pagesync(struct page *pp, uint_t clearflag)
7370 7227 {
7371 7228 struct sf_hment *sfhme, *tmphme = NULL;
7372 7229 struct hme_blk *hmeblkp;
7373 7230 kmutex_t *pml;
7374 7231 cpuset_t cpuset, tset;
7375 7232 int index, cons;
7376 7233 extern ulong_t po_share;
7377 7234 page_t *save_pp = pp;
7378 7235 int stop_on_sh = 0;
7379 7236 uint_t shcnt;
7380 7237
7381 7238 CPUSET_ZERO(cpuset);
7382 7239
7383 7240 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7384 7241 return (PP_GENERIC_ATTR(pp));
7385 7242 }
7386 7243
7387 7244 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7388 7245 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7389 7246 return (PP_GENERIC_ATTR(pp));
7390 7247 }
7391 7248 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7392 7249 return (PP_GENERIC_ATTR(pp));
7393 7250 }
7394 7251 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7395 7252 if (pp->p_share > po_share) {
7396 7253 hat_page_setattr(pp, P_REF);
7397 7254 return (PP_GENERIC_ATTR(pp));
7398 7255 }
7399 7256 stop_on_sh = 1;
7400 7257 shcnt = 0;
7401 7258 }
7402 7259 }
7403 7260
7404 7261 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7405 7262 pml = sfmmu_mlist_enter(pp);
7406 7263 index = PP_MAPINDEX(pp);
7407 7264 cons = TTE8K;
7408 7265 retry:
7409 7266 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7410 7267 /*
7411 7268 * We need to save the next hment on the list since
7412 7269 * it is possible for pagesync to remove an invalid hment
7413 7270 * from the list.
7414 7271 */
7415 7272 tmphme = sfhme->hme_next;
7416 7273 if (IS_PAHME(sfhme))
7417 7274 continue;
7418 7275 /*
7419 7276 * If we are looking for large mappings and this hme doesn't
7420 7277 * reach the range we are seeking, just ignore it.
7421 7278 */
7422 7279 hmeblkp = sfmmu_hmetohblk(sfhme);
7423 7280
7424 7281 if (hme_size(sfhme) < cons)
7425 7282 continue;
7426 7283
7427 7284 if (stop_on_sh) {
7428 7285 if (hmeblkp->hblk_shared) {
7429 7286 sf_srd_t *srdp = hblktosrd(hmeblkp);
7430 7287 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7431 7288 sf_region_t *rgnp;
7432 7289 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7433 7290 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7434 7291 ASSERT(srdp != NULL);
7435 7292 rgnp = srdp->srd_hmergnp[rid];
7436 7293 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7437 7294 rgnp, rid);
7438 7295 shcnt += rgnp->rgn_refcnt;
7439 7296 } else {
7440 7297 shcnt++;
7441 7298 }
7442 7299 if (shcnt > po_share) {
7443 7300 /*
7444 7301 * tell the pager to spare the page this time
7445 7302 * around.
7446 7303 */
7447 7304 hat_page_setattr(save_pp, P_REF);
7448 7305 index = 0;
7449 7306 break;
7450 7307 }
7451 7308 }
7452 7309 tset = sfmmu_pagesync(pp, sfhme,
7453 7310 clearflag & ~HAT_SYNC_STOPON_RM);
7454 7311 CPUSET_OR(cpuset, tset);
7455 7312
7456 7313 /*
7457 7314 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7458 7315 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7459 7316 */
7460 7317 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7461 7318 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7462 7319 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7463 7320 index = 0;
7464 7321 break;
7465 7322 }
7466 7323 }
7467 7324
7468 7325 while (index) {
7469 7326 index = index >> 1;
7470 7327 cons++;
7471 7328 if (index & 0x1) {
7472 7329 /* Go to leading page */
7473 7330 pp = PP_GROUPLEADER(pp, cons);
7474 7331 goto retry;
7475 7332 }
7476 7333 }
7477 7334
7478 7335 xt_sync(cpuset);
7479 7336 sfmmu_mlist_exit(pml);
7480 7337 return (PP_GENERIC_ATTR(save_pp));
7481 7338 }
7482 7339
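/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * hat_pagesync() above folds the hardware ref/mod bits of each remaining
 * mapping back into pp->p_nrm and returns the page's generic attributes.
 * A caller that only needs to learn whether a page is dirty, without
 * clearing the hardware bits, might wrap it like this:
 */
static int
page_is_dirty(page_t *pp)
{
	/* stop at the first modified mapping; do not clear the R/M bits */
	return ((hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) &
	    P_MOD) != 0);
}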
7483 7340 /*
7484 7341  * Sync the ref/mod bits of one mapping into the page struct's attributes
7485 7342 */
7486 7343 static cpuset_t
7487 7344 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7488 7345 uint_t clearflag)
7489 7346 {
7490 7347 caddr_t addr;
7491 7348 tte_t tte, ttemod;
7492 7349 struct hme_blk *hmeblkp;
7493 7350 int ret;
7494 7351 sfmmu_t *sfmmup;
7495 7352 cpuset_t cpuset;
7496 7353
7497 7354 ASSERT(pp != NULL);
7498 7355 ASSERT(sfmmu_mlist_held(pp));
7499 7356 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7500 7357 (clearflag == HAT_SYNC_ZERORM));
7501 7358
7502 7359 SFMMU_STAT(sf_pagesync);
7503 7360
7504 7361 CPUSET_ZERO(cpuset);
7505 7362
7506 7363 sfmmu_pagesync_retry:
7507 7364
7508 7365 sfmmu_copytte(&sfhme->hme_tte, &tte);
7509 7366 if (TTE_IS_VALID(&tte)) {
7510 7367 hmeblkp = sfmmu_hmetohblk(sfhme);
7511 7368 sfmmup = hblktosfmmu(hmeblkp);
7512 7369 addr = tte_to_vaddr(hmeblkp, tte);
7513 7370 if (clearflag == HAT_SYNC_ZERORM) {
7514 7371 ttemod = tte;
7515 7372 TTE_CLR_RM(&ttemod);
7516 7373 ret = sfmmu_modifytte_try(&tte, &ttemod,
7517 7374 &sfhme->hme_tte);
7518 7375 if (ret < 0) {
7519 7376 /*
7520 7377 * cas failed and the new value is not what
7521 7378 * we want.
7522 7379 */
7523 7380 goto sfmmu_pagesync_retry;
7524 7381 }
7525 7382
7526 7383 if (ret > 0) {
7527 7384 /* we win the cas */
7528 7385 if (hmeblkp->hblk_shared) {
7529 7386 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7530 7387 uint_t rid =
7531 7388 hmeblkp->hblk_tag.htag_rid;
7532 7389 sf_region_t *rgnp;
7533 7390 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7534 7391 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7535 7392 ASSERT(srdp != NULL);
7536 7393 rgnp = srdp->srd_hmergnp[rid];
7537 7394 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7538 7395 srdp, rgnp, rid);
7539 7396 cpuset = sfmmu_rgntlb_demap(addr,
7540 7397 rgnp, hmeblkp, 1);
7541 7398 } else {
7542 7399 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7543 7400 0, 0);
7544 7401 cpuset = sfmmup->sfmmu_cpusran;
7545 7402 }
7546 7403 }
7547 7404 }
7548 7405 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7549 7406 &tte, pp);
7550 7407 }
7551 7408 return (cpuset);
7552 7409 }
7553 7410
7554 7411 /*
7555 7412  * Remove write permission from a mapping to a page, so that
7556 7413 * we can detect the next modification of it. This requires modifying
7557 7414 * the TTE then invalidating (demap) any TLB entry using that TTE.
7558 7415 * This code is similar to sfmmu_pagesync().
7559 7416 */
7560 7417 static cpuset_t
7561 7418 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7562 7419 {
7563 7420 caddr_t addr;
7564 7421 tte_t tte;
7565 7422 tte_t ttemod;
7566 7423 struct hme_blk *hmeblkp;
7567 7424 int ret;
7568 7425 sfmmu_t *sfmmup;
7569 7426 cpuset_t cpuset;
7570 7427
7571 7428 ASSERT(pp != NULL);
7572 7429 ASSERT(sfmmu_mlist_held(pp));
7573 7430
7574 7431 CPUSET_ZERO(cpuset);
7575 7432 SFMMU_STAT(sf_clrwrt);
7576 7433
7577 7434 retry:
7578 7435
7579 7436 sfmmu_copytte(&sfhme->hme_tte, &tte);
7580 7437 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7581 7438 hmeblkp = sfmmu_hmetohblk(sfhme);
7582 7439 sfmmup = hblktosfmmu(hmeblkp);
7583 7440 addr = tte_to_vaddr(hmeblkp, tte);
7584 7441
7585 7442 ttemod = tte;
7586 7443 TTE_CLR_WRT(&ttemod);
7587 7444 TTE_CLR_MOD(&ttemod);
7588 7445 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7589 7446
7590 7447 /*
7591 7448 		 * if the cas failed and the new value is not what
7592 7449 		 * we want, retry
7593 7450 */
7594 7451 if (ret < 0)
7595 7452 goto retry;
7596 7453
7597 7454 /* we win the cas */
7598 7455 if (ret > 0) {
7599 7456 if (hmeblkp->hblk_shared) {
7600 7457 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7601 7458 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7602 7459 sf_region_t *rgnp;
7603 7460 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7604 7461 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7605 7462 ASSERT(srdp != NULL);
7606 7463 rgnp = srdp->srd_hmergnp[rid];
7607 7464 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7608 7465 srdp, rgnp, rid);
7609 7466 cpuset = sfmmu_rgntlb_demap(addr,
7610 7467 rgnp, hmeblkp, 1);
7611 7468 } else {
7612 7469 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7613 7470 cpuset = sfmmup->sfmmu_cpusran;
7614 7471 }
7615 7472 }
7616 7473 }
7617 7474
7618 7475 return (cpuset);
7619 7476 }
7620 7477
7621 7478 /*
7622 7479 * Walk all mappings of a page, removing write permission and clearing the
7623 7480 * ref/mod bits. This code is similar to hat_pagesync()
7624 7481 */
7625 7482 static void
7626 7483 hat_page_clrwrt(page_t *pp)
7627 7484 {
7628 7485 struct sf_hment *sfhme;
7629 7486 struct sf_hment *tmphme = NULL;
7630 7487 kmutex_t *pml;
7631 7488 cpuset_t cpuset;
7632 7489 cpuset_t tset;
7633 7490 int index;
7634 7491 int cons;
7635 7492
7636 7493 CPUSET_ZERO(cpuset);
7637 7494
7638 7495 pml = sfmmu_mlist_enter(pp);
7639 7496 index = PP_MAPINDEX(pp);
7640 7497 cons = TTE8K;
7641 7498 retry:
7642 7499 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7643 7500 tmphme = sfhme->hme_next;
7644 7501
7645 7502 /*
7646 7503 * If we are looking for large mappings and this hme doesn't
7647 7504 		 * reach the range we are seeking, just ignore it.
7648 7505 */
7649 7506
7650 7507 if (hme_size(sfhme) < cons)
7651 7508 continue;
7652 7509
7653 7510 tset = sfmmu_pageclrwrt(pp, sfhme);
7654 7511 CPUSET_OR(cpuset, tset);
7655 7512 }
7656 7513
7657 7514 while (index) {
7658 7515 index = index >> 1;
7659 7516 cons++;
7660 7517 if (index & 0x1) {
7661 7518 /* Go to leading page */
7662 7519 pp = PP_GROUPLEADER(pp, cons);
7663 7520 goto retry;
7664 7521 }
7665 7522 }
7666 7523
7667 7524 xt_sync(cpuset);
7668 7525 sfmmu_mlist_exit(pml);
7669 7526 }
7670 7527
7671 7528 /*
7672 7529 * Set the given REF/MOD/RO bits for the given page.
7673 7530 * For a vnode with a sorted v_pages list, we need to change
7674 7531 * the attributes and the v_pages list together under page_vnode_mutex.
7675 7532 */
7676 7533 void
7677 7534 hat_page_setattr(page_t *pp, uint_t flag)
7678 7535 {
7679 7536 vnode_t *vp = pp->p_vnode;
7680 7537 page_t **listp;
7681 7538 kmutex_t *pmtx;
7682 7539 kmutex_t *vphm = NULL;
7683 7540 int noshuffle;
7684 7541
7685 7542 noshuffle = flag & P_NSH;
7686 7543 flag &= ~P_NSH;
7687 7544
7688 7545 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7689 7546
7690 7547 /*
7691 7548 * nothing to do if attribute already set
7692 7549 */
7693 7550 if ((pp->p_nrm & flag) == flag)
7694 7551 return;
7695 7552
7696 7553 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7697 7554 !noshuffle) {
7698 7555 vphm = page_vnode_mutex(vp);
7699 7556 mutex_enter(vphm);
7700 7557 }
7701 7558
7702 7559 pmtx = sfmmu_page_enter(pp);
7703 7560 pp->p_nrm |= flag;
7704 7561 sfmmu_page_exit(pmtx);
7705 7562
7706 7563 if (vphm != NULL) {
7707 7564 /*
7708 7565 		 * Some file systems examine v_pages for NULL without grabbing
7709 7566 		 * the vphm mutex, so we must not let it become NULL when pp
7710 7567 		 * is the only page on the list.
7711 7568 */
7712 7569 if (pp->p_vpnext != pp) {
7713 7570 page_vpsub(&vp->v_pages, pp);
7714 7571 if (vp->v_pages != NULL)
7715 7572 listp = &vp->v_pages->p_vpprev->p_vpnext;
7716 7573 else
7717 7574 listp = &vp->v_pages;
7718 7575 page_vpadd(listp, pp);
7719 7576 }
7720 7577 mutex_exit(vphm);
7721 7578 }
7722 7579 }
7723 7580
7724 7581 void
7725 7582 hat_page_clrattr(page_t *pp, uint_t flag)
7726 7583 {
7727 7584 vnode_t *vp = pp->p_vnode;
7728 7585 kmutex_t *pmtx;
7729 7586
7730 7587 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7731 7588
7732 7589 pmtx = sfmmu_page_enter(pp);
7733 7590
7734 7591 /*
7735 7592 * Caller is expected to hold page's io lock for VMODSORT to work
7736 7593 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7737 7594 * bit is cleared.
7738 7595 	 * We don't have an assert here so as to avoid tripping some existing
7739 7596 	 * third-party code. The dirty page is moved back to the top of the
7740 7597 	 * v_pages list after IO is done in pvn_write_done().
7741 7598 */
7742 7599 pp->p_nrm &= ~flag;
7743 7600 sfmmu_page_exit(pmtx);
7744 7601
7745 7602 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7746 7603
7747 7604 /*
7748 7605 * VMODSORT works by removing write permissions and getting
7749 7606 * a fault when a page is made dirty. At this point
7750 7607 * we need to remove write permission from all mappings
7751 7608 * to this page.
7752 7609 */
7753 7610 hat_page_clrwrt(pp);
7754 7611 }
7755 7612 }
7756 7613
7757 7614 uint_t
7758 7615 hat_page_getattr(page_t *pp, uint_t flag)
7759 7616 {
7760 7617 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7761 7618 return ((uint_t)(pp->p_nrm & flag));
7762 7619 }
7763 7620
7764 7621 /*
7765 7622 * DEBUG kernels: verify that a kernel va<->pa translation
7766 7623 * is safe by checking the underlying page_t is in a page
7767 7624 * relocation-safe state.
7768 7625 */
7769 7626 #ifdef DEBUG
7770 7627 void
7771 7628 sfmmu_check_kpfn(pfn_t pfn)
7772 7629 {
7773 7630 page_t *pp;
7774 7631 int index, cons;
7775 7632
7776 7633 if (hat_check_vtop == 0)
7777 7634 return;
7778 7635
7779 7636 if (kvseg.s_base == NULL || panicstr)
7780 7637 return;
7781 7638
7782 7639 pp = page_numtopp_nolock(pfn);
7783 7640 if (!pp)
7784 7641 return;
7785 7642
7786 7643 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7787 7644 return;
7788 7645
7789 7646 /*
7790 7647 * Handed a large kernel page, we dig up the root page since we
7791 7648 * know the root page might have the lock also.
7792 7649 */
7793 7650 if (pp->p_szc != 0) {
7794 7651 index = PP_MAPINDEX(pp);
7795 7652 cons = TTE8K;
7796 7653 again:
7797 7654 while (index != 0) {
7798 7655 index >>= 1;
7799 7656 if (index != 0)
7800 7657 cons++;
7801 7658 if (index & 0x1) {
7802 7659 pp = PP_GROUPLEADER(pp, cons);
7803 7660 goto again;
7804 7661 }
7805 7662 }
7806 7663 }
7807 7664
7808 7665 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7809 7666 return;
7810 7667
7811 7668 /*
7812 7669 * Pages need to be locked or allocated "permanent" (either from
7813 7670 * static_arena arena or explicitly setting PG_NORELOC when calling
7814 7671 * page_create_va()) for VA->PA translations to be valid.
7815 7672 */
7816 7673 if (!PP_ISNORELOC(pp))
7817 7674 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7818 7675 (void *)pp);
7819 7676 else
7820 7677 panic("Illegal VA->PA translation, pp 0x%p not locked",
7821 7678 (void *)pp);
7822 7679 }
7823 7680 #endif /* DEBUG */
7824 7681
7825 7682 /*
7826 7683 * Returns a page frame number for a given virtual address.
7827 7684 * Returns PFN_INVALID to indicate an invalid mapping
7828 7685 */
7829 7686 pfn_t
7830 7687 hat_getpfnum(struct hat *hat, caddr_t addr)
7831 7688 {
7832 7689 pfn_t pfn;
7833 7690 tte_t tte;
7834 7691
7835 7692 /*
7836 7693 * We would like to
7837 7694 * ASSERT(AS_LOCK_HELD(as));
7838 7695 * but we can't because the iommu driver will call this
7839 7696 * routine at interrupt time and it can't grab the as lock
7840 7697 * or it will deadlock: A thread could have the as lock
7841 7698 * and be waiting for io. The io can't complete
7842 7699 * because the interrupt thread is blocked trying to grab
7843 7700 * the as lock.
7844 7701 */
7845 7702
7846 7703 if (hat == ksfmmup) {
7847 7704 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7848 7705 ASSERT(segkmem_lpszc > 0);
7849 7706 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7850 7707 if (pfn != PFN_INVALID) {
7851 7708 sfmmu_check_kpfn(pfn);
7852 7709 return (pfn);
7853 7710 }
7854 7711 } else if (segkpm && IS_KPM_ADDR(addr)) {
7855 7712 return (sfmmu_kpm_vatopfn(addr));
7856 7713 }
7857 7714 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7858 7715 == PFN_SUSPENDED) {
7859 7716 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7860 7717 }
7861 7718 sfmmu_check_kpfn(pfn);
7862 7719 return (pfn);
7863 7720 } else {
7864 7721 return (sfmmu_uvatopfn(addr, hat, NULL));
7865 7722 }
7866 7723 }
7867 7724
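/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * hat_getpfnum() above is the generic VA-to-PFN query; PFN_INVALID means
 * no valid translation exists.  For kernel addresses the kernel hat
 * (kas.a_hat, i.e. ksfmmup here) is passed in.  A hypothetical helper
 * that turns a kernel virtual address into a physical address could be:
 */
static uint64_t
kva_to_pa(caddr_t va)
{
	pfn_t pfn = hat_getpfnum(kas.a_hat, va);

	if (pfn == PFN_INVALID)
		return ((uint64_t)-1);
	return (((uint64_t)pfn << MMU_PAGESHIFT) |
	    ((uintptr_t)va & MMU_PAGEOFFSET));
}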
7868 7725 /*
7869 7726 * This routine will return both pfn and tte for the vaddr.
7870 7727 */
7871 7728 static pfn_t
7872 7729 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7873 7730 {
7874 7731 struct hmehash_bucket *hmebp;
7875 7732 hmeblk_tag hblktag;
7876 7733 int hmeshift, hashno = 1;
7877 7734 struct hme_blk *hmeblkp = NULL;
7878 7735 tte_t tte;
7879 7736
7880 7737 struct sf_hment *sfhmep;
7881 7738 pfn_t pfn;
7882 7739
7883 7740 /* support for ISM */
7884 7741 ism_map_t *ism_map;
7885 7742 ism_blk_t *ism_blkp;
7886 7743 int i;
7887 7744 sfmmu_t *ism_hatid = NULL;
7888 7745 sfmmu_t *locked_hatid = NULL;
7889 7746 sfmmu_t *sv_sfmmup = sfmmup;
7890 7747 caddr_t sv_vaddr = vaddr;
7891 7748 sf_srd_t *srdp;
7892 7749
7893 7750 if (ttep == NULL) {
7894 7751 ttep = &tte;
7895 7752 } else {
7896 7753 ttep->ll = 0;
7897 7754 }
7898 7755
7899 7756 ASSERT(sfmmup != ksfmmup);
7900 7757 SFMMU_STAT(sf_user_vtop);
7901 7758 /*
7902 7759 * Set ism_hatid if vaddr falls in a ISM segment.
7903 7760 */
7904 7761 ism_blkp = sfmmup->sfmmu_iblk;
7905 7762 if (ism_blkp != NULL) {
7906 7763 sfmmu_ismhat_enter(sfmmup, 0);
7907 7764 locked_hatid = sfmmup;
7908 7765 }
7909 7766 while (ism_blkp != NULL && ism_hatid == NULL) {
7910 7767 ism_map = ism_blkp->iblk_maps;
7911 7768 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
7912 7769 if (vaddr >= ism_start(ism_map[i]) &&
7913 7770 vaddr < ism_end(ism_map[i])) {
7914 7771 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7915 7772 vaddr = (caddr_t)(vaddr -
7916 7773 ism_start(ism_map[i]));
7917 7774 break;
7918 7775 }
7919 7776 }
7920 7777 ism_blkp = ism_blkp->iblk_next;
7921 7778 }
7922 7779 if (locked_hatid) {
7923 7780 sfmmu_ismhat_exit(locked_hatid, 0);
7924 7781 }
7925 7782
7926 7783 hblktag.htag_id = sfmmup;
7927 7784 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7928 7785 do {
7929 7786 hmeshift = HME_HASH_SHIFT(hashno);
7930 7787 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7931 7788 hblktag.htag_rehash = hashno;
7932 7789 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7933 7790
7934 7791 SFMMU_HASH_LOCK(hmebp);
7935 7792
7936 7793 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7937 7794 if (hmeblkp != NULL) {
7938 7795 ASSERT(!hmeblkp->hblk_shared);
7939 7796 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7940 7797 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7941 7798 SFMMU_HASH_UNLOCK(hmebp);
7942 7799 if (TTE_IS_VALID(ttep)) {
7943 7800 pfn = TTE_TO_PFN(vaddr, ttep);
7944 7801 return (pfn);
7945 7802 }
7946 7803 break;
7947 7804 }
7948 7805 SFMMU_HASH_UNLOCK(hmebp);
7949 7806 hashno++;
7950 7807 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7951 7808
7952 7809 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7953 7810 return (PFN_INVALID);
7954 7811 }
7955 7812 srdp = sv_sfmmup->sfmmu_srdp;
7956 7813 ASSERT(srdp != NULL);
7957 7814 ASSERT(srdp->srd_refcnt != 0);
7958 7815 hblktag.htag_id = srdp;
7959 7816 hashno = 1;
7960 7817 do {
7961 7818 hmeshift = HME_HASH_SHIFT(hashno);
7962 7819 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7963 7820 hblktag.htag_rehash = hashno;
7964 7821 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7965 7822
7966 7823 SFMMU_HASH_LOCK(hmebp);
7967 7824 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7968 7825 hmeblkp = hmeblkp->hblk_next) {
7969 7826 uint_t rid;
7970 7827 sf_region_t *rgnp;
7971 7828 caddr_t rsaddr;
7972 7829 caddr_t readdr;
7973 7830
7974 7831 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7975 7832 sv_sfmmup->sfmmu_hmeregion_map)) {
7976 7833 continue;
7977 7834 }
7978 7835 ASSERT(hmeblkp->hblk_shared);
7979 7836 rid = hmeblkp->hblk_tag.htag_rid;
7980 7837 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7981 7838 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7982 7839 rgnp = srdp->srd_hmergnp[rid];
7983 7840 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7984 7841 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7985 7842 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7986 7843 rsaddr = rgnp->rgn_saddr;
7987 7844 readdr = rsaddr + rgnp->rgn_size;
7988 7845 #ifdef DEBUG
7989 7846 if (TTE_IS_VALID(ttep) ||
7990 7847 get_hblk_ttesz(hmeblkp) > TTE8K) {
7991 7848 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7992 7849 ASSERT(eva > sv_vaddr);
7993 7850 ASSERT(sv_vaddr >= rsaddr);
7994 7851 ASSERT(sv_vaddr < readdr);
7995 7852 ASSERT(eva <= readdr);
7996 7853 }
7997 7854 #endif /* DEBUG */
7998 7855 /*
7999 7856 * Continue the search if we
8000 7857 * found an invalid 8K tte outside of the area
8001 7858 * covered by this hmeblk's region.
8002 7859 */
8003 7860 if (TTE_IS_VALID(ttep)) {
8004 7861 SFMMU_HASH_UNLOCK(hmebp);
8005 7862 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8006 7863 return (pfn);
8007 7864 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8008 7865 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8009 7866 SFMMU_HASH_UNLOCK(hmebp);
8010 7867 pfn = PFN_INVALID;
8011 7868 return (pfn);
8012 7869 }
8013 7870 }
8014 7871 SFMMU_HASH_UNLOCK(hmebp);
8015 7872 hashno++;
8016 7873 } while (hashno <= mmu_hashcnt);
8017 7874 return (PFN_INVALID);
8018 7875 }
8019 7876
8020 7877
8021 7878 /*
8022 7879  * For compatibility with AT&T and later optimizations
8023 7880 */
8024 7881 /* ARGSUSED */
8025 7882 void
8026 7883 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8027 7884 {
8028 7885 ASSERT(hat != NULL);
8029 7886 }
8030 7887
8031 7888 /*
8032 7889 * Return the number of mappings to a particular page. This number is an
8033 7890 * approximation of the number of people sharing the page.
8034 7891 *
8035 7892 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8036 7893  * hat_page_checkshare() can be used instead to compare a threshold against a
8037 7894  * share count that reflects the number of region sharers, albeit at higher cost.
8038 7895 */
8039 7896 ulong_t
8040 7897 hat_page_getshare(page_t *pp)
8041 7898 {
8042 7899 page_t *spp = pp; /* start page */
8043 7900 kmutex_t *pml;
8044 7901 ulong_t cnt;
8045 7902 int index, sz = TTE64K;
8046 7903
8047 7904 /*
8048 7905 * We need to grab the mlist lock to make sure any outstanding
8049 7906 * load/unloads complete. Otherwise we could return zero
8050 7907 	 * even though the unload(s) haven't finished yet.
8051 7908 */
8052 7909 pml = sfmmu_mlist_enter(spp);
8053 7910 cnt = spp->p_share;
8054 7911
8055 7912 #ifdef VAC
8056 7913 if (kpm_enable)
8057 7914 cnt += spp->p_kpmref;
8058 7915 #endif
8059 7916 if (vpm_enable && pp->p_vpmref) {
8060 7917 cnt += 1;
8061 7918 }
8062 7919
8063 7920 /*
8064 7921 * If we have any large mappings, we count the number of
8065 7922 * mappings that this large page is part of.
8066 7923 */
8067 7924 index = PP_MAPINDEX(spp);
8068 7925 index >>= 1;
8069 7926 while (index) {
8070 7927 pp = PP_GROUPLEADER(spp, sz);
8071 7928 if ((index & 0x1) && pp != spp) {
8072 7929 cnt += pp->p_share;
8073 7930 spp = pp;
8074 7931 }
8075 7932 index >>= 1;
8076 7933 sz++;
8077 7934 }
8078 7935 sfmmu_mlist_exit(pml);
8079 7936 return (cnt);
8080 7937 }
8081 7938
8082 7939 /*
8083 7940 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8084 7941 * otherwise. Count shared hmeblks by region's refcnt.
8085 7942 */
8086 7943 int
8087 7944 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8088 7945 {
8089 7946 kmutex_t *pml;
8090 7947 ulong_t cnt = 0;
8091 7948 int index, sz = TTE8K;
8092 7949 struct sf_hment *sfhme, *tmphme = NULL;
8093 7950 struct hme_blk *hmeblkp;
8094 7951
8095 7952 pml = sfmmu_mlist_enter(pp);
8096 7953
8097 7954 #ifdef VAC
8098 7955 if (kpm_enable)
8099 7956 cnt = pp->p_kpmref;
8100 7957 #endif
8101 7958
8102 7959 if (vpm_enable && pp->p_vpmref) {
8103 7960 cnt += 1;
8104 7961 }
8105 7962
8106 7963 if (pp->p_share + cnt > sh_thresh) {
8107 7964 sfmmu_mlist_exit(pml);
8108 7965 return (1);
8109 7966 }
8110 7967
8111 7968 index = PP_MAPINDEX(pp);
8112 7969
8113 7970 again:
8114 7971 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8115 7972 tmphme = sfhme->hme_next;
8116 7973 if (IS_PAHME(sfhme)) {
8117 7974 continue;
8118 7975 }
8119 7976
8120 7977 hmeblkp = sfmmu_hmetohblk(sfhme);
8121 7978 if (hme_size(sfhme) != sz) {
8122 7979 continue;
8123 7980 }
8124 7981
8125 7982 if (hmeblkp->hblk_shared) {
8126 7983 sf_srd_t *srdp = hblktosrd(hmeblkp);
8127 7984 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8128 7985 sf_region_t *rgnp;
8129 7986 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8130 7987 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8131 7988 ASSERT(srdp != NULL);
8132 7989 rgnp = srdp->srd_hmergnp[rid];
8133 7990 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8134 7991 rgnp, rid);
8135 7992 cnt += rgnp->rgn_refcnt;
8136 7993 } else {
8137 7994 cnt++;
8138 7995 }
8139 7996 if (cnt > sh_thresh) {
8140 7997 sfmmu_mlist_exit(pml);
8141 7998 return (1);
8142 7999 }
8143 8000 }
8144 8001
8145 8002 index >>= 1;
8146 8003 sz++;
8147 8004 while (index) {
8148 8005 pp = PP_GROUPLEADER(pp, sz);
8149 8006 ASSERT(sfmmu_mlist_held(pp));
8150 8007 if (index & 0x1) {
8151 8008 goto again;
8152 8009 }
8153 8010 index >>= 1;
8154 8011 sz++;
8155 8012 }
8156 8013 sfmmu_mlist_exit(pml);
8157 8014 return (0);
8158 8015 }
8159 8016
8160 8017 /*
8161 8018 * Unload all large mappings to the pp and reset the p_szc field of every
8162 8019 * constituent page according to the remaining mappings.
8163 8020 *
8164 8021 * pp must be locked SE_EXCL. Even though no other constituent pages are
8165 8022 * locked it's legal to unload the large mappings to the pp because all
8166 8023 * constituent pages of large locked mappings have to be locked SE_SHARED.
8167 8024  * This means that if we have an SE_EXCL lock on one of the constituent pages,
8168 8025  * none of the large mappings to pp are locked.
8169 8026 *
8170 8027 * Decrease p_szc field starting from the last constituent page and ending
8171 8028 * with the root page. This method is used because other threads rely on the
8172 8029  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8173 8030  * is demoted, other threads will succeed in sfmmu_mlspl_enter(). This
8174 8031 * ensures that p_szc changes of the constituent pages appears atomic for all
8175 8032 * threads that use sfmmu_mlspl_enter() to examine p_szc field.
8176 8033 *
8177 8034 * This mechanism is only used for file system pages where it's not always
8178 8035 * possible to get SE_EXCL locks on all constituent pages to demote the size
8179 8036 * code (as is done for anonymous or kernel large pages).
8180 8037 *
8181 8038 * See more comments in front of sfmmu_mlspl_enter().
8182 8039 */
8183 8040 void
8184 8041 hat_page_demote(page_t *pp)
8185 8042 {
8186 8043 int index;
8187 8044 int sz;
8188 8045 cpuset_t cpuset;
8189 8046 int sync = 0;
8190 8047 page_t *rootpp;
8191 8048 struct sf_hment *sfhme;
8192 8049 struct sf_hment *tmphme = NULL;
8193 8050 struct hme_blk *hmeblkp;
8194 8051 uint_t pszc;
8195 8052 page_t *lastpp;
8196 8053 cpuset_t tset;
8197 8054 pgcnt_t npgs;
8198 8055 kmutex_t *pml;
8199 8056 kmutex_t *pmtx = NULL;
8200 8057
8201 8058 ASSERT(PAGE_EXCL(pp));
8202 8059 ASSERT(!PP_ISFREE(pp));
8203 8060 ASSERT(!PP_ISKAS(pp));
8204 8061 ASSERT(page_szc_lock_assert(pp));
8205 8062 pml = sfmmu_mlist_enter(pp);
8206 8063
8207 8064 pszc = pp->p_szc;
8208 8065 if (pszc == 0) {
8209 8066 goto out;
8210 8067 }
8211 8068
8212 8069 index = PP_MAPINDEX(pp) >> 1;
8213 8070
8214 8071 if (index) {
8215 8072 CPUSET_ZERO(cpuset);
8216 8073 sz = TTE64K;
8217 8074 sync = 1;
8218 8075 }
8219 8076
8220 8077 while (index) {
8221 8078 if (!(index & 0x1)) {
8222 8079 index >>= 1;
8223 8080 sz++;
8224 8081 continue;
8225 8082 }
8226 8083 ASSERT(sz <= pszc);
8227 8084 rootpp = PP_GROUPLEADER(pp, sz);
8228 8085 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8229 8086 tmphme = sfhme->hme_next;
8230 8087 ASSERT(!IS_PAHME(sfhme));
8231 8088 hmeblkp = sfmmu_hmetohblk(sfhme);
8232 8089 if (hme_size(sfhme) != sz) {
8233 8090 continue;
8234 8091 }
8235 8092 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8236 8093 CPUSET_OR(cpuset, tset);
8237 8094 }
8238 8095 if (index >>= 1) {
8239 8096 sz++;
8240 8097 }
8241 8098 }
8242 8099
8243 8100 ASSERT(!PP_ISMAPPED_LARGE(pp));
8244 8101
8245 8102 if (sync) {
8246 8103 xt_sync(cpuset);
8247 8104 #ifdef VAC
8248 8105 if (PP_ISTNC(pp)) {
8249 8106 conv_tnc(rootpp, sz);
8250 8107 }
8251 8108 #endif /* VAC */
8252 8109 }
8253 8110
8254 8111 pmtx = sfmmu_page_enter(pp);
8255 8112
8256 8113 ASSERT(pp->p_szc == pszc);
8257 8114 rootpp = PP_PAGEROOT(pp);
8258 8115 ASSERT(rootpp->p_szc == pszc);
8259 8116 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8260 8117
8261 8118 while (lastpp != rootpp) {
8262 8119 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8263 8120 ASSERT(sz < pszc);
8264 8121 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8265 8122 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8266 8123 while (--npgs > 0) {
8267 8124 lastpp->p_szc = (uchar_t)sz;
8268 8125 lastpp = PP_PAGEPREV(lastpp);
8269 8126 }
8270 8127 if (sz) {
8271 8128 /*
8272 8129 * make sure that, before the current root's pszc
8273 8130 * is updated, all updates to the constituent pages'
8274 8131 * pszc fields are globally visible.
8275 8132 */
8276 8133 membar_producer();
8277 8134 }
8278 8135 lastpp->p_szc = sz;
8279 8136 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8280 8137 if (lastpp != rootpp) {
8281 8138 lastpp = PP_PAGEPREV(lastpp);
8282 8139 }
8283 8140 }
8284 8141 if (sz == 0) {
8285 8142 /* the loop above doesn't cover this case */
8286 8143 rootpp->p_szc = 0;
8287 8144 }
8288 8145 out:
8289 8146 ASSERT(pp->p_szc == 0);
8290 8147 if (pmtx != NULL) {
8291 8148 sfmmu_page_exit(pmtx);
8292 8149 }
8293 8150 sfmmu_mlist_exit(pml);
8294 8151 }
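
The comment above hat_page_demote() relies on readers finding the lock to synchronize on through the root page's p_szc (sfmmu_mlspl_enter(), which is not part of this hunk). Below is a simplified, self-contained user-space model of that reader-side retry pattern, not the kernel code; the names, the group-size arithmetic, and the per-page mutex are invented for illustration, and the demotion-side locking is elided.

    #include <pthread.h>
    #include <stddef.h>

    struct mpage {
            unsigned char   p_szc;      /* size code; 0 means the smallest page */
            pthread_mutex_t p_lock;     /* meaningful in the group root only */
    };

    static struct mpage pages[512];     /* models physically contiguous page_t's */

    static void
    mpage_init(void)
    {
            for (size_t i = 0; i < 512; i++) {
                    pages[i].p_szc = 0;
                    (void) pthread_mutex_init(&pages[i].p_lock, NULL);
            }
    }

    /* Toy group-leader calculation: align the index down to the group size. */
    static size_t
    group_root(size_t i, int szc)
    {
            size_t npgs = (size_t)1 << (3 * szc);   /* stand-in for TTEPAGES(szc) */
            return (i & ~(npgs - 1));
    }

    /*
     * Reader: pick the lock named by p_szc, then re-check p_szc under that
     * lock and retry if a concurrent demotion changed it.  Because the
     * demoting thread updates the constituent pages first, publishes them
     * with a store barrier, and only then lowers the root's p_szc, a racing
     * reader either retries or sees a group that is already consistent.
     */
    static pthread_mutex_t *
    mpage_lock_enter(size_t i)
    {
            for (;;) {
                    int szc = pages[i].p_szc;
                    struct mpage *root = &pages[group_root(i, szc)];

                    pthread_mutex_lock(&root->p_lock);
                    if (pages[i].p_szc == szc)
                            return (&root->p_lock);         /* stable */
                    pthread_mutex_unlock(&root->p_lock);    /* raced; retry */
            }
    }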
8295 8152
8296 8153 /*
8297 8154 * Refresh the HAT ismttecnt[] element for size szc.
8298 8155 * Caller must have set ISM busy flag to prevent mapping
8299 8156 * lists from changing while we're traversing them.
8300 8157 */
8301 8158 pgcnt_t
8302 8159 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8303 8160 {
8304 8161 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8305 8162 ism_map_t *ism_map;
8306 8163 pgcnt_t npgs = 0;
8307 8164 pgcnt_t npgs_scd = 0;
8308 8165 int j;
8309 8166 sf_scd_t *scdp;
8310 8167 uchar_t rid;
8311 8168
8312 8169 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8313 8170 scdp = sfmmup->sfmmu_scdp;
8314 8171
8315 8172 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8316 8173 ism_map = ism_blkp->iblk_maps;
8317 8174 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8318 8175 rid = ism_map[j].imap_rid;
8319 8176 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8320 8177 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8321 8178
8322 8179 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8323 8180 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8324 8181 /* ISM is in sfmmup's SCD */
8325 8182 npgs_scd +=
8326 8183 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8327 8184 } else {
8328 8185 /* ISM is not in SCD */
8329 8186 npgs +=
8330 8187 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8331 8188 }
8332 8189 }
8333 8190 }
8334 8191 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8335 8192 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8336 8193 return (npgs);
8337 8194 }
8338 8195
8339 8196 /*
8340 8197 * Yield the memory claim requirement for an address space.
8341 8198 *
8342 8199 * This is currently implemented as the number of bytes that have active
8343 8200 * hardware translations that have page structures. Therefore, it can
8344 8201 * underestimate the traditional resident set size, e.g., if the
8345 8202 * physical page is present and the hardware translation is missing;
8346 8203 * and it can overestimate the rss, e.g., if there are active
8347 8204 * translations to a frame buffer with page structs.
8348 8205 * Also, it does not take sharing into account.
8349 8206 *
8350 8207 * Note that we don't acquire locks here since this function is most often
8351 8208 * called from the clock thread.
8352 8209 */
8353 8210 size_t
8354 8211 hat_get_mapped_size(struct hat *hat)
8355 8212 {
8356 8213 size_t assize = 0;
8357 8214 int i;
8358 8215
8359 8216 if (hat == NULL)
8360 8217 return (0);
8361 8218
8362 8219 for (i = 0; i < mmu_page_sizes; i++)
8363 8220 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8364 8221 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8365 8222
8366 8223 if (hat->sfmmu_iblk == NULL)
8367 8224 return (assize);
8368 8225
8369 8226 for (i = 0; i < mmu_page_sizes; i++)
8370 8227 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8371 8228 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8372 8229
8373 8230 return (assize);
8374 8231 }
8375 8232
8376 8233 int
8377 8234 hat_stats_enable(struct hat *hat)
8378 8235 {
8379 8236 hatlock_t *hatlockp;
8380 8237
8381 8238 hatlockp = sfmmu_hat_enter(hat);
8382 8239 hat->sfmmu_rmstat++;
8383 8240 sfmmu_hat_exit(hatlockp);
8384 8241 return (1);
8385 8242 }
8386 8243
8387 8244 void
8388 8245 hat_stats_disable(struct hat *hat)
8389 8246 {
8390 8247 hatlock_t *hatlockp;
8391 8248
8392 8249 hatlockp = sfmmu_hat_enter(hat);
8393 8250 hat->sfmmu_rmstat--;
8394 8251 sfmmu_hat_exit(hatlockp);
8395 8252 }
8396 8253
8397 8254 /*
8398 8255 * Routines for entering or removing ourselves from the
8399 8256 * ism_hat's mapping list. This is used for both private and
8400 8257 * SCD hats.
8401 8258 */
8402 8259 static void
8403 8260 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8404 8261 {
8405 8262 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8406 8263
8407 8264 iment->iment_prev = NULL;
8408 8265 iment->iment_next = ism_hat->sfmmu_iment;
8409 8266 if (ism_hat->sfmmu_iment) {
8410 8267 ism_hat->sfmmu_iment->iment_prev = iment;
8411 8268 }
8412 8269 ism_hat->sfmmu_iment = iment;
8413 8270 }
8414 8271
8415 8272 static void
8416 8273 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8417 8274 {
8418 8275 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8419 8276
8420 8277 if (ism_hat->sfmmu_iment == NULL) {
8421 8278 panic("ism map entry remove - no entries");
8422 8279 }
8423 8280
8424 8281 if (iment->iment_prev) {
8425 8282 ASSERT(ism_hat->sfmmu_iment != iment);
8426 8283 iment->iment_prev->iment_next = iment->iment_next;
8427 8284 } else {
8428 8285 ASSERT(ism_hat->sfmmu_iment == iment);
8429 8286 ism_hat->sfmmu_iment = iment->iment_next;
8430 8287 }
8431 8288
8432 8289 if (iment->iment_next) {
8433 8290 iment->iment_next->iment_prev = iment->iment_prev;
8434 8291 }
8435 8292
8436 8293 /*
8437 8294 * zero out the entry
8438 8295 */
8439 8296 iment->iment_next = NULL;
8440 8297 iment->iment_prev = NULL;
8441 8298 iment->iment_hat = NULL;
8442 8299 iment->iment_base_va = 0;
8443 8300 }
8444 8301
8445 8302 /*
8446 8303 * Hat_share()/unshare() return a (non-zero) error
8447 8304 * when saddr and daddr are not properly aligned.
8448 8305 *
8449 8306 * The top level mapping element determines the alignment
8450 8307 * requirement for saddr and daddr, depending on different
8451 8308 * architectures.
8452 8309 *
8453 8310 * When hat_share()/unshare() are not supported,
8454 8311 * HATOP_SHARE()/UNSHARE() return 0
8455 8312 */
8456 8313 int
8457 8314 hat_share(struct hat *sfmmup, caddr_t addr,
8458 8315 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8459 8316 {
8460 8317 ism_blk_t *ism_blkp;
8461 8318 ism_blk_t *new_iblk;
8462 8319 ism_map_t *ism_map;
8463 8320 ism_ment_t *ism_ment;
8464 8321 int i, added;
8465 8322 hatlock_t *hatlockp;
8466 8323 int reload_mmu = 0;
8467 8324 uint_t ismshift = page_get_shift(ismszc);
8468 8325 size_t ismpgsz = page_get_pagesize(ismszc);
8469 8326 uint_t ismmask = (uint_t)ismpgsz - 1;
8470 8327 size_t sh_size = ISM_SHIFT(ismshift, len);
8471 8328 ushort_t ismhatflag;
8472 8329 hat_region_cookie_t rcookie;
8473 8330 sf_scd_t *old_scdp;
8474 8331
8475 8332 #ifdef DEBUG
8476 8333 caddr_t eaddr = addr + len;
8477 8334 #endif /* DEBUG */
8478 8335
8479 8336 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8480 8337 ASSERT(sptaddr == ISMID_STARTADDR);
8481 8338 /*
8482 8339 * Check the alignment.
8483 8340 */
8484 8341 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8485 8342 return (EINVAL);
8486 8343
8487 8344 /*
8488 8345 * Check size alignment.
8489 8346 */
8490 8347 if (!ISM_ALIGNED(ismshift, len))
8491 8348 return (EINVAL);
8492 8349
8493 8350 /*
8494 8351 * Allocate ism_ment for the ism_hat's mapping list, and an
8495 8352 * ism map blk in case we need one. We must do our
8496 8353 * allocations before acquiring locks to prevent a deadlock
8497 8354 * in the kmem allocator on the mapping list lock.
8498 8355 */
8499 8356 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8500 8357 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8501 8358
8502 8359 /*
8503 8360 * Serialize ISM mappings with the ISM busy flag, and also the
8504 8361 * trap handlers.
8505 8362 */
8506 8363 sfmmu_ismhat_enter(sfmmup, 0);
8507 8364
8508 8365 /*
8509 8366 * Allocate an ism map blk if necessary.
8510 8367 */
8511 8368 if (sfmmup->sfmmu_iblk == NULL) {
8512 8369 sfmmup->sfmmu_iblk = new_iblk;
8513 8370 bzero(new_iblk, sizeof (*new_iblk));
8514 8371 new_iblk->iblk_nextpa = (uint64_t)-1;
8515 8372 membar_stst(); /* make sure next ptr visible to all CPUs */
8516 8373 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8517 8374 reload_mmu = 1;
8518 8375 new_iblk = NULL;
8519 8376 }
8520 8377
8521 8378 #ifdef DEBUG
8522 8379 /*
8523 8380 * Make sure mapping does not already exist.
8524 8381 */
8525 8382 ism_blkp = sfmmup->sfmmu_iblk;
8526 8383 while (ism_blkp != NULL) {
8527 8384 ism_map = ism_blkp->iblk_maps;
8528 8385 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8529 8386 if ((addr >= ism_start(ism_map[i]) &&
8530 8387 addr < ism_end(ism_map[i])) ||
8531 8388 eaddr > ism_start(ism_map[i]) &&
8532 8389 eaddr <= ism_end(ism_map[i])) {
8533 8390 panic("sfmmu_share: Already mapped!");
8534 8391 }
8535 8392 }
8536 8393 ism_blkp = ism_blkp->iblk_next;
8537 8394 }
8538 8395 #endif /* DEBUG */
8539 8396
8540 8397 ASSERT(ismszc >= TTE4M);
8541 8398 if (ismszc == TTE4M) {
8542 8399 ismhatflag = HAT_4M_FLAG;
8543 8400 } else if (ismszc == TTE32M) {
8544 8401 ismhatflag = HAT_32M_FLAG;
8545 8402 } else if (ismszc == TTE256M) {
8546 8403 ismhatflag = HAT_256M_FLAG;
8547 8404 }
8548 8405 /*
8549 8406 * Add mapping to first available mapping slot.
8550 8407 */
8551 8408 ism_blkp = sfmmup->sfmmu_iblk;
8552 8409 added = 0;
8553 8410 while (!added) {
8554 8411 ism_map = ism_blkp->iblk_maps;
8555 8412 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8556 8413 if (ism_map[i].imap_ismhat == NULL) {
8557 8414
8558 8415 ism_map[i].imap_ismhat = ism_hatid;
8559 8416 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8560 8417 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8561 8418 ism_map[i].imap_hatflags = ismhatflag;
8562 8419 ism_map[i].imap_sz_mask = ismmask;
8563 8420 /*
8564 8421 * imap_seg is checked in ISM_CHECK to see if it is
8565 8422 * non-NULL; if so, the other info is assumed valid.
8566 8423 */
8567 8424 membar_stst();
8568 8425 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8569 8426 ism_map[i].imap_ment = ism_ment;
8570 8427
8571 8428 /*
8572 8429 * Now add ourselves to the ism_hat's
8573 8430 * mapping list.
8574 8431 */
8575 8432 ism_ment->iment_hat = sfmmup;
8576 8433 ism_ment->iment_base_va = addr;
8577 8434 ism_hatid->sfmmu_ismhat = 1;
8578 8435 mutex_enter(&ism_mlist_lock);
8579 8436 iment_add(ism_ment, ism_hatid);
8580 8437 mutex_exit(&ism_mlist_lock);
8581 8438 added = 1;
8582 8439 break;
8583 8440 }
8584 8441 }
8585 8442 if (!added && ism_blkp->iblk_next == NULL) {
8586 8443 ism_blkp->iblk_next = new_iblk;
8587 8444 new_iblk = NULL;
8588 8445 bzero(ism_blkp->iblk_next,
8589 8446 sizeof (*ism_blkp->iblk_next));
8590 8447 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8591 8448 membar_stst();
8592 8449 ism_blkp->iblk_nextpa =
8593 8450 va_to_pa((caddr_t)ism_blkp->iblk_next);
8594 8451 }
8595 8452 ism_blkp = ism_blkp->iblk_next;
8596 8453 }
8597 8454
8598 8455 /*
8599 8456 * After calling hat_join_region, sfmmup may join a new SCD or
8600 8457 * move from the old scd to a new scd, in which case, we want to
8601 8458 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8602 8459 * sfmmu_check_page_sizes at the end of this routine.
8603 8460 */
8604 8461 old_scdp = sfmmup->sfmmu_scdp;
8605 8462
8606 8463 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8607 8464 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8608 8465 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8609 8466 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8610 8467 }
8611 8468 /*
8612 8469 * Update our counters for this sfmmup's ism mappings.
8613 8470 */
8614 8471 for (i = 0; i <= ismszc; i++) {
8615 8472 if (!(disable_ism_large_pages & (1 << i)))
8616 8473 (void) ism_tsb_entries(sfmmup, i);
8617 8474 }
8618 8475
8619 8476 /*
8620 8477 * For ISM and DISM we do not support 512K pages, so we only
8621 8478 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8622 8479 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8623 8480 *
8624 8481 * Need to set 32M/256M ISM flags to make sure
8625 8482 * sfmmu_check_page_sizes() enables them on Panther.
8626 8483 */
8627 8484 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8628 8485
8629 8486 switch (ismszc) {
8630 8487 case TTE256M:
8631 8488 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8632 8489 hatlockp = sfmmu_hat_enter(sfmmup);
8633 8490 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8634 8491 sfmmu_hat_exit(hatlockp);
8635 8492 }
8636 8493 break;
8637 8494 case TTE32M:
8638 8495 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8639 8496 hatlockp = sfmmu_hat_enter(sfmmup);
8640 8497 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8641 8498 sfmmu_hat_exit(hatlockp);
8642 8499 }
8643 8500 break;
8644 8501 default:
8645 8502 break;
8646 8503 }
8647 8504
8648 8505 /*
8649 8506 * If we updated the ismblkpa for this HAT we must make
8650 8507 * sure all CPUs running this process reload their tsbmiss area.
8651 8508 * Otherwise they will fail to load the mappings in the tsbmiss
8652 8509 * handler and will loop calling pagefault().
8653 8510 */
8654 8511 if (reload_mmu) {
8655 8512 hatlockp = sfmmu_hat_enter(sfmmup);
8656 8513 sfmmu_sync_mmustate(sfmmup);
8657 8514 sfmmu_hat_exit(hatlockp);
8658 8515 }
8659 8516
8660 8517 sfmmu_ismhat_exit(sfmmup, 0);
8661 8518
8662 8519 /*
8663 8520 * Free up ismblk if we didn't use it.
8664 8521 */
8665 8522 if (new_iblk != NULL)
8666 8523 kmem_cache_free(ism_blk_cache, new_iblk);
8667 8524
8668 8525 /*
8669 8526 * Check TSB and TLB page sizes.
8670 8527 */
8671 8528 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8672 8529 sfmmu_check_page_sizes(sfmmup, 0);
8673 8530 } else {
8674 8531 sfmmu_check_page_sizes(sfmmup, 1);
8675 8532 }
8676 8533 return (0);
8677 8534 }
8678 8535
8679 8536 /*
8680 8537 * hat_unshare removes exactly one ism_map from
8681 8538 * this process's as. It expects multiple calls
8682 8539 * to hat_unshare for multiple shm segments.
8683 8540 */
8684 8541 void
8685 8542 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8686 8543 {
8687 8544 ism_map_t *ism_map;
8688 8545 ism_ment_t *free_ment = NULL;
8689 8546 ism_blk_t *ism_blkp;
8690 8547 struct hat *ism_hatid;
8691 8548 int found, i;
8692 8549 hatlock_t *hatlockp;
8693 8550 struct tsb_info *tsbinfo;
8694 8551 uint_t ismshift = page_get_shift(ismszc);
8695 8552 size_t sh_size = ISM_SHIFT(ismshift, len);
8696 8553 uchar_t ism_rid;
8697 8554 sf_scd_t *old_scdp;
8698 8555
8699 8556 ASSERT(ISM_ALIGNED(ismshift, addr));
8700 8557 ASSERT(ISM_ALIGNED(ismshift, len));
8701 8558 ASSERT(sfmmup != NULL);
8702 8559 ASSERT(sfmmup != ksfmmup);
8703 8560
8704 8561 ASSERT(sfmmup->sfmmu_as != NULL);
8705 8562
8706 8563 /*
8707 8564 * Make sure that during the entire time ISM mappings are removed,
8708 8565 * the trap handlers serialize behind us, and that no one else
8709 8566 * can be mucking with ISM mappings. This also lets us get away
8710 8567 * with not doing expensive cross calls to flush the TLB -- we
8711 8568 * just discard the context, flush the entire TSB, and call it
8712 8569 * a day.
8713 8570 */
8714 8571 sfmmu_ismhat_enter(sfmmup, 0);
8715 8572
8716 8573 /*
8717 8574 * Remove the mapping.
8718 8575 *
8719 8576 * We can't have any holes in the ism map.
8720 8577 * The tsb miss code, while searching the ism map, will
8721 8578 * stop on an empty map slot. So we must move every
8722 8579 * entry past the hole up by one, if there is one.
8723 8580 *
8724 8581 * Also empty ism map blks are not freed until the
8725 8582 * process exits. This is to prevent a MT race condition
8726 8583 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8727 8584 */
8728 8585 found = 0;
8729 8586 ism_blkp = sfmmup->sfmmu_iblk;
8730 8587 while (!found && ism_blkp != NULL) {
8731 8588 ism_map = ism_blkp->iblk_maps;
8732 8589 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8733 8590 if (addr == ism_start(ism_map[i]) &&
8734 8591 sh_size == (size_t)(ism_size(ism_map[i]))) {
8735 8592 found = 1;
8736 8593 break;
8737 8594 }
8738 8595 }
8739 8596 if (!found)
8740 8597 ism_blkp = ism_blkp->iblk_next;
8741 8598 }
8742 8599
8743 8600 if (found) {
8744 8601 ism_hatid = ism_map[i].imap_ismhat;
8745 8602 ism_rid = ism_map[i].imap_rid;
8746 8603 ASSERT(ism_hatid != NULL);
8747 8604 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8748 8605
8749 8606 /*
8750 8607 * After hat_leave_region, the sfmmup may leave SCD,
8751 8608 * in which case, we want to grow the private tsb size when
8752 8609 * calling sfmmu_check_page_sizes at the end of the routine.
8753 8610 */
8754 8611 old_scdp = sfmmup->sfmmu_scdp;
8755 8612 /*
8756 8613 * Then remove ourselves from the region.
8757 8614 */
8758 8615 if (ism_rid != SFMMU_INVALID_ISMRID) {
8759 8616 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8760 8617 HAT_REGION_ISM);
8761 8618 }
8762 8619
8763 8620 /*
8764 8621 * And now guarantee that any other cpu
8765 8622 * that tries to process an ISM miss
8766 8623 * will go to tl=0.
8767 8624 */
8768 8625 hatlockp = sfmmu_hat_enter(sfmmup);
8769 8626 sfmmu_invalidate_ctx(sfmmup);
8770 8627 sfmmu_hat_exit(hatlockp);
8771 8628
8772 8629 /*
8773 8630 * Remove ourselves from the ism mapping list.
8774 8631 */
8775 8632 mutex_enter(&ism_mlist_lock);
8776 8633 iment_sub(ism_map[i].imap_ment, ism_hatid);
8777 8634 mutex_exit(&ism_mlist_lock);
8778 8635 free_ment = ism_map[i].imap_ment;
8779 8636
8780 8637 /*
8781 8638 * We delete the ism map by copying
8782 8639 * the next map over the current one.
8783 8640 * We will take the next one in the maps
8784 8641 * array or from the next ism_blk.
8785 8642 */
8786 8643 while (ism_blkp != NULL) {
8787 8644 ism_map = ism_blkp->iblk_maps;
8788 8645 while (i < (ISM_MAP_SLOTS - 1)) {
8789 8646 ism_map[i] = ism_map[i + 1];
8790 8647 i++;
8791 8648 }
8792 8649 /* i == (ISM_MAP_SLOTS - 1) */
8793 8650 ism_blkp = ism_blkp->iblk_next;
8794 8651 if (ism_blkp != NULL) {
8795 8652 ism_map[i] = ism_blkp->iblk_maps[0];
8796 8653 i = 0;
8797 8654 } else {
8798 8655 ism_map[i].imap_seg = 0;
8799 8656 ism_map[i].imap_vb_shift = 0;
8800 8657 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8801 8658 ism_map[i].imap_hatflags = 0;
8802 8659 ism_map[i].imap_sz_mask = 0;
8803 8660 ism_map[i].imap_ismhat = NULL;
8804 8661 ism_map[i].imap_ment = NULL;
8805 8662 }
8806 8663 }
8807 8664
8808 8665 /*
8809 8666 * Now flush entire TSB for the process, since
8810 8667 * demapping page by page can be too expensive.
8811 8668 * We don't have to flush the TLB here anymore
8812 8669 * since we switch to a new TLB ctx instead.
8813 8670 * Also, there is no need to flush if the process
8814 8671 * is exiting since the TSB will be freed later.
8815 8672 */
8816 8673 if (!sfmmup->sfmmu_free) {
8817 8674 hatlockp = sfmmu_hat_enter(sfmmup);
8818 8675 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8819 8676 tsbinfo = tsbinfo->tsb_next) {
8820 8677 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8821 8678 continue;
8822 8679 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8823 8680 tsbinfo->tsb_flags |=
8824 8681 TSB_FLUSH_NEEDED;
8825 8682 continue;
8826 8683 }
8827 8684
8828 8685 sfmmu_inv_tsb(tsbinfo->tsb_va,
8829 8686 TSB_BYTES(tsbinfo->tsb_szc));
8830 8687 }
8831 8688 sfmmu_hat_exit(hatlockp);
8832 8689 }
8833 8690 }
8834 8691
8835 8692 /*
8836 8693 * Update our counters for this sfmmup's ism mappings.
8837 8694 */
8838 8695 for (i = 0; i <= ismszc; i++) {
8839 8696 if (!(disable_ism_large_pages & (1 << i)))
8840 8697 (void) ism_tsb_entries(sfmmup, i);
8841 8698 }
8842 8699
8843 8700 sfmmu_ismhat_exit(sfmmup, 0);
8844 8701
8845 8702 /*
8846 8703 * We must do our freeing here after dropping locks
8847 8704 * to prevent a deadlock in the kmem allocator on the
8848 8705 * mapping list lock.
8849 8706 */
8850 8707 if (free_ment != NULL)
8851 8708 kmem_cache_free(ism_ment_cache, free_ment);
8852 8709
8853 8710 /*
8854 8711 * Check TSB and TLB page sizes if the process isn't exiting.
8855 8712 */
8856 8713 if (!sfmmup->sfmmu_free) {
8857 8714 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8858 8715 sfmmu_check_page_sizes(sfmmup, 1);
8859 8716 } else {
8860 8717 sfmmu_check_page_sizes(sfmmup, 0);
8861 8718 }
8862 8719 }
8863 8720 }
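
hat_unshare() deletes an ism map entry by sliding every later entry up one slot because, as noted above, the tsbmiss search stops at the first empty slot; that search itself is not in this hunk. The following is a minimal, self-contained sketch of the invariant with invented names: it models one fixed-size slot array rather than the chained ism_blk_t structures and omits the membar/physical-address linkage.

    #include <stddef.h>

    #define TOY_MAP_SLOTS   4

    struct toy_map {
            void    *seg;               /* NULL terminates the used slots */
    };

    /*
     * Lookup (models the tsbmiss-side scan): it stops at the first empty
     * slot, which is exactly why removal below must never leave a hole.
     */
    static void *
    toy_lookup(struct toy_map *maps, void *seg)
    {
            for (int i = 0; i < TOY_MAP_SLOTS && maps[i].seg != NULL; i++) {
                    if (maps[i].seg == seg)
                            return (maps[i].seg);
            }
            return (NULL);
    }

    /*
     * Removal: copy each following entry over its predecessor so the array
     * stays dense; only the last slot is cleared.
     */
    static void
    toy_remove(struct toy_map *maps, int i)
    {
            while (i < TOY_MAP_SLOTS - 1) {
                    maps[i] = maps[i + 1];
                    i++;
            }
            maps[TOY_MAP_SLOTS - 1].seg = NULL;
    }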
8864 8721
8865 8722 /* ARGSUSED */
8866 8723 static int
8867 8724 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8868 8725 {
8869 8726 /* void *buf is sfmmu_t pointer */
8870 8727 bzero(buf, sizeof (sfmmu_t));
8871 8728
8872 8729 return (0);
8873 8730 }
8874 8731
8875 8732 /* ARGSUSED */
8876 8733 static void
8877 8734 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8878 8735 {
8879 8736 /* void *buf is sfmmu_t pointer */
8880 8737 }
8881 8738
8882 8739 /*
8883 8740 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
8884 8741 * field to be the pa of this hmeblk
8885 8742 */
8886 8743 /* ARGSUSED */
8887 8744 static int
8888 8745 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8889 8746 {
8890 8747 struct hme_blk *hmeblkp;
8891 8748
8892 8749 bzero(buf, (size_t)cdrarg);
8893 8750 hmeblkp = (struct hme_blk *)buf;
8894 8751 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8895 8752
8896 8753 #ifdef HBLK_TRACE
8897 8754 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8898 8755 #endif /* HBLK_TRACE */
8899 8756
8900 8757 return (0);
8901 8758 }
8902 8759
8903 8760 /* ARGSUSED */
8904 8761 static void
8905 8762 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8906 8763 {
8907 8764
8908 8765 #ifdef HBLK_TRACE
8909 8766
8910 8767 struct hme_blk *hmeblkp;
8911 8768
8912 8769 hmeblkp = (struct hme_blk *)buf;
8913 8770 mutex_destroy(&hmeblkp->hblk_audit_lock);
8914 8771
8915 8772 #endif /* HBLK_TRACE */
8916 8773 }
8917 8774
8918 8775 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8919 8776 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8920 8777 /*
8921 8778 * The kmem allocator will callback into our reclaim routine when the system
8922 8779 * is running low in memory. We traverse the hash and free up all unused but
8923 8780 * still cached hme_blks. We also traverse the free list and free them up
8924 8781 * as well.
8925 8782 */
8926 8783 /*ARGSUSED*/
8927 8784 static void
8928 8785 sfmmu_hblkcache_reclaim(void *cdrarg)
8929 8786 {
8930 8787 int i;
8931 8788 struct hmehash_bucket *hmebp;
8932 8789 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8933 8790 static struct hmehash_bucket *uhmehash_reclaim_hand;
8934 8791 static struct hmehash_bucket *khmehash_reclaim_hand;
8935 8792 struct hme_blk *list = NULL, *last_hmeblkp;
8936 8793 cpuset_t cpuset = cpu_ready_set;
8937 8794 cpu_hme_pend_t *cpuhp;
8938 8795
8939 8796 /* Free up hmeblks on the cpu pending lists */
8940 8797 for (i = 0; i < NCPU; i++) {
8941 8798 cpuhp = &cpu_hme_pend[i];
8942 8799 if (cpuhp->chp_listp != NULL) {
8943 8800 mutex_enter(&cpuhp->chp_mutex);
8944 8801 if (cpuhp->chp_listp == NULL) {
8945 8802 mutex_exit(&cpuhp->chp_mutex);
8946 8803 continue;
8947 8804 }
8948 8805 for (last_hmeblkp = cpuhp->chp_listp;
8949 8806 last_hmeblkp->hblk_next != NULL;
8950 8807 last_hmeblkp = last_hmeblkp->hblk_next)
8951 8808 ;
8952 8809 last_hmeblkp->hblk_next = list;
8953 8810 list = cpuhp->chp_listp;
8954 8811 cpuhp->chp_listp = NULL;
8955 8812 cpuhp->chp_count = 0;
8956 8813 mutex_exit(&cpuhp->chp_mutex);
8957 8814 }
8958 8815
8959 8816 }
8960 8817
8961 8818 if (list != NULL) {
8962 8819 kpreempt_disable();
8963 8820 CPUSET_DEL(cpuset, CPU->cpu_id);
8964 8821 xt_sync(cpuset);
8965 8822 xt_sync(cpuset);
8966 8823 kpreempt_enable();
8967 8824 sfmmu_hblk_free(&list);
8968 8825 list = NULL;
8969 8826 }
8970 8827
8971 8828 hmebp = uhmehash_reclaim_hand;
8972 8829 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8973 8830 uhmehash_reclaim_hand = hmebp = uhme_hash;
8974 8831 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8975 8832
8976 8833 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8977 8834 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8978 8835 hmeblkp = hmebp->hmeblkp;
8979 8836 pr_hblk = NULL;
8980 8837 while (hmeblkp) {
8981 8838 nx_hblk = hmeblkp->hblk_next;
8982 8839 if (!hmeblkp->hblk_vcnt &&
8983 8840 !hmeblkp->hblk_hmecnt) {
8984 8841 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8985 8842 pr_hblk, &list, 0);
8986 8843 } else {
8987 8844 pr_hblk = hmeblkp;
8988 8845 }
8989 8846 hmeblkp = nx_hblk;
8990 8847 }
8991 8848 SFMMU_HASH_UNLOCK(hmebp);
8992 8849 }
8993 8850 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8994 8851 hmebp = uhme_hash;
8995 8852 }
8996 8853
8997 8854 hmebp = khmehash_reclaim_hand;
8998 8855 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8999 8856 khmehash_reclaim_hand = hmebp = khme_hash;
9000 8857 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9001 8858
9002 8859 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9003 8860 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9004 8861 hmeblkp = hmebp->hmeblkp;
9005 8862 pr_hblk = NULL;
9006 8863 while (hmeblkp) {
9007 8864 nx_hblk = hmeblkp->hblk_next;
9008 8865 if (!hmeblkp->hblk_vcnt &&
9009 8866 !hmeblkp->hblk_hmecnt) {
9010 8867 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9011 8868 pr_hblk, &list, 0);
9012 8869 } else {
9013 8870 pr_hblk = hmeblkp;
9014 8871 }
9015 8872 hmeblkp = nx_hblk;
9016 8873 }
9017 8874 SFMMU_HASH_UNLOCK(hmebp);
9018 8875 }
9019 8876 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9020 8877 hmebp = khme_hash;
9021 8878 }
9022 8879 sfmmu_hblks_list_purge(&list, 0);
9023 8880 }
9024 8881
9025 8882 /*
9026 8883 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9027 8884 * The same goes for sfmmu_get_addrvcolor().
9028 8885 *
9029 8886 * This function will return the virtual color for the specified page. The
9030 8887 * virtual color corresponds to this page's current mapping or its last mapping.
9031 8888 * It is used by memory allocators to choose addresses with the correct
9032 8889 * alignment so vac consistency is automatically maintained. If the page
9033 8890 * has no color it returns -1.
9034 8891 */
9035 8892 /*ARGSUSED*/
9036 8893 int
9037 8894 sfmmu_get_ppvcolor(struct page *pp)
9038 8895 {
9039 8896 #ifdef VAC
9040 8897 int color;
9041 8898
9042 8899 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9043 8900 return (-1);
9044 8901 }
9045 8902 color = PP_GET_VCOLOR(pp);
9046 8903 ASSERT(color < mmu_btop(shm_alignment));
9047 8904 return (color);
9048 8905 #else
9049 8906 return (-1);
9050 8907 #endif /* VAC */
9051 8908 }
9052 8909
9053 8910 /*
9054 8911 * This function will return the desired alignment for vac consistency
9055 8912 * (vac color) given a virtual address. If no vac is present it returns -1.
9056 8913 */
9057 8914 /*ARGSUSED*/
9058 8915 int
9059 8916 sfmmu_get_addrvcolor(caddr_t vaddr)
9060 8917 {
9061 8918 #ifdef VAC
9062 8919 if (cache & CACHE_VAC) {
9063 8920 return (addr_to_vcolor(vaddr));
9064 8921 } else {
9065 8922 return (-1);
9066 8923 }
9067 8924 #else
9068 8925 return (-1);
9069 8926 #endif /* VAC */
9070 8927 }
9071 8928
9072 8929 #ifdef VAC
9073 8930 /*
9074 8931 * Check for conflicts.
9075 8932 * A conflict exists if the new and existing mappings do not match in
9076 8933 * their "shm_alignment" fields. If conflicts exist, the existing mappings
9077 8934 * are flushed unless one of them is locked. If one of them is locked, then
9078 8935 * the mappings are flushed and converted to non-cacheable mappings.
9079 8936 */
9080 8937 static void
9081 8938 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9082 8939 {
9083 8940 struct hat *tmphat;
9084 8941 struct sf_hment *sfhmep, *tmphme = NULL;
9085 8942 struct hme_blk *hmeblkp;
9086 8943 int vcolor;
9087 8944 tte_t tte;
9088 8945
9089 8946 ASSERT(sfmmu_mlist_held(pp));
9090 8947 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9091 8948
9092 8949 vcolor = addr_to_vcolor(addr);
9093 8950 if (PP_NEWPAGE(pp)) {
9094 8951 PP_SET_VCOLOR(pp, vcolor);
9095 8952 return;
9096 8953 }
9097 8954
9098 8955 if (PP_GET_VCOLOR(pp) == vcolor) {
9099 8956 return;
9100 8957 }
9101 8958
9102 8959 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9103 8960 /*
9104 8961 * Previous user of page had a different color
9105 8962 * but since there are no current users
9106 8963 * we just flush the cache and change the color.
9107 8964 */
9108 8965 SFMMU_STAT(sf_pgcolor_conflict);
9109 8966 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9110 8967 PP_SET_VCOLOR(pp, vcolor);
9111 8968 return;
9112 8969 }
9113 8970
9114 8971 /*
9115 8972 * If we get here we have a vac conflict with a current
9116 8973 * mapping. VAC conflict policy is as follows.
9117 8974 * - The default is to unload the other mappings unless:
9118 8975 * - If we have a large mapping we uncache the page.
9119 8976 * We need to uncache the rest of the large page too.
9120 8977 * - If any of the mappings are locked we uncache the page.
9121 8978 * - If the requested mapping is inconsistent
9122 8979 * with another mapping and that mapping
9123 8980 * is in the same address space we have to
9124 8981 * make it non-cached. The default thing
9125 8982 * to do is unload the inconsistent mapping
9126 8983 * but if they are in the same address space
9127 8984 * we run the risk of unmapping the pc or the
9128 8985 * stack which we will use as we return to the user,
9129 8986 * in which case we can then fault on the thing
9130 8987 * we just unloaded and get into an infinite loop.
9131 8988 */
9132 8989 if (PP_ISMAPPED_LARGE(pp)) {
9133 8990 int sz;
9134 8991
9135 8992 /*
9136 8993 * Existing mapping is for big pages. We don't unload
9137 8994 * existing big mappings to satisfy new mappings.
9138 8995 * Always convert all mappings to TNC.
9139 8996 */
9140 8997 sz = fnd_mapping_sz(pp);
9141 8998 pp = PP_GROUPLEADER(pp, sz);
9142 8999 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9143 9000 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9144 9001 TTEPAGES(sz));
9145 9002
9146 9003 return;
9147 9004 }
9148 9005
9149 9006 /*
9150 9007 * check if any mapping is in the same as (address space) or if it is locked,
9151 9008 * since in that case we need to uncache.
9152 9009 */
9153 9010 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9154 9011 tmphme = sfhmep->hme_next;
9155 9012 if (IS_PAHME(sfhmep))
9156 9013 continue;
9157 9014 hmeblkp = sfmmu_hmetohblk(sfhmep);
9158 9015 tmphat = hblktosfmmu(hmeblkp);
9159 9016 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9160 9017 ASSERT(TTE_IS_VALID(&tte));
9161 9018 if (hmeblkp->hblk_shared || tmphat == hat ||
9162 9019 hmeblkp->hblk_lckcnt) {
9163 9020 /*
9164 9021 * We have an uncache conflict
9165 9022 */
9166 9023 SFMMU_STAT(sf_uncache_conflict);
9167 9024 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9168 9025 return;
9169 9026 }
9170 9027 }
9171 9028
9172 9029 /*
9173 9030 * We have an unload conflict
9174 9031 * We have already checked for LARGE mappings, therefore
9175 9032 * the remaining mapping(s) must be TTE8K.
9176 9033 */
9177 9034 SFMMU_STAT(sf_unload_conflict);
9178 9035
9179 9036 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9180 9037 tmphme = sfhmep->hme_next;
9181 9038 if (IS_PAHME(sfhmep))
9182 9039 continue;
9183 9040 hmeblkp = sfmmu_hmetohblk(sfhmep);
9184 9041 ASSERT(!hmeblkp->hblk_shared);
9185 9042 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9186 9043 }
9187 9044
9188 9045 if (PP_ISMAPPED_KPM(pp))
9189 9046 sfmmu_kpm_vac_unload(pp, addr);
9190 9047
9191 9048 /*
9192 9049 * Unloads only do TLB flushes so we need to flush the
9193 9050 * cache here.
9194 9051 */
9195 9052 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9196 9053 PP_SET_VCOLOR(pp, vcolor);
9197 9054 }
9198 9055
9199 9056 /*
9200 9057 * Whenever a mapping is unloaded and the page is in TNC state,
9201 9058 * we see if the page can be made cacheable again. 'pp' is
9202 9059 * the page that we just unloaded a mapping from, the size
9203 9060 * of mapping that was unloaded is 'ottesz'.
9204 9061 * Remark:
9205 9062 * The recache policy for mpss pages can leave a performance problem
9206 9063 * under the following circumstances:
9207 9064 * . A large page in uncached mode has just been unmapped.
9208 9065 * . All constituent pages are TNC due to a conflicting small mapping.
9209 9066 * . There are many other, non conflicting, small mappings around for
9210 9067 * a lot of the constituent pages.
9211 9068 * . We're called w/ the "old" groupleader page and the old ottesz,
9212 9069 * but this is irrelevant, since we're no longer "PP_ISMAPPED_LARGE", so
9213 9070 * we end up w/ TTE8K or npages == 1.
9214 9071 * . We call tst_tnc w/ the old groupleader only, and if there is no
9215 9072 * conflict, we re-cache only this page.
9216 9073 * . All other small mappings are not checked and will be left in TNC mode.
9217 9074 * The problem is not very serious because:
9218 9075 * . mpss is actually only defined for heap and stack, so the probability
9219 9076 * is not very high that a large page mapping exists in parallel to a small
9220 9077 * one (this is possible, but seems to be bad programming style in the
9221 9078 * appl).
9222 9079 * . The problem gets a little bit more serious, when those TNC pages
9223 9080 * have to be mapped into kernel space, e.g. for networking.
9224 9081 * . When VAC alias conflicts occur in applications, this is regarded
9225 9082 * as an application bug. So if kstats show them, the appl should
9226 9083 * be changed anyway.
9227 9084 */
9228 9085 void
9229 9086 conv_tnc(page_t *pp, int ottesz)
9230 9087 {
9231 9088 int cursz, dosz;
9232 9089 pgcnt_t curnpgs, dopgs;
9233 9090 pgcnt_t pg64k;
9234 9091 page_t *pp2;
9235 9092
9236 9093 /*
9237 9094 * Determine how big a range we check for TNC and find
9238 9095 * leader page. cursz is the size of the biggest
9239 9096 * mapping that still exist on 'pp'.
9240 9097 */
9241 9098 if (PP_ISMAPPED_LARGE(pp)) {
9242 9099 cursz = fnd_mapping_sz(pp);
9243 9100 } else {
9244 9101 cursz = TTE8K;
9245 9102 }
9246 9103
9247 9104 if (ottesz >= cursz) {
9248 9105 dosz = ottesz;
9249 9106 pp2 = pp;
9250 9107 } else {
9251 9108 dosz = cursz;
9252 9109 pp2 = PP_GROUPLEADER(pp, dosz);
9253 9110 }
9254 9111
9255 9112 pg64k = TTEPAGES(TTE64K);
9256 9113 dopgs = TTEPAGES(dosz);
9257 9114
9258 9115 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9259 9116
9260 9117 while (dopgs != 0) {
9261 9118 curnpgs = TTEPAGES(cursz);
9262 9119 if (tst_tnc(pp2, curnpgs)) {
9263 9120 SFMMU_STAT_ADD(sf_recache, curnpgs);
9264 9121 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9265 9122 curnpgs);
9266 9123 }
9267 9124
9268 9125 ASSERT(dopgs >= curnpgs);
9269 9126 dopgs -= curnpgs;
9270 9127
9271 9128 if (dopgs == 0) {
9272 9129 break;
9273 9130 }
9274 9131
9275 9132 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9276 9133 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9277 9134 cursz = fnd_mapping_sz(pp2);
9278 9135 } else {
9279 9136 cursz = TTE8K;
9280 9137 }
9281 9138 }
9282 9139 }
9283 9140
9284 9141 /*
9285 9142 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9286 9143 * returns 0 otherwise. Note that oaddr argument is valid for only
9287 9144 * 8k pages.
9288 9145 */
9289 9146 int
9290 9147 tst_tnc(page_t *pp, pgcnt_t npages)
9291 9148 {
9292 9149 struct sf_hment *sfhme;
9293 9150 struct hme_blk *hmeblkp;
9294 9151 tte_t tte;
9295 9152 caddr_t vaddr;
9296 9153 int clr_valid = 0;
9297 9154 int color, color1, bcolor;
9298 9155 int i, ncolors;
9299 9156
9300 9157 ASSERT(pp != NULL);
9301 9158 ASSERT(!(cache & CACHE_WRITEBACK));
9302 9159
9303 9160 if (npages > 1) {
9304 9161 ncolors = CACHE_NUM_COLOR;
9305 9162 }
9306 9163
9307 9164 for (i = 0; i < npages; i++) {
9308 9165 ASSERT(sfmmu_mlist_held(pp));
9309 9166 ASSERT(PP_ISTNC(pp));
9310 9167 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9311 9168
9312 9169 if (PP_ISPNC(pp)) {
9313 9170 return (0);
9314 9171 }
9315 9172
9316 9173 clr_valid = 0;
9317 9174 if (PP_ISMAPPED_KPM(pp)) {
9318 9175 caddr_t kpmvaddr;
9319 9176
9320 9177 ASSERT(kpm_enable);
9321 9178 kpmvaddr = hat_kpm_page2va(pp, 1);
9322 9179 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9323 9180 color1 = addr_to_vcolor(kpmvaddr);
9324 9181 clr_valid = 1;
9325 9182 }
9326 9183
9327 9184 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9328 9185 if (IS_PAHME(sfhme))
9329 9186 continue;
9330 9187 hmeblkp = sfmmu_hmetohblk(sfhme);
9331 9188
9332 9189 sfmmu_copytte(&sfhme->hme_tte, &tte);
9333 9190 ASSERT(TTE_IS_VALID(&tte));
9334 9191
9335 9192 vaddr = tte_to_vaddr(hmeblkp, tte);
9336 9193 color = addr_to_vcolor(vaddr);
9337 9194
9338 9195 if (npages > 1) {
9339 9196 /*
9340 9197 * If there is a big mapping, make sure
9341 9198 * 8K mapping is consistent with the big
9342 9199 * mapping.
9343 9200 */
9344 9201 bcolor = i % ncolors;
9345 9202 if (color != bcolor) {
9346 9203 return (0);
9347 9204 }
9348 9205 }
9349 9206 if (!clr_valid) {
9350 9207 clr_valid = 1;
9351 9208 color1 = color;
9352 9209 }
9353 9210
9354 9211 if (color1 != color) {
9355 9212 return (0);
9356 9213 }
9357 9214 }
9358 9215
9359 9216 pp = PP_PAGENEXT(pp);
9360 9217 }
9361 9218
9362 9219 return (1);
9363 9220 }
9364 9221
9365 9222 void
9366 9223 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9367 9224 pgcnt_t npages)
9368 9225 {
9369 9226 kmutex_t *pmtx;
9370 9227 int i, ncolors, bcolor;
9371 9228 kpm_hlk_t *kpmp;
9372 9229 cpuset_t cpuset;
9373 9230
9374 9231 ASSERT(pp != NULL);
9375 9232 ASSERT(!(cache & CACHE_WRITEBACK));
9376 9233
9377 9234 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9378 9235 pmtx = sfmmu_page_enter(pp);
9379 9236
9380 9237 /*
9381 9238 * Fast path caching single unmapped page
9382 9239 */
9383 9240 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9384 9241 flags == HAT_CACHE) {
9385 9242 PP_CLRTNC(pp);
9386 9243 PP_CLRPNC(pp);
9387 9244 sfmmu_page_exit(pmtx);
9388 9245 sfmmu_kpm_kpmp_exit(kpmp);
9389 9246 return;
9390 9247 }
9391 9248
9392 9249 /*
9393 9250 * We need to capture all cpus in order to change cacheability
9394 9251 * because we can't allow one cpu to access the same physical
9395 9252 * page using a cacheable and a non-cacheable mapping at the same
9396 9253 * time. Since we may end up walking the ism mapping list we
9397 9254 * have to grab its lock now, since we can't after all the
9398 9255 * cpus have been captured.
9399 9256 */
9400 9257 sfmmu_hat_lock_all();
9401 9258 mutex_enter(&ism_mlist_lock);
9402 9259 kpreempt_disable();
9403 9260 cpuset = cpu_ready_set;
9404 9261 xc_attention(cpuset);
9405 9262
9406 9263 if (npages > 1) {
9407 9264 /*
9408 9265 * Make sure all colors are flushed since the
9409 9266 * sfmmu_page_cache() only flushes one color;
9410 9267 * it does not know about big pages.
9411 9268 */
9412 9269 ncolors = CACHE_NUM_COLOR;
9413 9270 if (flags & HAT_TMPNC) {
9414 9271 for (i = 0; i < ncolors; i++) {
9415 9272 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9416 9273 }
9417 9274 cache_flush_flag = CACHE_NO_FLUSH;
9418 9275 }
9419 9276 }
9420 9277
9421 9278 for (i = 0; i < npages; i++) {
9422 9279
9423 9280 ASSERT(sfmmu_mlist_held(pp));
9424 9281
9425 9282 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9426 9283
9427 9284 if (npages > 1) {
9428 9285 bcolor = i % ncolors;
9429 9286 } else {
9430 9287 bcolor = NO_VCOLOR;
9431 9288 }
9432 9289
9433 9290 sfmmu_page_cache(pp, flags, cache_flush_flag,
9434 9291 bcolor);
9435 9292 }
9436 9293
9437 9294 pp = PP_PAGENEXT(pp);
9438 9295 }
9439 9296
9440 9297 xt_sync(cpuset);
9441 9298 xc_dismissed(cpuset);
9442 9299 mutex_exit(&ism_mlist_lock);
9443 9300 sfmmu_hat_unlock_all();
9444 9301 sfmmu_page_exit(pmtx);
9445 9302 sfmmu_kpm_kpmp_exit(kpmp);
9446 9303 kpreempt_enable();
9447 9304 }
9448 9305
9449 9306 /*
9450 9307 * This function changes the virtual cacheability of all mappings to a
9451 9308 * particular page. When changing from uncache to cacheable the mappings will
9452 9309 * only be changed if all of them have the same virtual color.
9453 9310 * We need to flush the cache in all cpus. It is possible that
9454 9311 * a process referenced a page as cacheable but has since exited
9455 9312 * and cleared the mapping list. We still need to flush it but have no
9456 9313 * state, so flushing on all cpus is the only alternative.
9457 9314 */
9458 9315 static void
9459 9316 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9460 9317 {
9461 9318 struct sf_hment *sfhme;
9462 9319 struct hme_blk *hmeblkp;
9463 9320 sfmmu_t *sfmmup;
9464 9321 tte_t tte, ttemod;
9465 9322 caddr_t vaddr;
9466 9323 int ret, color;
9467 9324 pfn_t pfn;
9468 9325
9469 9326 color = bcolor;
9470 9327 pfn = pp->p_pagenum;
9471 9328
9472 9329 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9473 9330
9474 9331 if (IS_PAHME(sfhme))
9475 9332 continue;
9476 9333 hmeblkp = sfmmu_hmetohblk(sfhme);
9477 9334
9478 9335 sfmmu_copytte(&sfhme->hme_tte, &tte);
9479 9336 ASSERT(TTE_IS_VALID(&tte));
9480 9337 vaddr = tte_to_vaddr(hmeblkp, tte);
9481 9338 color = addr_to_vcolor(vaddr);
9482 9339
9483 9340 #ifdef DEBUG
9484 9341 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9485 9342 ASSERT(color == bcolor);
9486 9343 }
9487 9344 #endif
9488 9345
9489 9346 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9490 9347
9491 9348 ttemod = tte;
9492 9349 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9493 9350 TTE_CLR_VCACHEABLE(&ttemod);
9494 9351 } else { /* flags & HAT_CACHE */
9495 9352 TTE_SET_VCACHEABLE(&ttemod);
9496 9353 }
9497 9354 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9498 9355 if (ret < 0) {
9499 9356 /*
9500 9357 * Since all cpus are captured modifytte should not
9501 9358 * fail.
9502 9359 */
9503 9360 panic("sfmmu_page_cache: write to tte failed");
9504 9361 }
9505 9362
9506 9363 sfmmup = hblktosfmmu(hmeblkp);
9507 9364 if (cache_flush_flag == CACHE_FLUSH) {
9508 9365 /*
9509 9366 * Flush TSBs, TLBs and caches
9510 9367 */
9511 9368 if (hmeblkp->hblk_shared) {
9512 9369 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9513 9370 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9514 9371 sf_region_t *rgnp;
9515 9372 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9516 9373 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9517 9374 ASSERT(srdp != NULL);
9518 9375 rgnp = srdp->srd_hmergnp[rid];
9519 9376 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9520 9377 srdp, rgnp, rid);
9521 9378 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9522 9379 hmeblkp, 0);
9523 9380 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9524 9381 } else if (sfmmup->sfmmu_ismhat) {
9525 9382 if (flags & HAT_CACHE) {
9526 9383 SFMMU_STAT(sf_ism_recache);
9527 9384 } else {
9528 9385 SFMMU_STAT(sf_ism_uncache);
9529 9386 }
9530 9387 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9531 9388 pfn, CACHE_FLUSH);
9532 9389 } else {
9533 9390 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9534 9391 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9535 9392 }
9536 9393
9537 9394 /*
9538 9395 * all cache entries belonging to this pfn are
9539 9396 * now flushed.
9540 9397 */
9541 9398 cache_flush_flag = CACHE_NO_FLUSH;
9542 9399 } else {
9543 9400 /*
9544 9401 * Flush only TSBs and TLBs.
9545 9402 */
9546 9403 if (hmeblkp->hblk_shared) {
9547 9404 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9548 9405 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9549 9406 sf_region_t *rgnp;
9550 9407 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9551 9408 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9552 9409 ASSERT(srdp != NULL);
9553 9410 rgnp = srdp->srd_hmergnp[rid];
9554 9411 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9555 9412 srdp, rgnp, rid);
9556 9413 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9557 9414 hmeblkp, 0);
9558 9415 } else if (sfmmup->sfmmu_ismhat) {
9559 9416 if (flags & HAT_CACHE) {
9560 9417 SFMMU_STAT(sf_ism_recache);
9561 9418 } else {
9562 9419 SFMMU_STAT(sf_ism_uncache);
9563 9420 }
9564 9421 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9565 9422 pfn, CACHE_NO_FLUSH);
9566 9423 } else {
9567 9424 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9568 9425 }
9569 9426 }
9570 9427 }
9571 9428
9572 9429 if (PP_ISMAPPED_KPM(pp))
9573 9430 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9574 9431
9575 9432 switch (flags) {
9576 9433
9577 9434 default:
9578 9435 panic("sfmmu_pagecache: unknown flags");
9579 9436 break;
9580 9437
9581 9438 case HAT_CACHE:
9582 9439 PP_CLRTNC(pp);
9583 9440 PP_CLRPNC(pp);
9584 9441 PP_SET_VCOLOR(pp, color);
9585 9442 break;
9586 9443
9587 9444 case HAT_TMPNC:
9588 9445 PP_SETTNC(pp);
9589 9446 PP_SET_VCOLOR(pp, NO_VCOLOR);
9590 9447 break;
9591 9448
9592 9449 case HAT_UNCACHE:
9593 9450 PP_SETPNC(pp);
9594 9451 PP_CLRTNC(pp);
9595 9452 PP_SET_VCOLOR(pp, NO_VCOLOR);
9596 9453 break;
9597 9454 }
9598 9455 }
9599 9456 #endif /* VAC */
9600 9457
9601 9458
9602 9459 /*
9603 9460 * Wrapper routine used to return a context.
9604 9461 *
9605 9462 * It's the responsibility of the caller to guarantee that the
9606 9463 * process serializes on calls here by taking the HAT lock for
9607 9464 * the hat.
9608 9465 *
9609 9466 */
9610 9467 static void
9611 9468 sfmmu_get_ctx(sfmmu_t *sfmmup)
9612 9469 {
9613 9470 mmu_ctx_t *mmu_ctxp;
9614 9471 uint_t pstate_save;
9615 9472 int ret;
9616 9473
9617 9474 ASSERT(sfmmu_hat_lock_held(sfmmup));
9618 9475 ASSERT(sfmmup != ksfmmup);
9619 9476
9620 9477 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9621 9478 sfmmu_setup_tsbinfo(sfmmup);
9622 9479 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9623 9480 }
9624 9481
9625 9482 kpreempt_disable();
9626 9483
9627 9484 mmu_ctxp = CPU_MMU_CTXP(CPU);
9628 9485 ASSERT(mmu_ctxp);
9629 9486 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9630 9487 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9631 9488
9632 9489 /*
9633 9490 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
9634 9491 */
9635 9492 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9636 9493 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9637 9494
9638 9495 /*
9639 9496 * Let the MMU set up the page sizes to use for
9640 9497 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9641 9498 */
9642 9499 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9643 9500 mmu_set_ctx_page_sizes(sfmmup);
9644 9501 }
9645 9502
9646 9503 /*
9647 9504 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9648 9505 * interrupts disabled to prevent race condition with wrap-around
9649 9506 * interrupts disabled to prevent a race condition with wrap-around
9650 9507 * ctx invalidation. In sun4v, ctx invalidation also involves
9651 9508 * disabled until after sfmmu_load_mmustate is complete TSBs may
9652 9509 * become assigned to INVALID_CONTEXT. This is not allowed.
9653 9510 */
9654 9511 pstate_save = sfmmu_disable_intrs();
9655 9512
9656 9513 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9657 9514 sfmmup->sfmmu_scdp != NULL) {
9658 9515 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9659 9516 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9660 9517 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9661 9518 /* debug purpose only */
9662 9519 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9663 9520 != INVALID_CONTEXT);
9664 9521 }
9665 9522 sfmmu_load_mmustate(sfmmup);
9666 9523
9667 9524 sfmmu_enable_intrs(pstate_save);
9668 9525
9669 9526 kpreempt_enable();
9670 9527 }
9671 9528
9672 9529 /*
9673 9530 * When all cnums are used up in a MMU, cnum will wrap around to the
9674 9531 * next generation and start from 2.
9675 9532 */
9676 9533 static void
9677 9534 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9678 9535 {
9679 9536
9680 9537 /* caller must have disabled the preemption */
9681 9538 ASSERT(curthread->t_preempt >= 1);
9682 9539 ASSERT(mmu_ctxp != NULL);
9683 9540
9684 9541 /* acquire Per-MMU (PM) spin lock */
9685 9542 mutex_enter(&mmu_ctxp->mmu_lock);
9686 9543
9687 9544 /* re-check to see if wrap-around is needed */
9688 9545 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9689 9546 goto done;
9690 9547
9691 9548 SFMMU_MMU_STAT(mmu_wrap_around);
9692 9549
9693 9550 /* update gnum */
9694 9551 ASSERT(mmu_ctxp->mmu_gnum != 0);
9695 9552 mmu_ctxp->mmu_gnum++;
9696 9553 if (mmu_ctxp->mmu_gnum == 0 ||
9697 9554 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9698 9555 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9699 9556 (void *)mmu_ctxp);
9700 9557 }
9701 9558
9702 9559 if (mmu_ctxp->mmu_ncpus > 1) {
9703 9560 cpuset_t cpuset;
9704 9561
9705 9562 membar_enter(); /* make sure updated gnum visible */
9706 9563
9707 9564 SFMMU_XCALL_STATS(NULL);
9708 9565
9709 9566 /* xcall to others on the same MMU to invalidate ctx */
9710 9567 cpuset = mmu_ctxp->mmu_cpuset;
9711 9568 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9712 9569 CPUSET_DEL(cpuset, CPU->cpu_id);
9713 9570 CPUSET_AND(cpuset, cpu_ready_set);
9714 9571
9715 9572 /*
9716 9573 * Pass in INVALID_CONTEXT as the first parameter to
9717 9574 * sfmmu_raise_tsb_exception, which invalidates the context
9718 9575 * of any process running on the CPUs in the MMU.
9719 9576 */
9720 9577 xt_some(cpuset, sfmmu_raise_tsb_exception,
9721 9578 INVALID_CONTEXT, INVALID_CONTEXT);
9722 9579 xt_sync(cpuset);
9723 9580
9724 9581 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9725 9582 }
9726 9583
9727 9584 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9728 9585 sfmmu_setctx_sec(INVALID_CONTEXT);
9729 9586 sfmmu_clear_utsbinfo();
9730 9587 }
9731 9588
9732 9589 /*
9733 9590 * No xcall is needed here. For sun4u systems all CPUs in context
9734 9591 * domain share a single physical MMU therefore it's enough to flush
9735 9592 * TLB on local CPU. On sun4v systems we use 1 global context
9736 9593 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception
9737 9594 * handler. Note that vtag_flushall_uctxs() is called
9738 9595 * for Ultra II machines, where the equivalent flushall functionality
9739 9596 * is implemented in SW, and only user ctx TLB entries are flushed.
9740 9597 */
9741 9598 if (&vtag_flushall_uctxs != NULL) {
9742 9599 vtag_flushall_uctxs();
9743 9600 } else {
9744 9601 vtag_flushall();
9745 9602 }
9746 9603
9747 9604 /* reset mmu cnum, skips cnum 0 and 1 */
9748 9605 if (reset_cnum == B_TRUE)
9749 9606 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9750 9607
9751 9608 done:
9752 9609 mutex_exit(&mmu_ctxp->mmu_lock);
9753 9610 }
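
sfmmu_ctx_wrap_around() bumps mmu_gnum and restarts cnum allocation at NUM_LOCKED_CTXS; the allocation side (sfmmu_alloc_ctx()) is not shown in this hunk. Below is a simplified, single-threaded model of that generation scheme using invented names: a saved (gnum, cnum) pair is usable only while its generation matches the MMU's, so one increment of gnum invalidates every outstanding cnum without visiting the processes that hold them. Locking, the packed representation, and the cross-call/TLB-flush work are deliberately left out.

    #include <stdbool.h>
    #include <stdint.h>

    #define TOY_NUM_LOCKED  2           /* cnums 0 and 1 are reserved */

    struct toy_mmu {
            uint64_t        gnum;       /* generation; bumped on wrap-around */
            uint16_t        next_cnum;  /* next context number to hand out */
            uint16_t        nctxs;      /* total cnums this MMU supports */
    };

    struct toy_ctx {
            uint64_t        gnum;       /* generation the cnum was issued in */
            uint16_t        cnum;
    };

    /* A saved context is usable only while its generation still matches. */
    static bool
    toy_ctx_valid(const struct toy_mmu *mmu, const struct toy_ctx *ctx)
    {
            return (ctx->gnum == mmu->gnum);
    }

    /* Hand out the next cnum; on exhaustion, bump gnum and restart at 2. */
    static void
    toy_ctx_alloc(struct toy_mmu *mmu, struct toy_ctx *ctx)
    {
            if (mmu->next_cnum == mmu->nctxs) {
                    mmu->gnum++;                    /* invalidates all old cnums */
                    mmu->next_cnum = TOY_NUM_LOCKED;
            }
            ctx->cnum = mmu->next_cnum++;
            ctx->gnum = mmu->gnum;
    }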
9754 9611
9755 9612
9756 9613 /*
9757 9614 * For multi-threaded process, set the process context to INVALID_CONTEXT
9758 9615 * so that it faults and reloads the MMU state from TL=0. For single-threaded
9759 9616 * process, we can just load the MMU state directly without having to
9760 9617 * set context invalid. Caller must hold the hat lock since we don't
9761 9618 * acquire it here.
9762 9619 */
9763 9620 static void
9764 9621 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9765 9622 {
9766 9623 uint_t cnum;
9767 9624 uint_t pstate_save;
9768 9625
9769 9626 ASSERT(sfmmup != ksfmmup);
9770 9627 ASSERT(sfmmu_hat_lock_held(sfmmup));
9771 9628
9772 9629 kpreempt_disable();
9773 9630
9774 9631 /*
9775 9632 * We check whether the passed-in sfmmup is the same as the
9776 9633 * currently running proc. This is to make sure the current proc
9777 9634 * stays single-threaded if it already is.
9778 9635 */
9779 9636 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9780 9637 (curthread->t_procp->p_lwpcnt == 1)) {
9781 9638 /* single-thread */
9782 9639 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9783 9640 if (cnum != INVALID_CONTEXT) {
9784 9641 uint_t curcnum;
9785 9642 /*
9786 9643 * Disable interrupts to prevent race condition
9787 9644 * with sfmmu_ctx_wrap_around ctx invalidation.
9788 9645 * In sun4v, ctx invalidation involves setting
9789 9646 * TSB to NULL, hence, interrupts should be disabled
9790 9647 * until after sfmmu_load_mmustate is completed.
9791 9648 */
9792 9649 pstate_save = sfmmu_disable_intrs();
9793 9650 curcnum = sfmmu_getctx_sec();
9794 9651 if (curcnum == cnum)
9795 9652 sfmmu_load_mmustate(sfmmup);
9796 9653 sfmmu_enable_intrs(pstate_save);
9797 9654 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9798 9655 }
9799 9656 } else {
9800 9657 /*
9801 9658 * multi-thread
9802 9659 * or when sfmmup is not the same as the curproc.
9803 9660 */
9804 9661 sfmmu_invalidate_ctx(sfmmup);
9805 9662 }
9806 9663
9807 9664 kpreempt_enable();
9808 9665 }
9809 9666
9810 9667
9811 9668 /*
9812 9669 * Replace the specified TSB with a new TSB. This function gets called when
9813 - * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
9670 + * we grow, or shrink a TSB. When swapping in a TSB (TSB_SWAPIN), the
9814 9671 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9815 9672 * (8K).
9816 9673 *
9817 9674 * Caller must hold the HAT lock, but should assume any tsb_info
9818 9675 * pointers it has are no longer valid after calling this function.
9819 9676 *
9820 9677 * Return values:
9821 9678 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9822 9679 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9823 9680 * something to this tsbinfo/TSB
9824 9681 * TSB_SUCCESS Operation succeeded
9825 9682 */
9826 9683 static tsb_replace_rc_t
9827 9684 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9828 9685 hatlock_t *hatlockp, uint_t flags)
9829 9686 {
9830 9687 struct tsb_info *new_tsbinfo = NULL;
9831 9688 struct tsb_info *curtsb, *prevtsb;
9832 9689 uint_t tte_sz_mask;
9833 9690 int i;
9834 9691
9835 9692 ASSERT(sfmmup != ksfmmup);
9836 9693 ASSERT(sfmmup->sfmmu_ismhat == 0);
9837 9694 ASSERT(sfmmu_hat_lock_held(sfmmup));
9838 9695 ASSERT(szc <= tsb_max_growsize);
9839 9696
9840 9697 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9841 9698 return (TSB_LOSTRACE);
9842 9699
9843 9700 /*
9844 9701 * Find the tsb_info ahead of this one in the list, and
9845 9702 * also make sure that the tsb_info passed in really
9846 9703 * exists!
9847 9704 */
9848 9705 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9849 9706 curtsb != old_tsbinfo && curtsb != NULL;
9850 9707 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9851 9708 ;
9852 9709 ASSERT(curtsb != NULL);
9853 9710
9854 9711 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9855 9712 /*
9856 9713 * The process is swapped out, so just set the new size
9857 9714 * code. When it swaps back in, we'll allocate a new one
9858 9715 * of the new chosen size.
9859 9716 */
9860 9717 curtsb->tsb_szc = szc;
9861 9718 return (TSB_SUCCESS);
9862 9719 }
9863 9720 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9864 9721
9865 9722 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9866 9723
9867 9724 /*
9868 9725 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9869 9726 * If we fail to allocate a TSB, exit.
9870 9727 *
9871 9728 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9872 9729 * then try 4M slab after the initial alloc fails.
9873 9730 *
9874 9731 * If tsb swapin with tsb size > 4M, then try 4M after the
9875 9732 * initial alloc fails.
9876 9733 */
9877 9734 sfmmu_hat_exit(hatlockp);
9878 9735 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9879 9736 tte_sz_mask, flags, sfmmup) &&
9880 9737 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9881 9738 (!(flags & TSB_SWAPIN) &&
9882 9739 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9883 9740 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9884 9741 tte_sz_mask, flags, sfmmup))) {
9885 9742 (void) sfmmu_hat_enter(sfmmup);
9886 9743 if (!(flags & TSB_SWAPIN))
9887 9744 SFMMU_STAT(sf_tsb_resize_failures);
9888 9745 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9889 9746 return (TSB_ALLOCFAIL);
9890 9747 }
9891 9748 (void) sfmmu_hat_enter(sfmmup);
9892 9749
9893 9750 /*
9894 9751 * Re-check to make sure somebody else didn't muck with us while we
9895 9752 * didn't hold the HAT lock. If the process swapped out, fine, just
9896 9753 * exit; this can happen if we try to shrink the TSB from the context
9897 9754 * of another process (such as on an ISM unmap), though it is rare.
9898 9755 */
9899 9756 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9900 9757 SFMMU_STAT(sf_tsb_resize_failures);
9901 9758 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9902 9759 sfmmu_hat_exit(hatlockp);
9903 9760 sfmmu_tsbinfo_free(new_tsbinfo);
9904 9761 (void) sfmmu_hat_enter(sfmmup);
9905 9762 return (TSB_LOSTRACE);
9906 9763 }
9907 9764
9908 9765 #ifdef DEBUG
9909 9766 /* Reverify that the tsb_info still exists.. for debugging only */
9910 9767 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9911 9768 curtsb != old_tsbinfo && curtsb != NULL;
9912 9769 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9913 9770 ;
9914 9771 ASSERT(curtsb != NULL);
9915 9772 #endif /* DEBUG */
9916 9773
9917 9774 /*
9918 9775 * Quiesce any CPUs running this process on their next TLB miss
9919 9776 * so they atomically see the new tsb_info. We temporarily set the
9920 9777 * context to invalid context so new threads that come on processor
9921 9778 * after we do the xcall to cpusran will also serialize behind the
9922 9779 * HAT lock on TLB miss and will see the new TSB. Since this short
9923 9780 * race with a new thread coming on processor is relatively rare,
9924 9781 * this synchronization mechanism should be cheaper than always
9925 9782 * pausing all CPUs for the duration of the setup, which is what
9926 9783 	 * the old implementation did. This is particularly true if we are
9927 9784 * copying a huge chunk of memory around during that window.
9928 9785 *
9929 9786 * The memory barriers are to make sure things stay consistent
9930 9787 * with resume() since it does not hold the HAT lock while
9931 9788 * walking the list of tsb_info structures.
9932 9789 */
9933 9790 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9934 9791 /* The TSB is either growing or shrinking. */
9935 9792 sfmmu_invalidate_ctx(sfmmup);
9936 9793 } else {
9937 9794 /*
9938 9795 * It is illegal to swap in TSBs from a process other
9939 9796 * than a process being swapped in. This in turn
9940 9797 * implies we do not have a valid MMU context here
9941 9798 * since a process needs one to resolve translation
9942 9799 * misses.
9943 9800 */
9944 9801 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9945 9802 }
9946 9803
9947 9804 #ifdef DEBUG
9948 9805 ASSERT(max_mmu_ctxdoms > 0);
9949 9806
9950 9807 /*
9951 9808 * Process should have INVALID_CONTEXT on all MMUs
9952 9809 */
9953 9810 for (i = 0; i < max_mmu_ctxdoms; i++) {
9954 9811
9955 9812 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9956 9813 }
9957 9814 #endif
9958 9815
9959 9816 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9960 9817 membar_stst(); /* strict ordering required */
9961 9818 if (prevtsb)
9962 9819 prevtsb->tsb_next = new_tsbinfo;
9963 9820 else
9964 9821 sfmmup->sfmmu_tsb = new_tsbinfo;
9965 9822 membar_enter(); /* make sure new TSB globally visible */
9966 9823
9967 9824 /*
9968 9825 * We need to migrate TSB entries from the old TSB to the new TSB
9969 9826 * if tsb_remap_ttes is set and the TSB is growing.
9970 9827 */
9971 9828 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9972 9829 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9973 9830
9974 9831 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9975 9832
9976 9833 /*
9977 9834 * Drop the HAT lock to free our old tsb_info.
9978 9835 */
9979 9836 sfmmu_hat_exit(hatlockp);
9980 9837
9981 9838 if ((flags & TSB_GROW) == TSB_GROW) {
9982 9839 SFMMU_STAT(sf_tsb_grow);
9983 9840 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9984 9841 SFMMU_STAT(sf_tsb_shrink);
9985 9842 }
9986 9843
9987 9844 sfmmu_tsbinfo_free(old_tsbinfo);
9988 9845
9989 9846 (void) sfmmu_hat_enter(sfmmup);
9990 9847 return (TSB_SUCCESS);
9991 9848 }
9992 9849
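/*
 * A minimal caller sketch (illustrative only, mirroring sfmmu_size_tsb()
 * below; new_szc is just a placeholder size code): the hat lock must be
 * held on entry, and callers must tolerate TSB_LOSTRACE and TSB_ALLOCFAIL,
 * in which case the old TSB stays in place:
 *
 *	hatlockp = sfmmu_hat_enter(sfmmup);
 *	(void) sfmmu_replace_tsb(sfmmup, sfmmup->sfmmu_tsb, new_szc,
 *	    hatlockp, TSB_GROW);
 *	sfmmu_hat_exit(hatlockp);
 */
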
9993 9850 /*
9994 9851  * This function will re-program the hat pgsz array and invalidate the
9995 9852 * process' context, forcing the process to switch to another
9996 9853 * context on the next TLB miss, and therefore start using the
9997 9854 * TLB that is reprogrammed for the new page sizes.
9998 9855 */
9999 9856 void
10000 9857 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10001 9858 {
10002 9859 int i;
10003 9860 hatlock_t *hatlockp = NULL;
10004 9861
10005 9862 hatlockp = sfmmu_hat_enter(sfmmup);
10006 9863 /* USIII+-IV+ optimization, requires hat lock */
10007 9864 if (tmp_pgsz) {
10008 9865 for (i = 0; i < mmu_page_sizes; i++)
10009 9866 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10010 9867 }
10011 9868 SFMMU_STAT(sf_tlb_reprog_pgsz);
10012 9869
10013 9870 sfmmu_invalidate_ctx(sfmmup);
10014 9871
10015 9872 sfmmu_hat_exit(hatlockp);
10016 9873 }
10017 9874
10018 9875 /*
10019 9876 * The scd_rttecnt field in the SCD must be updated to take account of the
10020 9877 * regions which it contains.
10021 9878 */
10022 9879 static void
10023 9880 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10024 9881 {
10025 9882 uint_t rid;
10026 9883 uint_t i, j;
10027 9884 ulong_t w;
10028 9885 sf_region_t *rgnp;
10029 9886
10030 9887 ASSERT(srdp != NULL);
10031 9888
10032 9889 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10033 9890 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10034 9891 continue;
10035 9892 }
10036 9893
10037 9894 j = 0;
10038 9895 while (w) {
10039 9896 if (!(w & 0x1)) {
10040 9897 j++;
10041 9898 w >>= 1;
10042 9899 continue;
10043 9900 }
10044 9901 rid = (i << BT_ULSHIFT) | j;
10045 9902 j++;
10046 9903 w >>= 1;
10047 9904
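			/*
			 * For example, with 64-bit bitmap words
			 * (BT_ULSHIFT == 6), bit j == 3 set in word i == 1
			 * decodes to rid == (1 << 6) | 3 == 67.
			 */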
10048 9905 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10049 9906 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10050 9907 rgnp = srdp->srd_hmergnp[rid];
10051 9908 ASSERT(rgnp->rgn_refcnt > 0);
10052 9909 ASSERT(rgnp->rgn_id == rid);
10053 9910
10054 9911 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10055 9912 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10056 9913
10057 9914 /*
10058 9915 * Maintain the tsb0 inflation cnt for the regions
10059 9916 * in the SCD.
10060 9917 */
10061 9918 if (rgnp->rgn_pgszc >= TTE4M) {
10062 9919 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10063 9920 rgnp->rgn_size >>
10064 9921 (TTE_PAGE_SHIFT(TTE8K) + 2);
10065 9922 }
10066 9923 }
10067 9924 }
10068 9925 }
10069 9926
10070 9927 /*
10071 9928 * This function assumes that there are either four or six supported page
10072 9929 * sizes and at most two programmable TLBs, so we need to decide which
10073 9930 * page sizes are most important and then tell the MMU layer so it
10074 9931 * can adjust the TLB page sizes accordingly (if supported).
10075 9932 *
10076 9933 * If these assumptions change, this function will need to be
10077 9934 * updated to support whatever the new limits are.
10078 9935 *
10079 9936 * The growing flag is nonzero if we are growing the address space,
10080 9937 * and zero if it is shrinking. This allows us to decide whether
10081 9938 * to grow or shrink our TSB, depending upon available memory
10082 9939 * conditions.
10083 9940 */
10084 9941 static void
10085 9942 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10086 9943 {
10087 9944 uint64_t ttecnt[MMU_PAGE_SIZES];
10088 9945 uint64_t tte8k_cnt, tte4m_cnt;
10089 9946 uint8_t i;
10090 9947 int sectsb_thresh;
10091 9948
10092 9949 /*
10093 9950 * Kernel threads, processes with small address spaces not using
10094 9951 * large pages, and dummy ISM HATs need not apply.
10095 9952 */
10096 9953 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10097 9954 return;
10098 9955
10099 9956 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10100 9957 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10101 9958 return;
10102 9959
10103 9960 for (i = 0; i < mmu_page_sizes; i++) {
10104 9961 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10105 9962 sfmmup->sfmmu_ismttecnt[i];
10106 9963 }
10107 9964
10108 9965 /* Check pagesizes in use, and possibly reprogram DTLB. */
10109 9966 if (&mmu_check_page_sizes)
10110 9967 mmu_check_page_sizes(sfmmup, ttecnt);
10111 9968
10112 9969 /*
10113 9970 * Calculate the number of 8k ttes to represent the span of these
10114 9971 * pages.
10115 9972 */
10116 9973 tte8k_cnt = ttecnt[TTE8K] +
10117 9974 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10118 9975 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
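	/*
	 * For example (assuming the usual sun4 page sizes, with an 8K base
	 * page), 1000 8K ttes, 16 64K ttes and 2 512K ttes span
	 * 1000 + (16 << 3) + (2 << 6) = 1256 8K pages, so tte8k_cnt is 1256.
	 */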
10119 9976 if (mmu_page_sizes == max_mmu_page_sizes) {
10120 9977 tte4m_cnt = ttecnt[TTE4M] +
10121 9978 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10122 9979 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10123 9980 } else {
10124 9981 tte4m_cnt = ttecnt[TTE4M];
10125 9982 }
10126 9983
10127 9984 /*
10128 9985 * Inflate tte8k_cnt to allow for region large page allocation failure.
10129 9986 */
10130 9987 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10131 9988
10132 9989 /*
10133 9990 * Inflate TSB sizes by a factor of 2 if this process
10134 9991 * uses 4M text pages to minimize extra conflict misses
10135 9992 * in the first TSB since without counting text pages
10136 9993 * 8K TSB may become too small.
10137 9994 *
10138 9995 * Also double the size of the second TSB to minimize
10139 9996 * extra conflict misses due to competition between 4M text pages
10140 9997 * and data pages.
10141 9998 *
10142 9999 * We need to adjust the second TSB allocation threshold by the
10143 10000 * inflation factor, since there is no point in creating a second
10144 10001 * TSB when we know all the mappings can fit in the I/D TLBs.
10145 10002 */
10146 10003 sectsb_thresh = tsb_sectsb_threshold;
10147 10004 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10148 10005 tte8k_cnt <<= 1;
10149 10006 tte4m_cnt <<= 1;
10150 10007 sectsb_thresh <<= 1;
10151 10008 }
10152 10009
10153 10010 /*
10154 10011 * Check to see if our TSB is the right size; we may need to
10155 10012 * grow or shrink it. If the process is small, our work is
10156 10013 * finished at this point.
10157 10014 */
10158 10015 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10159 10016 return;
10160 10017 }
10161 10018 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10162 10019 }
10163 10020
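/*
 * Size (or resize) this process' TSBs based on the 8K- and 4M-equivalent
 * tte counts computed by sfmmu_check_page_sizes(). The first TSB is grown
 * or shrunk as needed; a second TSB (for 4M and larger pages) is created
 * once tte4m_cnt exceeds sectsb_thresh and is resized thereafter.
 */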
10164 10021 static void
10165 10022 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10166 10023 uint64_t tte4m_cnt, int sectsb_thresh)
10167 10024 {
10168 10025 int tsb_bits;
10169 10026 uint_t tsb_szc;
10170 10027 struct tsb_info *tsbinfop;
10171 10028 hatlock_t *hatlockp = NULL;
10172 10029
10173 10030 hatlockp = sfmmu_hat_enter(sfmmup);
10174 10031 ASSERT(hatlockp != NULL);
10175 10032 tsbinfop = sfmmup->sfmmu_tsb;
10176 10033 ASSERT(tsbinfop != NULL);
10177 10034
10178 10035 /*
10179 10036 * If we're growing, select the size based on RSS. If we're
10180 10037 * shrinking, leave some room so we don't have to turn around and
10181 10038 * grow again immediately.
10182 10039 */
10183 10040 if (growing)
10184 10041 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10185 10042 else
10186 10043 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10187 10044
10188 10045 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10189 10046 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10190 10047 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10191 10048 hatlockp, TSB_SHRINK);
10192 10049 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10193 10050 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10194 10051 hatlockp, TSB_GROW);
10195 10052 }
10196 10053 tsbinfop = sfmmup->sfmmu_tsb;
10197 10054
10198 10055 /*
10199 10056 * With the TLB and first TSB out of the way, we need to see if
10200 10057 * we need a second TSB for 4M pages. If we managed to reprogram
10201 10058 * the TLB page sizes above, the process will start using this new
10202 10059 * TSB right away; otherwise, it will start using it on the next
10203 10060 * context switch. Either way, it's no big deal so there's no
10204 10061 * synchronization with the trap handlers here unless we grow the
10205 10062 * TSB (in which case it's required to prevent using the old one
10206 10063 * after it's freed). Note: second tsb is required for 32M/256M
10207 10064 * page sizes.
10208 10065 */
10209 10066 if (tte4m_cnt > sectsb_thresh) {
10210 10067 /*
10211 10068 * If we're growing, select the size based on RSS. If we're
10212 10069 * shrinking, leave some room so we don't have to turn
10213 10070 * around and grow again immediately.
10214 10071 */
10215 10072 if (growing)
10216 10073 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10217 10074 else
10218 10075 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10219 10076 if (tsbinfop->tsb_next == NULL) {
10220 10077 struct tsb_info *newtsb;
10221 10078 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10222 10079 0 : TSB_ALLOC;
10223 10080
10224 10081 sfmmu_hat_exit(hatlockp);
10225 10082
10226 10083 /*
10227 10084 * Try to allocate a TSB for 4[32|256]M pages. If we
10228 10085 * can't get the size we want, retry w/a minimum sized
10229 10086 * TSB. If that still didn't work, give up; we can
10230 10087 * still run without one.
10231 10088 */
10232 10089 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10233 10090 TSB4M|TSB32M|TSB256M:TSB4M;
10234 10091 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10235 10092 allocflags, sfmmup)) &&
10236 10093 (tsb_szc <= TSB_4M_SZCODE ||
10237 10094 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10238 10095 tsb_bits, allocflags, sfmmup)) &&
10239 10096 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10240 10097 tsb_bits, allocflags, sfmmup)) {
10241 10098 return;
10242 10099 }
10243 10100
10244 10101 hatlockp = sfmmu_hat_enter(sfmmup);
10245 10102
10246 10103 sfmmu_invalidate_ctx(sfmmup);
10247 10104
10248 10105 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10249 10106 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10250 10107 SFMMU_STAT(sf_tsb_sectsb_create);
10251 10108 sfmmu_hat_exit(hatlockp);
10252 10109 return;
10253 10110 } else {
10254 10111 /*
10255 10112 * It's annoying, but possible for us
10256 10113 				 * to get here: we dropped the HAT lock
10257 10114 * because of locking order in the kmem
10258 10115 * allocator, and while we were off getting
10259 10116 * our memory, some other thread decided to
10260 10117 * do us a favor and won the race to get a
10261 10118 * second TSB for this process. Sigh.
10262 10119 */
10263 10120 sfmmu_hat_exit(hatlockp);
10264 10121 sfmmu_tsbinfo_free(newtsb);
10265 10122 return;
10266 10123 }
10267 10124 }
10268 10125
10269 10126 /*
10270 10127 * We have a second TSB, see if it's big enough.
10271 10128 */
10272 10129 tsbinfop = tsbinfop->tsb_next;
10273 10130
10274 10131 /*
10275 10132 * Check to see if our second TSB is the right size;
10276 10133 * we may need to grow or shrink it.
10277 10134 * To prevent thrashing (e.g. growing the TSB on a
10278 10135 * subsequent map operation), only try to shrink if
10279 10136 * the TSB reach exceeds twice the virtual address
10280 10137 * space size.
10281 10138 */
10282 10139 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10283 10140 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10284 10141 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10285 10142 tsb_szc, hatlockp, TSB_SHRINK);
10286 10143 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10287 10144 TSB_OK_GROW()) {
10288 10145 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10289 10146 tsb_szc, hatlockp, TSB_GROW);
10290 10147 }
10291 10148 }
10292 10149
10293 10150 sfmmu_hat_exit(hatlockp);
10294 10151 }
10295 10152
10296 10153 /*
10297 10154 * Free up a sfmmu
10298 10155 * Since the sfmmu is currently embedded in the hat struct we simply zero
10299 10156 * out our fields and free up the ism map blk list if any.
10300 10157 */
10301 10158 static void
10302 10159 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10303 10160 {
10304 10161 ism_blk_t *blkp, *nx_blkp;
10305 10162 #ifdef DEBUG
10306 10163 ism_map_t *map;
10307 10164 int i;
10308 10165 #endif
10309 10166
10310 10167 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10311 10168 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10312 10169 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10313 10170 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10314 10171 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10315 10172 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10316 10173 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10317 10174
10318 10175 sfmmup->sfmmu_free = 0;
10319 10176 sfmmup->sfmmu_ismhat = 0;
10320 10177
10321 10178 blkp = sfmmup->sfmmu_iblk;
10322 10179 sfmmup->sfmmu_iblk = NULL;
10323 10180
10324 10181 while (blkp) {
10325 10182 #ifdef DEBUG
10326 10183 map = blkp->iblk_maps;
10327 10184 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10328 10185 ASSERT(map[i].imap_seg == 0);
10329 10186 ASSERT(map[i].imap_ismhat == NULL);
10330 10187 ASSERT(map[i].imap_ment == NULL);
10331 10188 }
10332 10189 #endif
10333 10190 nx_blkp = blkp->iblk_next;
10334 10191 blkp->iblk_next = NULL;
10335 10192 blkp->iblk_nextpa = (uint64_t)-1;
10336 10193 kmem_cache_free(ism_blk_cache, blkp);
10337 10194 blkp = nx_blkp;
10338 10195 }
10339 10196 }
10340 10197
10341 10198 /*
10342 10199  * Locking primitives accessed by HATLOCK macros
10343 10200 */
10344 10201
10345 10202 #define SFMMU_SPL_MTX (0x0)
10346 10203 #define SFMMU_ML_MTX (0x1)
10347 10204
10348 10205 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10349 10206 SPL_HASH(pg) : MLIST_HASH(pg))
10350 10207
10351 10208 kmutex_t *
10352 10209 sfmmu_page_enter(struct page *pp)
10353 10210 {
10354 10211 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10355 10212 }
10356 10213
10357 10214 void
10358 10215 sfmmu_page_exit(kmutex_t *spl)
10359 10216 {
10360 10217 mutex_exit(spl);
10361 10218 }
10362 10219
10363 10220 int
10364 10221 sfmmu_page_spl_held(struct page *pp)
10365 10222 {
10366 10223 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10367 10224 }
10368 10225
10369 10226 kmutex_t *
10370 10227 sfmmu_mlist_enter(struct page *pp)
10371 10228 {
10372 10229 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10373 10230 }
10374 10231
10375 10232 void
10376 10233 sfmmu_mlist_exit(kmutex_t *mml)
10377 10234 {
10378 10235 mutex_exit(mml);
10379 10236 }
10380 10237
10381 10238 int
10382 10239 sfmmu_mlist_held(struct page *pp)
10383 10240 {
10384 10241
10385 10242 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10386 10243 }
10387 10244
10388 10245 /*
10389 10246 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10390 10247  * the sfmmu_mlist_enter() case the mml_table lock array is used; for
10391 10248  * sfmmu_page_enter() the sfmmu_page_lock lock array is used.
10392 10249 *
10393 10250 * The lock is taken on a root page so that it protects an operation on all
10394 10251 * constituent pages of a large page pp belongs to.
10395 10252 *
10396 10253 * The routine takes a lock from the appropriate array. The lock is determined
10397 10254 * by hashing the root page. After taking the lock this routine checks if the
10398 10255 * root page has the same size code that was used to determine the root (i.e
10399 10256 * that root hasn't changed). If root page has the expected p_szc field we
10400 10257 * have the right lock and it's returned to the caller. If root's p_szc
10401 10258 * decreased we release the lock and retry from the beginning. This case can
10402 10259 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10403 10260 * value and taking the lock. The number of retries due to p_szc decrease is
10404 10261 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10405 10262 * determined by hashing pp itself.
10406 10263 *
10407 10264 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10408 10265 * possible that p_szc can increase. To increase p_szc a thread has to lock
10409 10266 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10410 10267 * callers that don't hold a page locked recheck if hmeblk through which pp
10411 10268  * was found still maps this pp. If it no longer maps this pp, the returned lock
10412 10269 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10413 10270 * p_szc increase after taking the lock it returns this lock without further
10414 10271 * retries because in this case the caller doesn't care about which lock was
10415 10272 * taken. The caller will drop it right away.
10416 10273 *
10417 10274 * After the routine returns it's guaranteed that hat_page_demote() can't
10418 10275 * change p_szc field of any of constituent pages of a large page pp belongs
10419 10276 * to as long as pp was either locked at least SHARED prior to this call or
10420 10277 * the caller finds that hment that pointed to this pp still references this
10421 10278 * pp (this also assumes that the caller holds hme hash bucket lock so that
10422 10279 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10423 10280 * hat_pageunload()).
10424 10281 */
10425 10282 static kmutex_t *
10426 10283 sfmmu_mlspl_enter(struct page *pp, int type)
10427 10284 {
10428 10285 kmutex_t *mtx;
10429 10286 uint_t prev_rszc = UINT_MAX;
10430 10287 page_t *rootpp;
10431 10288 uint_t szc;
10432 10289 uint_t rszc;
10433 10290 uint_t pszc = pp->p_szc;
10434 10291
10435 10292 ASSERT(pp != NULL);
10436 10293
10437 10294 again:
10438 10295 if (pszc == 0) {
10439 10296 mtx = SFMMU_MLSPL_MTX(type, pp);
10440 10297 mutex_enter(mtx);
10441 10298 return (mtx);
10442 10299 }
10443 10300
10444 10301 /* The lock lives in the root page */
10445 10302 rootpp = PP_GROUPLEADER(pp, pszc);
10446 10303 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10447 10304 mutex_enter(mtx);
10448 10305
10449 10306 /*
10450 10307 * Return mml in the following 3 cases:
10451 10308 *
10452 10309 * 1) If pp itself is root since if its p_szc decreased before we took
10453 10310 * the lock pp is still the root of smaller szc page. And if its p_szc
10454 10311 * increased it doesn't matter what lock we return (see comment in
10455 10312 * front of this routine).
10456 10313 *
10457 10314 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10458 10315 * large page we have the right lock since any previous potential
10459 10316 * hat_page_demote() is done demoting from greater than current root's
10460 10317 * p_szc because hat_page_demote() changes root's p_szc last. No
10461 10318 * further hat_page_demote() can start or be in progress since it
10462 10319 * would need the same lock we currently hold.
10463 10320 *
10464 10321 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10465 10322 * matter what lock we return (see comment in front of this routine).
10466 10323 */
10467 10324 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10468 10325 rszc >= prev_rszc) {
10469 10326 return (mtx);
10470 10327 }
10471 10328
10472 10329 /*
10473 10330 * hat_page_demote() could have decreased root's p_szc.
10474 10331 * In this case pp's p_szc must also be smaller than pszc.
10475 10332 * Retry.
10476 10333 */
10477 10334 if (rszc < pszc) {
10478 10335 szc = pp->p_szc;
10479 10336 if (szc < pszc) {
10480 10337 mutex_exit(mtx);
10481 10338 pszc = szc;
10482 10339 goto again;
10483 10340 }
10484 10341 /*
10485 10342 * pp's p_szc increased after it was decreased.
10486 10343 * page cannot be mapped. Return current lock. The caller
10487 10344 * will drop it right away.
10488 10345 */
10489 10346 return (mtx);
10490 10347 }
10491 10348
10492 10349 /*
10493 10350 * root's p_szc is greater than pp's p_szc.
10494 10351 * hat_page_demote() is not done with all pages
10495 10352 * yet. Wait for it to complete.
10496 10353 */
10497 10354 mutex_exit(mtx);
10498 10355 rootpp = PP_GROUPLEADER(rootpp, rszc);
10499 10356 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10500 10357 mutex_enter(mtx);
10501 10358 mutex_exit(mtx);
10502 10359 prev_rszc = rszc;
10503 10360 goto again;
10504 10361 }
10505 10362
10506 10363 static int
10507 10364 sfmmu_mlspl_held(struct page *pp, int type)
10508 10365 {
10509 10366 kmutex_t *mtx;
10510 10367
10511 10368 ASSERT(pp != NULL);
10512 10369 /* The lock lives in the root page */
10513 10370 pp = PP_PAGEROOT(pp);
10514 10371 ASSERT(pp != NULL);
10515 10372
10516 10373 mtx = SFMMU_MLSPL_MTX(type, pp);
10517 10374 return (MUTEX_HELD(mtx));
10518 10375 }
10519 10376
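/*
 * Try to hand out an hme_blk without going to kmem: first from the reserve
 * free list (freehblkp), then from the cpu pending queues. Returns 1 and
 * sets *hmeblkpp on success, 0 otherwise. Non-critical callers are
 * throttled once the free list drops to HBLK_RESERVE_MIN.
 */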
10520 10377 static uint_t
10521 10378 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10522 10379 {
10523 10380 struct hme_blk *hblkp;
10524 10381
10525 10382
10526 10383 if (freehblkp != NULL) {
10527 10384 mutex_enter(&freehblkp_lock);
10528 10385 if (freehblkp != NULL) {
10529 10386 /*
10530 10387 			 * If the current thread owns hblk_reserve, or this is a
10531 10388 			 * critical request from sfmmu_hblk_steal(),
10532 10389 			 * let it succeed even if freehblkcnt is really low.
10533 10390 */
10534 10391 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10535 10392 SFMMU_STAT(sf_get_free_throttle);
10536 10393 mutex_exit(&freehblkp_lock);
10537 10394 return (0);
10538 10395 }
10539 10396 freehblkcnt--;
10540 10397 *hmeblkpp = freehblkp;
10541 10398 hblkp = *hmeblkpp;
10542 10399 freehblkp = hblkp->hblk_next;
10543 10400 mutex_exit(&freehblkp_lock);
10544 10401 hblkp->hblk_next = NULL;
10545 10402 SFMMU_STAT(sf_get_free_success);
10546 10403
10547 10404 ASSERT(hblkp->hblk_hmecnt == 0);
10548 10405 ASSERT(hblkp->hblk_vcnt == 0);
10549 10406 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10550 10407
10551 10408 return (1);
10552 10409 }
10553 10410 mutex_exit(&freehblkp_lock);
10554 10411 }
10555 10412
10556 10413 /* Check cpu hblk pending queues */
10557 10414 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10558 10415 hblkp = *hmeblkpp;
10559 10416 hblkp->hblk_next = NULL;
10560 10417 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10561 10418
10562 10419 ASSERT(hblkp->hblk_hmecnt == 0);
10563 10420 ASSERT(hblkp->hblk_vcnt == 0);
10564 10421
10565 10422 return (1);
10566 10423 }
10567 10424
10568 10425 SFMMU_STAT(sf_get_free_fail);
10569 10426 return (0);
10570 10427 }
10571 10428
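/*
 * Return an hme_blk to the reserve free list if there is room (critical,
 * i.e. kernel-mapping, callers always succeed); otherwise trim the list
 * back down to HBLK_RESERVE_CNT. Returns 1 if the block was queued, 0 if
 * the caller should free it to kmem instead.
 */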
10572 10429 static uint_t
10573 10430 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10574 10431 {
10575 10432 struct hme_blk *hblkp;
10576 10433
10577 10434 ASSERT(hmeblkp->hblk_hmecnt == 0);
10578 10435 ASSERT(hmeblkp->hblk_vcnt == 0);
10579 10436 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10580 10437
10581 10438 /*
10582 10439 * If the current thread is mapping into kernel space,
10583 10440 	 * let it succeed even if freehblkcnt is at its maximum
10584 10441 * so that it will avoid freeing it to kmem.
10585 10442 * This will prevent stack overflow due to
10586 10443 * possible recursion since kmem_cache_free()
10587 10444 * might require creation of a slab which
10588 10445 * in turn needs an hmeblk to map that slab;
10589 10446 * let's break this vicious chain at the first
10590 10447 * opportunity.
10591 10448 */
10592 10449 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10593 10450 mutex_enter(&freehblkp_lock);
10594 10451 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10595 10452 SFMMU_STAT(sf_put_free_success);
10596 10453 freehblkcnt++;
10597 10454 hmeblkp->hblk_next = freehblkp;
10598 10455 freehblkp = hmeblkp;
10599 10456 mutex_exit(&freehblkp_lock);
10600 10457 return (1);
10601 10458 }
10602 10459 mutex_exit(&freehblkp_lock);
10603 10460 }
10604 10461
10605 10462 /*
10606 10463 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10607 10464 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10608 10465 * we are not in the process of mapping into kernel space.
10609 10466 */
10610 10467 ASSERT(!critical);
10611 10468 while (freehblkcnt > HBLK_RESERVE_CNT) {
10612 10469 mutex_enter(&freehblkp_lock);
10613 10470 if (freehblkcnt > HBLK_RESERVE_CNT) {
10614 10471 freehblkcnt--;
10615 10472 hblkp = freehblkp;
10616 10473 freehblkp = hblkp->hblk_next;
10617 10474 mutex_exit(&freehblkp_lock);
10618 10475 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10619 10476 kmem_cache_free(sfmmu8_cache, hblkp);
10620 10477 continue;
10621 10478 }
10622 10479 mutex_exit(&freehblkp_lock);
10623 10480 }
10624 10481 SFMMU_STAT(sf_put_free_fail);
10625 10482 return (0);
10626 10483 }
10627 10484
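/*
 * Swap hblk_reserve with a freshly allocated hme_blk: copy its contents
 * into the new block, add the new block to the kernel hme hash, move the
 * p_mapping hments from the old hments to the new ones, remove the old
 * block from the hash and finally reset hblk_reserve for reuse.
 */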
10628 10485 static void
10629 10486 sfmmu_hblk_swap(struct hme_blk *new)
10630 10487 {
10631 10488 struct hme_blk *old, *hblkp, *prev;
10632 10489 uint64_t newpa;
10633 10490 caddr_t base, vaddr, endaddr;
10634 10491 struct hmehash_bucket *hmebp;
10635 10492 struct sf_hment *osfhme, *nsfhme;
10636 10493 page_t *pp;
10637 10494 kmutex_t *pml;
10638 10495 tte_t tte;
10639 10496 struct hme_blk *list = NULL;
10640 10497
10641 10498 #ifdef DEBUG
10642 10499 hmeblk_tag hblktag;
10643 10500 struct hme_blk *found;
10644 10501 #endif
10645 10502 old = HBLK_RESERVE;
10646 10503 ASSERT(!old->hblk_shared);
10647 10504
10648 10505 /*
10649 10506 * save pa before bcopy clobbers it
10650 10507 */
10651 10508 newpa = new->hblk_nextpa;
10652 10509
10653 10510 base = (caddr_t)get_hblk_base(old);
10654 10511 endaddr = base + get_hblk_span(old);
10655 10512
10656 10513 /*
10657 10514 * acquire hash bucket lock.
10658 10515 */
10659 10516 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10660 10517 SFMMU_INVALID_SHMERID);
10661 10518
10662 10519 /*
10663 10520 * copy contents from old to new
10664 10521 */
10665 10522 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10666 10523
10667 10524 /*
10668 10525 * add new to hash chain
10669 10526 */
10670 10527 sfmmu_hblk_hash_add(hmebp, new, newpa);
10671 10528
10672 10529 /*
10673 10530 * search hash chain for hblk_reserve; this needs to be performed
10674 10531 * after adding new, otherwise prev won't correspond to the hblk which
10675 10532 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10676 10533 * remove old later.
10677 10534 */
10678 10535 for (prev = NULL,
10679 10536 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10680 10537 prev = hblkp, hblkp = hblkp->hblk_next)
10681 10538 ;
10682 10539
10683 10540 if (hblkp != old)
10684 10541 panic("sfmmu_hblk_swap: hblk_reserve not found");
10685 10542
10686 10543 /*
10687 10544 * p_mapping list is still pointing to hments in hblk_reserve;
10688 10545 * fix up p_mapping list so that they point to hments in new.
10689 10546 *
10690 10547 * Since all these mappings are created by hblk_reserve_thread
10691 10548 * on the way and it's using at least one of the buffers from each of
10692 10549 * the newly minted slabs, there is no danger of any of these
10693 10550 * mappings getting unloaded by another thread.
10694 10551 *
10695 10552 * tsbmiss could only modify ref/mod bits of hments in old/new.
10696 10553 * Since all of these hments hold mappings established by segkmem
10697 10554 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10698 10555 * have no meaning for the mappings in hblk_reserve. hments in
10699 10556 * old and new are identical except for ref/mod bits.
10700 10557 */
10701 10558 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10702 10559
10703 10560 HBLKTOHME(osfhme, old, vaddr);
10704 10561 sfmmu_copytte(&osfhme->hme_tte, &tte);
10705 10562
10706 10563 if (TTE_IS_VALID(&tte)) {
10707 10564 if ((pp = osfhme->hme_page) == NULL)
10708 10565 panic("sfmmu_hblk_swap: page not mapped");
10709 10566
10710 10567 pml = sfmmu_mlist_enter(pp);
10711 10568
10712 10569 if (pp != osfhme->hme_page)
10713 10570 panic("sfmmu_hblk_swap: mapping changed");
10714 10571
10715 10572 HBLKTOHME(nsfhme, new, vaddr);
10716 10573
10717 10574 HME_ADD(nsfhme, pp);
10718 10575 HME_SUB(osfhme, pp);
10719 10576
10720 10577 sfmmu_mlist_exit(pml);
10721 10578 }
10722 10579 }
10723 10580
10724 10581 /*
10725 10582 * remove old from hash chain
10726 10583 */
10727 10584 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10728 10585
10729 10586 #ifdef DEBUG
10730 10587
10731 10588 hblktag.htag_id = ksfmmup;
10732 10589 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10733 10590 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10734 10591 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10735 10592 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10736 10593
10737 10594 if (found != new)
10738 10595 panic("sfmmu_hblk_swap: new hblk not found");
10739 10596 #endif
10740 10597
10741 10598 SFMMU_HASH_UNLOCK(hmebp);
10742 10599
10743 10600 /*
10744 10601 * Reset hblk_reserve
10745 10602 */
10746 10603 bzero((void *)old, HME8BLK_SZ);
10747 10604 old->hblk_nextpa = va_to_pa((caddr_t)old);
10748 10605 }
10749 10606
10750 10607 /*
10751 10608 * Grab the mlist mutex for both pages passed in.
10752 10609 *
10753 10610 * low and high will be returned as pointers to the mutexes for these pages.
10754 10611 * low refers to the mutex residing in the lower bin of the mlist hash, while
10755 10612 * high refers to the mutex residing in the higher bin of the mlist hash. This
10756 10613 * is due to the locking order restrictions on the same thread grabbing
10757 10614 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10758 10615 *
10759 10616 * If both pages hash to the same mutex, only grab that single mutex, and
10760 10617 * high will be returned as NULL
10761 10618 * If the pages hash to different bins in the hash, grab the lower addressed
10762 10619 * lock first and then the higher addressed lock in order to follow the locking
10763 10620 * rules involved with the same thread grabbing multiple mlist mutexes.
10764 10621 * low and high will both have non-NULL values.
10765 10622 */
10766 10623 static void
10767 10624 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10768 10625 kmutex_t **low, kmutex_t **high)
10769 10626 {
10770 10627 kmutex_t *mml_targ, *mml_repl;
10771 10628
10772 10629 /*
10773 10630 * no need to do the dance around szc as in sfmmu_mlist_enter()
10774 10631 * because this routine is only called by hat_page_relocate() and all
10775 10632 * targ and repl pages are already locked EXCL so szc can't change.
10776 10633 */
10777 10634
10778 10635 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10779 10636 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10780 10637
10781 10638 if (mml_targ == mml_repl) {
10782 10639 *low = mml_targ;
10783 10640 *high = NULL;
10784 10641 } else {
10785 10642 if (mml_targ < mml_repl) {
10786 10643 *low = mml_targ;
10787 10644 *high = mml_repl;
10788 10645 } else {
10789 10646 *low = mml_repl;
10790 10647 *high = mml_targ;
10791 10648 }
10792 10649 }
10793 10650
10794 10651 mutex_enter(*low);
10795 10652 if (*high)
10796 10653 mutex_enter(*high);
10797 10654 }
10798 10655
10799 10656 static void
10800 10657 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10801 10658 {
10802 10659 if (high)
10803 10660 mutex_exit(high);
10804 10661 mutex_exit(low);
10805 10662 }
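
/*
 * A minimal usage sketch (illustrative only): hat_page_relocate() is
 * expected to bracket its work on both pages with the enter/exit pair:
 *
 *	kmutex_t *low, *high;
 *
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	...operate on the mapping lists of targ and repl...
 *	sfmmu_mlist_reloc_exit(low, high);
 */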
10806 10663
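/*
 * Per-sfmmu hat lock primitives. The lock is picked by hashing the sfmmu
 * pointer (TSB_HASH()) into the hat_lock array; the kernel hat is never
 * locked here, so these return NULL for ksfmmup and sfmmu_hat_exit()
 * accepts a NULL handle.
 */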
10807 10664 static hatlock_t *
10808 10665 sfmmu_hat_enter(sfmmu_t *sfmmup)
10809 10666 {
10810 10667 hatlock_t *hatlockp;
10811 10668
10812 10669 if (sfmmup != ksfmmup) {
10813 10670 hatlockp = TSB_HASH(sfmmup);
10814 10671 mutex_enter(HATLOCK_MUTEXP(hatlockp));
10815 10672 return (hatlockp);
10816 10673 }
10817 10674 return (NULL);
10818 10675 }
10819 10676
10820 10677 static hatlock_t *
10821 10678 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10822 10679 {
10823 10680 hatlock_t *hatlockp;
10824 10681
10825 10682 if (sfmmup != ksfmmup) {
10826 10683 hatlockp = TSB_HASH(sfmmup);
10827 10684 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10828 10685 return (NULL);
10829 10686 return (hatlockp);
10830 10687 }
10831 10688 return (NULL);
10832 10689 }
10833 10690
10834 10691 static void
10835 10692 sfmmu_hat_exit(hatlock_t *hatlockp)
10836 10693 {
10837 10694 if (hatlockp != NULL)
10838 10695 mutex_exit(HATLOCK_MUTEXP(hatlockp));
10839 10696 }
10840 10697
10841 10698 static void
10842 10699 sfmmu_hat_lock_all(void)
10843 10700 {
10844 10701 int i;
10845 10702 for (i = 0; i < SFMMU_NUM_LOCK; i++)
10846 10703 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10847 10704 }
10848 10705
10849 10706 static void
10850 10707 sfmmu_hat_unlock_all(void)
10851 10708 {
10852 10709 int i;
10853 10710 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10854 10711 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10855 10712 }
10856 10713
10857 10714 int
10858 10715 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10859 10716 {
10860 10717 ASSERT(sfmmup != ksfmmup);
10861 10718 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10862 10719 }
10863 10720
10864 10721 /*
10865 10722 * Locking primitives to provide consistency between ISM unmap
10866 10723 * and other operations. Since ISM unmap can take a long time, we
10867 10724 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10868 10725 * contention on the hatlock buckets while ISM segments are being
10869 10726 * unmapped. The tradeoff is that the flags don't prevent priority
10870 10727 * inversion from occurring, so we must request kernel priority in
10871 10728 * case we have to sleep to keep from getting buried while holding
10872 10729 * the HAT_ISMBUSY flag set, which in turn could block other kernel
10873 10730 * threads from running (for example, in sfmmu_uvatopfn()).
10874 10731 */
10875 10732 static void
10876 10733 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10877 10734 {
10878 10735 hatlock_t *hatlockp;
10879 10736
10880 10737 THREAD_KPRI_REQUEST();
10881 10738 if (!hatlock_held)
10882 10739 hatlockp = sfmmu_hat_enter(sfmmup);
10883 10740 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10884 10741 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10885 10742 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10886 10743 if (!hatlock_held)
10887 10744 sfmmu_hat_exit(hatlockp);
10888 10745 }
10889 10746
10890 10747 static void
10891 10748 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10892 10749 {
10893 10750 hatlock_t *hatlockp;
10894 10751
10895 10752 if (!hatlock_held)
10896 10753 hatlockp = sfmmu_hat_enter(sfmmup);
10897 10754 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10898 10755 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10899 10756 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10900 10757 if (!hatlock_held)
10901 10758 sfmmu_hat_exit(hatlockp);
10902 10759 THREAD_KPRI_RELEASE();
10903 10760 }
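
/*
 * A minimal usage sketch (illustrative only): an ISM unmap path holds the
 * busy flag for the duration of the unmap,
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	...unmap the ISM segment...
 *	sfmmu_ismhat_exit(sfmmup, 0);
 *
 * while operations that must exclude ISM unmap wait on sfmmu_tsb_cv under
 * the hat lock until HAT_ISMBUSY clears.
 */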
10904 10761
10905 10762 /*
10906 10763 *
10907 10764 * Algorithm:
10908 10765 *
10909 10766 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10910 10767 * hblks.
10911 10768 *
10912 10769 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10913 10770 *
10914 10771 * (a) try to return an hblk from reserve pool of free hblks;
10915 10772 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10916 10773 * and return hblk_reserve.
10917 10774 *
10918 10775 * (3) call kmem_cache_alloc() to allocate hblk;
10919 10776 *
10920 10777 * (a) if hblk_reserve_lock is held by the current thread,
10921 10778 * atomically replace hblk_reserve by the hblk that is
10922 10779 * returned by kmem_cache_alloc; release hblk_reserve_lock
10923 10780 * and call kmem_cache_alloc() again.
10924 10781 * (b) if reserve pool is not full, add the hblk that is
10925 10782 * returned by kmem_cache_alloc to reserve pool and
10926 10783 * call kmem_cache_alloc again.
10927 10784 *
10928 10785 */
10929 10786 static struct hme_blk *
10930 10787 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10931 10788 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10932 10789 uint_t flags, uint_t rid)
10933 10790 {
10934 10791 struct hme_blk *hmeblkp = NULL;
10935 10792 struct hme_blk *newhblkp;
10936 10793 struct hme_blk *shw_hblkp = NULL;
10937 10794 struct kmem_cache *sfmmu_cache = NULL;
10938 10795 uint64_t hblkpa;
10939 10796 ulong_t index;
10940 10797 uint_t owner; /* set to 1 if using hblk_reserve */
10941 10798 uint_t forcefree;
10942 10799 int sleep;
10943 10800 sf_srd_t *srdp;
10944 10801 sf_region_t *rgnp;
10945 10802
10946 10803 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10947 10804 ASSERT(hblktag.htag_rid == rid);
10948 10805 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10949 10806 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10950 10807 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10951 10808
10952 10809 /*
10953 10810 * If segkmem is not created yet, allocate from static hmeblks
10954 10811 * created at the end of startup_modules(). See the block comment
10955 10812 * in startup_modules() describing how we estimate the number of
10956 10813 * static hmeblks that will be needed during re-map.
10957 10814 */
10958 10815 if (!hblk_alloc_dynamic) {
10959 10816
10960 10817 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10961 10818
10962 10819 if (size == TTE8K) {
10963 10820 index = nucleus_hblk8.index;
10964 10821 if (index >= nucleus_hblk8.len) {
10965 10822 /*
10966 10823 * If we panic here, see startup_modules() to
10967 10824 * make sure that we are calculating the
10968 10825 * number of hblk8's that we need correctly.
10969 10826 */
10970 10827 prom_panic("no nucleus hblk8 to allocate");
10971 10828 }
10972 10829 hmeblkp =
10973 10830 (struct hme_blk *)&nucleus_hblk8.list[index];
10974 10831 nucleus_hblk8.index++;
10975 10832 SFMMU_STAT(sf_hblk8_nalloc);
10976 10833 } else {
10977 10834 index = nucleus_hblk1.index;
10978 10835 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10979 10836 /*
10980 10837 * If we panic here, see startup_modules().
10981 10838 * Most likely you need to update the
10982 10839 * calculation of the number of hblk1 elements
10983 10840 * that the kernel needs to boot.
10984 10841 */
10985 10842 prom_panic("no nucleus hblk1 to allocate");
10986 10843 }
10987 10844 hmeblkp =
10988 10845 (struct hme_blk *)&nucleus_hblk1.list[index];
10989 10846 nucleus_hblk1.index++;
10990 10847 SFMMU_STAT(sf_hblk1_nalloc);
10991 10848 }
10992 10849
10993 10850 goto hblk_init;
10994 10851 }
10995 10852
10996 10853 SFMMU_HASH_UNLOCK(hmebp);
10997 10854
10998 10855 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
10999 10856 if (mmu_page_sizes == max_mmu_page_sizes) {
11000 10857 if (size < TTE256M)
11001 10858 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11002 10859 size, flags);
11003 10860 } else {
11004 10861 if (size < TTE4M)
11005 10862 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11006 10863 size, flags);
11007 10864 }
11008 10865 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11009 10866 /*
11010 10867 * Shared hmes use per region bitmaps in rgn_hmeflag
11011 10868 * rather than shadow hmeblks to keep track of the
11012 10869 * mapping sizes which have been allocated for the region.
11013 10870 		 * Here we clean up old invalid hmeblks with this rid,
11014 10871 * which may be left around by pageunload().
11015 10872 */
11016 10873 int ttesz;
11017 10874 caddr_t va;
11018 10875 caddr_t eva = vaddr + TTEBYTES(size);
11019 10876
11020 10877 ASSERT(sfmmup != KHATID);
11021 10878
11022 10879 srdp = sfmmup->sfmmu_srdp;
11023 10880 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11024 10881 rgnp = srdp->srd_hmergnp[rid];
11025 10882 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11026 10883 ASSERT(rgnp->rgn_refcnt != 0);
11027 10884 ASSERT(size <= rgnp->rgn_pgszc);
11028 10885
11029 10886 ttesz = HBLK_MIN_TTESZ;
11030 10887 do {
11031 10888 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11032 10889 continue;
11033 10890 }
11034 10891
11035 10892 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11036 10893 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11037 10894 } else if (ttesz < size) {
11038 10895 for (va = vaddr; va < eva;
11039 10896 va += TTEBYTES(ttesz)) {
11040 10897 sfmmu_cleanup_rhblk(srdp, va, rid,
11041 10898 ttesz);
11042 10899 }
11043 10900 }
11044 10901 } while (++ttesz <= rgnp->rgn_pgszc);
11045 10902 }
11046 10903
11047 10904 fill_hblk:
11048 10905 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11049 10906
11050 10907 if (owner && size == TTE8K) {
11051 10908
11052 10909 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11053 10910 /*
11054 10911 * We are really in a tight spot. We already own
11055 10912 * hblk_reserve and we need another hblk. In anticipation
11056 10913 * of this kind of scenario, we specifically set aside
11057 10914 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11058 10915 * by owner of hblk_reserve.
11059 10916 */
11060 10917 SFMMU_STAT(sf_hblk_recurse_cnt);
11061 10918
11062 10919 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11063 10920 panic("sfmmu_hblk_alloc: reserve list is empty");
11064 10921
11065 10922 goto hblk_verify;
11066 10923 }
11067 10924
11068 10925 ASSERT(!owner);
11069 10926
11070 10927 if ((flags & HAT_NO_KALLOC) == 0) {
11071 10928
11072 10929 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11073 10930 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11074 10931
11075 10932 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11076 10933 hmeblkp = sfmmu_hblk_steal(size);
11077 10934 } else {
11078 10935 /*
11079 10936 * if we are the owner of hblk_reserve,
11080 10937 * swap hblk_reserve with hmeblkp and
11081 10938 * start a fresh life. Hope things go
11082 10939 * better this time.
11083 10940 */
11084 10941 if (hblk_reserve_thread == curthread) {
11085 10942 ASSERT(sfmmu_cache == sfmmu8_cache);
11086 10943 sfmmu_hblk_swap(hmeblkp);
11087 10944 hblk_reserve_thread = NULL;
11088 10945 mutex_exit(&hblk_reserve_lock);
11089 10946 goto fill_hblk;
11090 10947 }
11091 10948 /*
11092 10949 * let's donate this hblk to our reserve list if
11093 10950 * we are not mapping kernel range
11094 10951 */
11095 10952 if (size == TTE8K && sfmmup != KHATID) {
11096 10953 if (sfmmu_put_free_hblk(hmeblkp, 0))
11097 10954 goto fill_hblk;
11098 10955 }
11099 10956 }
11100 10957 } else {
11101 10958 /*
11102 10959 * We are here to map the slab in sfmmu8_cache; let's
11103 10960 * check if we could tap our reserve list; if successful,
11104 10961 * this will avoid the pain of going thru sfmmu_hblk_swap
11105 10962 */
11106 10963 SFMMU_STAT(sf_hblk_slab_cnt);
11107 10964 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11108 10965 /*
11109 10966 * let's start hblk_reserve dance
11110 10967 */
11111 10968 SFMMU_STAT(sf_hblk_reserve_cnt);
11112 10969 owner = 1;
11113 10970 mutex_enter(&hblk_reserve_lock);
11114 10971 hmeblkp = HBLK_RESERVE;
11115 10972 hblk_reserve_thread = curthread;
11116 10973 }
11117 10974 }
11118 10975
11119 10976 hblk_verify:
11120 10977 ASSERT(hmeblkp != NULL);
11121 10978 set_hblk_sz(hmeblkp, size);
11122 10979 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11123 10980 SFMMU_HASH_LOCK(hmebp);
11124 10981 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11125 10982 if (newhblkp != NULL) {
11126 10983 SFMMU_HASH_UNLOCK(hmebp);
11127 10984 if (hmeblkp != HBLK_RESERVE) {
11128 10985 /*
11129 10986 * This is really tricky!
11130 10987 *
11131 10988 * vmem_alloc(vmem_seg_arena)
11132 10989 * vmem_alloc(vmem_internal_arena)
11133 10990 * segkmem_alloc(heap_arena)
11134 10991 * vmem_alloc(heap_arena)
11135 10992 * page_create()
11136 10993 * hat_memload()
11137 10994 * kmem_cache_free()
11138 10995 * kmem_cache_alloc()
11139 10996 * kmem_slab_create()
11140 10997 * vmem_alloc(kmem_internal_arena)
11141 10998 * segkmem_alloc(heap_arena)
11142 10999 * vmem_alloc(heap_arena)
11143 11000 * page_create()
11144 11001 * hat_memload()
11145 11002 * kmem_cache_free()
11146 11003 * ...
11147 11004 *
11148 11005 * Thus, hat_memload() could call kmem_cache_free
11149 11006 			 * enough times that we could easily
11150 11007 * hit the bottom of the stack or run out of reserve
11151 11008 * list of vmem_seg structs. So, we must donate
11152 11009 * this hblk to reserve list if it's allocated
11153 11010 * from sfmmu8_cache *and* mapping kernel range.
11154 11011 * We don't need to worry about freeing hmeblk1's
11155 11012 * to kmem since they don't map any kmem slabs.
11156 11013 *
11157 11014 * Note: When segkmem supports largepages, we must
11158 11015 * free hmeblk1's to reserve list as well.
11159 11016 */
11160 11017 forcefree = (sfmmup == KHATID) ? 1 : 0;
11161 11018 if (size == TTE8K &&
11162 11019 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11163 11020 goto re_verify;
11164 11021 }
11165 11022 ASSERT(sfmmup != KHATID);
11166 11023 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11167 11024 } else {
11168 11025 /*
11169 11026 * Hey! we don't need hblk_reserve any more.
11170 11027 */
11171 11028 ASSERT(owner);
11172 11029 hblk_reserve_thread = NULL;
11173 11030 mutex_exit(&hblk_reserve_lock);
11174 11031 owner = 0;
11175 11032 }
11176 11033 re_verify:
11177 11034 /*
11178 11035 * let's check if the goodies are still present
11179 11036 */
11180 11037 SFMMU_HASH_LOCK(hmebp);
11181 11038 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11182 11039 if (newhblkp != NULL) {
11183 11040 /*
11184 11041 * return newhblkp if it's not hblk_reserve;
11185 11042 * if newhblkp is hblk_reserve, return it
11186 11043 * _only if_ we are the owner of hblk_reserve.
11187 11044 */
11188 11045 if (newhblkp != HBLK_RESERVE || owner) {
11189 11046 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11190 11047 newhblkp->hblk_shared);
11191 11048 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11192 11049 !newhblkp->hblk_shared);
11193 11050 return (newhblkp);
11194 11051 } else {
11195 11052 /*
11196 11053 * we just hit hblk_reserve in the hash and
11197 11054 * we are not the owner of that;
11198 11055 *
11199 11056 * block until hblk_reserve_thread completes
11200 11057 * swapping hblk_reserve and try the dance
11201 11058 * once again.
11202 11059 */
11203 11060 SFMMU_HASH_UNLOCK(hmebp);
11204 11061 mutex_enter(&hblk_reserve_lock);
11205 11062 mutex_exit(&hblk_reserve_lock);
11206 11063 SFMMU_STAT(sf_hblk_reserve_hit);
11207 11064 goto fill_hblk;
11208 11065 }
11209 11066 } else {
11210 11067 /*
11211 11068 * it's no more! try the dance once again.
11212 11069 */
11213 11070 SFMMU_HASH_UNLOCK(hmebp);
11214 11071 goto fill_hblk;
11215 11072 }
11216 11073 }
11217 11074
11218 11075 hblk_init:
11219 11076 if (SFMMU_IS_SHMERID_VALID(rid)) {
11220 11077 uint16_t tteflag = 0x1 <<
11221 11078 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11222 11079
11223 11080 if (!(rgnp->rgn_hmeflags & tteflag)) {
11224 11081 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11225 11082 }
11226 11083 hmeblkp->hblk_shared = 1;
11227 11084 } else {
11228 11085 hmeblkp->hblk_shared = 0;
11229 11086 }
11230 11087 set_hblk_sz(hmeblkp, size);
11231 11088 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11232 11089 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11233 11090 hmeblkp->hblk_tag = hblktag;
11234 11091 hmeblkp->hblk_shadow = shw_hblkp;
11235 11092 hblkpa = hmeblkp->hblk_nextpa;
11236 11093 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11237 11094
11238 11095 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11239 11096 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11240 11097 ASSERT(hmeblkp->hblk_hmecnt == 0);
11241 11098 ASSERT(hmeblkp->hblk_vcnt == 0);
11242 11099 ASSERT(hmeblkp->hblk_lckcnt == 0);
11243 11100 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11244 11101 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11245 11102 return (hmeblkp);
11246 11103 }
11247 11104
11248 11105 /*
11249 11106 * This function cleans up the hme_blk and returns it to the free list.
11250 11107 */
11251 11108 /* ARGSUSED */
11252 11109 static void
11253 11110 sfmmu_hblk_free(struct hme_blk **listp)
11254 11111 {
11255 11112 struct hme_blk *hmeblkp, *next_hmeblkp;
11256 11113 int size;
11257 11114 uint_t critical;
11258 11115 uint64_t hblkpa;
11259 11116
11260 11117 ASSERT(*listp != NULL);
11261 11118
11262 11119 hmeblkp = *listp;
11263 11120 while (hmeblkp != NULL) {
11264 11121 next_hmeblkp = hmeblkp->hblk_next;
11265 11122 ASSERT(!hmeblkp->hblk_hmecnt);
11266 11123 ASSERT(!hmeblkp->hblk_vcnt);
11267 11124 ASSERT(!hmeblkp->hblk_lckcnt);
11268 11125 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11269 11126 ASSERT(hmeblkp->hblk_shared == 0);
11270 11127 ASSERT(hmeblkp->hblk_shw_bit == 0);
11271 11128 ASSERT(hmeblkp->hblk_shadow == NULL);
11272 11129
11273 11130 hblkpa = va_to_pa((caddr_t)hmeblkp);
11274 11131 ASSERT(hblkpa != (uint64_t)-1);
11275 11132 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11276 11133
11277 11134 size = get_hblk_ttesz(hmeblkp);
11278 11135 hmeblkp->hblk_next = NULL;
11279 11136 hmeblkp->hblk_nextpa = hblkpa;
11280 11137
11281 11138 if (hmeblkp->hblk_nuc_bit == 0) {
11282 11139
11283 11140 if (size != TTE8K ||
11284 11141 !sfmmu_put_free_hblk(hmeblkp, critical))
11285 11142 kmem_cache_free(get_hblk_cache(hmeblkp),
11286 11143 hmeblkp);
11287 11144 }
11288 11145 hmeblkp = next_hmeblkp;
11289 11146 }
11290 11147 }
11291 11148
11292 11149 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11293 11150 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11294 11151
11295 11152 static uint_t sfmmu_hblk_steal_twice;
11296 11153 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11297 11154
11298 11155 /*
11299 11156  * Steal an hmeblk from the user or kernel hme hash lists.
11300 11157  * For an 8K tte, grab one from the reserve pool (freehblkp) before trying
11301 11158  * to steal; if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts,
11302 11159  * tap into the critical reserve of freehblkp.
11303 11160  * Note: we keep looping in this routine until we find one.
11304 11161 */
11305 11162 static struct hme_blk *
11306 11163 sfmmu_hblk_steal(int size)
11307 11164 {
11308 11165 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11309 11166 struct hmehash_bucket *hmebp;
11310 11167 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11311 11168 uint64_t hblkpa;
11312 11169 int i;
11313 11170 uint_t loop_cnt = 0, critical;
11314 11171
11315 11172 for (;;) {
11316 11173 /* Check cpu hblk pending queues */
11317 11174 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11318 11175 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11319 11176 ASSERT(hmeblkp->hblk_hmecnt == 0);
11320 11177 ASSERT(hmeblkp->hblk_vcnt == 0);
11321 11178 return (hmeblkp);
11322 11179 }
11323 11180
11324 11181 if (size == TTE8K) {
11325 11182 critical =
11326 11183 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11327 11184 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11328 11185 return (hmeblkp);
11329 11186 }
11330 11187
11331 11188 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11332 11189 uhmehash_steal_hand;
11333 11190 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11334 11191
11335 11192 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11336 11193 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11337 11194 SFMMU_HASH_LOCK(hmebp);
11338 11195 hmeblkp = hmebp->hmeblkp;
11339 11196 hblkpa = hmebp->hmeh_nextpa;
11340 11197 pr_hblk = NULL;
11341 11198 while (hmeblkp) {
11342 11199 /*
11343 11200 * check if it is a hmeblk that is not locked
11344 11201 				 * and not shared. Skip shadow hmeblks with
11345 11202 				 * shadow_mask set, i.e. valid count non-zero.
11346 11203 */
11347 11204 if ((get_hblk_ttesz(hmeblkp) == size) &&
11348 11205 (hmeblkp->hblk_shw_bit == 0 ||
11349 11206 hmeblkp->hblk_vcnt == 0) &&
11350 11207 (hmeblkp->hblk_lckcnt == 0)) {
11351 11208 /*
11352 11209 * there is a high probability that we
11353 11210 * will find a free one. search some
11354 11211 * buckets for a free hmeblk initially
11355 11212 * before unloading a valid hmeblk.
11356 11213 */
11357 11214 if ((hmeblkp->hblk_vcnt == 0 &&
11358 11215 hmeblkp->hblk_hmecnt == 0) || (i >=
11359 11216 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11360 11217 if (sfmmu_steal_this_hblk(hmebp,
11361 11218 hmeblkp, hblkpa, pr_hblk)) {
11362 11219 /*
11363 11220 * Hblk is unloaded
11364 11221 * successfully
11365 11222 */
11366 11223 break;
11367 11224 }
11368 11225 }
11369 11226 }
11370 11227 pr_hblk = hmeblkp;
11371 11228 hblkpa = hmeblkp->hblk_nextpa;
11372 11229 hmeblkp = hmeblkp->hblk_next;
11373 11230 }
11374 11231
11375 11232 SFMMU_HASH_UNLOCK(hmebp);
11376 11233 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11377 11234 hmebp = uhme_hash;
11378 11235 }
11379 11236 uhmehash_steal_hand = hmebp;
11380 11237
11381 11238 if (hmeblkp != NULL)
11382 11239 break;
11383 11240
11384 11241 /*
11385 11242 * in the worst case, look for a free one in the kernel
11386 11243 * hash table.
11387 11244 */
11388 11245 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11389 11246 SFMMU_HASH_LOCK(hmebp);
11390 11247 hmeblkp = hmebp->hmeblkp;
11391 11248 hblkpa = hmebp->hmeh_nextpa;
11392 11249 pr_hblk = NULL;
11393 11250 while (hmeblkp) {
11394 11251 /*
11395 11252 * check if it is free hmeblk
11396 11253 				 * check if it is a free hmeblk
11397 11254 if ((get_hblk_ttesz(hmeblkp) == size) &&
11398 11255 (hmeblkp->hblk_lckcnt == 0) &&
11399 11256 (hmeblkp->hblk_vcnt == 0) &&
11400 11257 (hmeblkp->hblk_hmecnt == 0)) {
11401 11258 if (sfmmu_steal_this_hblk(hmebp,
11402 11259 hmeblkp, hblkpa, pr_hblk)) {
11403 11260 break;
11404 11261 } else {
11405 11262 /*
11406 11263 * Cannot fail since we have
11407 11264 * hash lock.
11408 11265 */
11409 11266 panic("fail to steal?");
11410 11267 }
11411 11268 }
11412 11269
11413 11270 pr_hblk = hmeblkp;
11414 11271 hblkpa = hmeblkp->hblk_nextpa;
11415 11272 hmeblkp = hmeblkp->hblk_next;
11416 11273 }
11417 11274
11418 11275 SFMMU_HASH_UNLOCK(hmebp);
11419 11276 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11420 11277 hmebp = khme_hash;
11421 11278 }
11422 11279
11423 11280 if (hmeblkp != NULL)
11424 11281 break;
11425 11282 sfmmu_hblk_steal_twice++;
11426 11283 }
11427 11284 return (hmeblkp);
11428 11285 }
11429 11286
11430 11287 /*
11431 11288 * This routine does real work to prepare a hblk to be "stolen" by
11432 11289  * unloading the mappings, updating shadow counts, etc.
11433 11290  * It returns 1 if the block is ready to be reused (stolen), or 0
11434 11291  * if the block cannot be stolen yet because pageunload is still
11435 11292  * working on this hblk.
11436 11293 */
11437 11294 static int
11438 11295 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11439 11296 uint64_t hblkpa, struct hme_blk *pr_hblk)
11440 11297 {
11441 11298 int shw_size, vshift;
11442 11299 struct hme_blk *shw_hblkp;
11443 11300 caddr_t vaddr;
11444 11301 uint_t shw_mask, newshw_mask;
11445 11302 struct hme_blk *list = NULL;
11446 11303
11447 11304 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11448 11305
11449 11306 /*
11450 11307 * check if the hmeblk is free, unload if necessary
11451 11308 */
11452 11309 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11453 11310 sfmmu_t *sfmmup;
11454 11311 demap_range_t dmr;
11455 11312
11456 11313 sfmmup = hblktosfmmu(hmeblkp);
11457 11314 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11458 11315 return (0);
11459 11316 }
11460 11317 DEMAP_RANGE_INIT(sfmmup, &dmr);
11461 11318 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11462 11319 (caddr_t)get_hblk_base(hmeblkp),
11463 11320 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11464 11321 DEMAP_RANGE_FLUSH(&dmr);
11465 11322 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11466 11323 /*
11467 11324 * Pageunload is working on the same hblk.
11468 11325 */
11469 11326 return (0);
11470 11327 }
11471 11328
11472 11329 sfmmu_hblk_steal_unload_count++;
11473 11330 }
11474 11331
11475 11332 ASSERT(hmeblkp->hblk_lckcnt == 0);
11476 11333 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11477 11334
11478 11335 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11479 11336 hmeblkp->hblk_nextpa = hblkpa;
11480 11337
11481 11338 shw_hblkp = hmeblkp->hblk_shadow;
11482 11339 if (shw_hblkp) {
11483 11340 ASSERT(!hmeblkp->hblk_shared);
11484 11341 shw_size = get_hblk_ttesz(shw_hblkp);
11485 11342 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11486 11343 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11487 11344 ASSERT(vshift < 8);
11488 11345 /*
11489 11346 * Atomically clear shadow mask bit
11490 11347 */
11491 11348 do {
11492 11349 shw_mask = shw_hblkp->hblk_shw_mask;
11493 11350 ASSERT(shw_mask & (1 << vshift));
11494 11351 newshw_mask = shw_mask & ~(1 << vshift);
11495 11352 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11496 11353 shw_mask, newshw_mask);
11497 11354 } while (newshw_mask != shw_mask);
11498 11355 hmeblkp->hblk_shadow = NULL;
11499 11356 }
11500 11357
11501 11358 /*
11502 11359 * remove shadow bit if we are stealing an unused shadow hmeblk.
11503 11360 	 * sfmmu_hblk_alloc needs it that way; it will set the shadow bit later if
11504 11361 * we are indeed allocating a shadow hmeblk.
11505 11362 */
11506 11363 hmeblkp->hblk_shw_bit = 0;
11507 11364
11508 11365 if (hmeblkp->hblk_shared) {
11509 11366 sf_srd_t *srdp;
11510 11367 sf_region_t *rgnp;
11511 11368 uint_t rid;
11512 11369
11513 11370 srdp = hblktosrd(hmeblkp);
11514 11371 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11515 11372 rid = hmeblkp->hblk_tag.htag_rid;
11516 11373 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11517 11374 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11518 11375 rgnp = srdp->srd_hmergnp[rid];
11519 11376 ASSERT(rgnp != NULL);
11520 11377 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11521 11378 hmeblkp->hblk_shared = 0;
11522 11379 }
11523 11380
11524 11381 sfmmu_hblk_steal_count++;
11525 11382 SFMMU_STAT(sf_steal_count);
11526 11383
11527 11384 return (1);
11528 11385 }
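/*
 * Editor's note -- illustrative sketch, not part of hat_sfmmu.c: the
 * shadow-mask update in sfmmu_steal_this_hblk() above uses the classic
 * compare-and-swap retry loop to clear a single bit without taking a
 * lock.  The standalone userland fragment below shows the same pattern
 * with C11 atomics; the names demo_clear_bit and maskp are made up for
 * the example and do not exist in the kernel.
 */
#if 0	/* standalone illustration only */
#include <stdatomic.h>
#include <stdint.h>

static void
demo_clear_bit(_Atomic uint32_t *maskp, int bit)
{
	uint32_t old, new;

	do {
		old = atomic_load(maskp);
		new = old & ~(1u << bit);
		/*
		 * Retry until no other thread changed the word between
		 * our load and the compare-and-swap, mirroring the
		 * atomic_cas_32() loop in sfmmu_steal_this_hblk().
		 */
	} while (!atomic_compare_exchange_weak(maskp, &old, new));
}
#endif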
11529 11386
11530 11387 struct hme_blk *
11531 11388 sfmmu_hmetohblk(struct sf_hment *sfhme)
11532 11389 {
11533 11390 struct hme_blk *hmeblkp;
11534 11391 struct sf_hment *sfhme0;
11535 11392 struct hme_blk *hblk_dummy = 0;
11536 11393
11537 11394 /*
11538 11395 * No dummy sf_hments, please.
11539 11396 */
11540 11397 ASSERT(sfhme->hme_tte.ll != 0);
11541 11398
11542 11399 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11543 11400 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11544 11401 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11545 11402
11546 11403 return (hmeblkp);
11547 11404 }
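/*
 * Editor's note -- illustrative sketch, not part of hat_sfmmu.c:
 * sfmmu_hmetohblk() above recovers the enclosing hme_blk from a pointer
 * to one of its embedded sf_hments by stepping back to element 0 of the
 * embedded array and then subtracting that array's offset within the
 * structure (the "container-of" idiom, expressed there through a dummy
 * pointer).  A minimal standalone version of the same idiom, with
 * made-up struct and field names, looks like this:
 */
#if 0	/* standalone illustration only */
#include <stddef.h>
#include <stdint.h>

struct demo_member { int m; };
struct demo_container {
	int tag;
	struct demo_member members[4];
};

static struct demo_container *
demo_member_to_container(struct demo_member *mp, int idx)
{
	/* step back to members[0], then back over the leading fields */
	struct demo_member *m0 = mp - idx;
	return ((struct demo_container *)((uintptr_t)m0 -
	    offsetof(struct demo_container, members)));
}
#endif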
11548 11405
11549 11406 /*
11550 11407 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11551 11408 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11552 11409 * KM_SLEEP allocation.
11553 11410 *
11554 11411  * The 8K fallback uses TSB_FORCEALLOC, so this routine cannot fail.
11555 11412 */
11556 11413 static void
11557 11414 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11558 11415 {
11559 11416 struct tsb_info *tsbinfop, *next;
11560 11417 tsb_replace_rc_t rc;
11561 11418 boolean_t gotfirst = B_FALSE;
11562 11419
11563 11420 ASSERT(sfmmup != ksfmmup);
11564 11421 ASSERT(sfmmu_hat_lock_held(sfmmup));
11565 11422
11566 11423 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11567 11424 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11568 11425 }
11569 11426
11570 11427 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11571 11428 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11572 11429 } else {
11573 11430 return;
11574 11431 }
11575 11432
11576 11433 ASSERT(sfmmup->sfmmu_tsb != NULL);
11577 11434
11578 11435 /*
11579 11436 * Loop over all tsbinfo's replacing them with ones that actually have
11580 11437 * a TSB. If any of the replacements ever fail, bail out of the loop.
11581 11438 */
11582 11439 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11583 11440 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11584 11441 next = tsbinfop->tsb_next;
11585 11442 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11586 11443 hatlockp, TSB_SWAPIN);
11587 11444 if (rc != TSB_SUCCESS) {
11588 11445 break;
11589 11446 }
11590 11447 gotfirst = B_TRUE;
11591 11448 }
11592 11449
11593 11450 switch (rc) {
11594 11451 case TSB_SUCCESS:
11595 11452 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11596 11453 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11597 11454 return;
11598 11455 case TSB_LOSTRACE:
11599 11456 break;
11600 11457 case TSB_ALLOCFAIL:
11601 11458 break;
11602 11459 default:
11603 11460 panic("sfmmu_replace_tsb returned unrecognized failure code "
11604 11461 "%d", rc);
11605 11462 }
11606 11463
11607 11464 /*
11608 11465 * In this case, we failed to get one of our TSBs. If we failed to
11609 11466 * get the first TSB, get one of minimum size (8KB). Walk the list
11610 11467 * and throw away the tsbinfos, starting where the allocation failed;
11611 11468 * we can get by with just one TSB as long as we don't leave the
11612 11469 * SWAPPED tsbinfo structures lying around.
11613 11470 */
11614 11471 tsbinfop = sfmmup->sfmmu_tsb;
11615 11472 next = tsbinfop->tsb_next;
11616 11473 tsbinfop->tsb_next = NULL;
11617 11474
11618 11475 sfmmu_hat_exit(hatlockp);
11619 11476 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11620 11477 next = tsbinfop->tsb_next;
11621 11478 sfmmu_tsbinfo_free(tsbinfop);
11622 11479 }
11623 11480 hatlockp = sfmmu_hat_enter(sfmmup);
11624 11481
11625 11482 /*
11626 11483 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11627 11484 * pages.
11628 11485 */
11629 11486 if (!gotfirst) {
11630 11487 tsbinfop = sfmmup->sfmmu_tsb;
11631 11488 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11632 11489 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11633 11490 ASSERT(rc == TSB_SUCCESS);
11634 11491 }
11635 11492
11636 11493 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11637 11494 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11638 11495 }
11639 11496
11640 11497 static int
11641 11498 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11642 11499 {
11643 11500 ulong_t bix = 0;
11644 11501 uint_t rid;
11645 11502 sf_region_t *rgnp;
11646 11503
11647 11504 ASSERT(srdp != NULL);
11648 11505 ASSERT(srdp->srd_refcnt != 0);
11649 11506
11650 11507 w <<= BT_ULSHIFT;
11651 11508 while (bmw) {
11652 11509 if (!(bmw & 0x1)) {
11653 11510 bix++;
11654 11511 bmw >>= 1;
11655 11512 continue;
11656 11513 }
11657 11514 rid = w | bix;
11658 11515 rgnp = srdp->srd_hmergnp[rid];
11659 11516 ASSERT(rgnp->rgn_refcnt > 0);
11660 11517 ASSERT(rgnp->rgn_id == rid);
11661 11518 if (addr < rgnp->rgn_saddr ||
11662 11519 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11663 11520 bix++;
11664 11521 bmw >>= 1;
11665 11522 } else {
11666 11523 return (1);
11667 11524 }
11668 11525 }
11669 11526 return (0);
11670 11527 }
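/*
 * Editor's note -- illustrative sketch, not part of hat_sfmmu.c:
 * sfmmu_is_rgnva() above walks one word of a region bitmap, turning
 * each set bit back into a region id by combining the word index
 * (already shifted by BT_ULSHIFT) with the bit position.  A standalone
 * version of the same word walk, with made-up names and a 64-bit
 * bitmap word assumed, might look like this:
 */
#if 0	/* standalone illustration only */
#include <stdint.h>

#define	DEMO_BT_ULSHIFT	6	/* log2(bits per word), 64-bit assumed */

/* invoke cb(id) for every id whose bit is set in this bitmap word */
static void
demo_walk_word(uint64_t word_index, uint64_t word, void (*cb)(unsigned))
{
	unsigned bix;

	for (bix = 0; word != 0; bix++, word >>= 1) {
		if (word & 1)
			cb((unsigned)((word_index << DEMO_BT_ULSHIFT) | bix));
	}
}
#endif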
11671 11528
11672 11529 /*
11673 11530 * Handle exceptions for low level tsb_handler.
11674 11531 *
11675 11532 * There are many scenarios that could land us here:
11676 11533 *
11677 11534 * If the context is invalid we land here. The context can be invalid
11678 11535 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11679 11536 * perform a wrap around operation in order to allocate a new context.
11680 11537 * 2) Context was invalidated to change pagesize programming 3) ISMs or
11681 11538  * TSBs configuration is changing for this process and we are forced into
11682 11539  * here to do a synchronization operation. If the context is valid we can
11683 11540  * be here from the window trap handler. In this case just call trap to handle
11684 11541 * the fault.
11685 11542 *
11686 11543 * Note that the process will run in INVALID_CONTEXT before
11687 11544 * faulting into here and subsequently loading the MMU registers
11688 11545 * (including the TSB base register) associated with this process.
11689 11546 * For this reason, the trap handlers must all test for
11690 11547 * INVALID_CONTEXT before attempting to access any registers other
11691 11548 * than the context registers.
11692 11549 */
11693 11550 void
11694 11551 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11695 11552 {
11696 11553 sfmmu_t *sfmmup, *shsfmmup;
11697 11554 uint_t ctxtype;
11698 11555 klwp_id_t lwp;
11699 11556 char lwp_save_state;
11700 11557 hatlock_t *hatlockp, *shatlockp;
11701 11558 struct tsb_info *tsbinfop;
11702 11559 struct tsbmiss *tsbmp;
11703 11560 sf_scd_t *scdp;
11704 11561
11705 11562 SFMMU_STAT(sf_tsb_exceptions);
11706 11563 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11707 11564 sfmmup = astosfmmu(curthread->t_procp->p_as);
11708 11565 /*
11709 11566 	 * note that in sun4u, the tagaccess register contains ctxnum
11710 11567 * while sun4v passes ctxtype in the tagaccess register.
11711 11568 */
11712 11569 ctxtype = tagaccess & TAGACC_CTX_MASK;
11713 11570
11714 11571 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11715 11572 ASSERT(sfmmup->sfmmu_ismhat == 0);
11716 11573 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11717 11574 ctxtype == INVALID_CONTEXT);
11718 11575
11719 11576 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11720 11577 /*
11721 11578 * We may land here because shme bitmap and pagesize
11722 11579 * flags are updated lazily in tsbmiss area on other cpus.
11723 11580 		 * If we detect here that the tsbmiss area is out of sync with
11724 11581 		 * the sfmmu, update it and retry the trapped instruction.
11725 11582 * Otherwise call trap().
11726 11583 */
11727 11584 int ret = 0;
11728 11585 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11729 11586 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11730 11587
11731 11588 /*
11732 11589 * Must set lwp state to LWP_SYS before
11733 11590 * trying to acquire any adaptive lock
11734 11591 */
11735 11592 lwp = ttolwp(curthread);
11736 11593 ASSERT(lwp);
11737 11594 lwp_save_state = lwp->lwp_state;
11738 11595 lwp->lwp_state = LWP_SYS;
11739 11596
11740 11597 hatlockp = sfmmu_hat_enter(sfmmup);
11741 11598 kpreempt_disable();
11742 11599 tsbmp = &tsbmiss_area[CPU->cpu_id];
11743 11600 ASSERT(sfmmup == tsbmp->usfmmup);
11744 11601 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11745 11602 ~tteflag_mask) ||
11746 11603 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11747 11604 ~tteflag_mask)) {
11748 11605 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11749 11606 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11750 11607 ret = 1;
11751 11608 }
11752 11609 if (sfmmup->sfmmu_srdp != NULL) {
11753 11610 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11754 11611 ulong_t *tm = tsbmp->shmermap;
11755 11612 ulong_t i;
11756 11613 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11757 11614 ulong_t d = tm[i] ^ sm[i];
11758 11615 if (d) {
11759 11616 if (d & sm[i]) {
11760 11617 if (!ret && sfmmu_is_rgnva(
11761 11618 sfmmup->sfmmu_srdp,
11762 11619 addr, i, d & sm[i])) {
11763 11620 ret = 1;
11764 11621 }
11765 11622 }
11766 11623 tm[i] = sm[i];
11767 11624 }
11768 11625 }
11769 11626 }
11770 11627 kpreempt_enable();
11771 11628 sfmmu_hat_exit(hatlockp);
11772 11629 lwp->lwp_state = lwp_save_state;
11773 11630 if (ret) {
11774 11631 return;
11775 11632 }
11776 11633 } else if (ctxtype == INVALID_CONTEXT) {
11777 11634 /*
11778 11635 * First, make sure we come out of here with a valid ctx,
11779 11636 * since if we don't get one we'll simply loop on the
11780 11637 * faulting instruction.
11781 11638 *
11782 11639 		 * If the ISM mappings are changing, the TSB is being relocated,
11783 11640 		 * the process is being swapped, or the process is joining or
11784 11641 		 * leaving an SCD or shared regions, we serialize behind the
11785 11642 		 * controlling thread with the hat lock, sfmmu_flags and the
11786 11643 		 * sfmmu_tsb_cv condition variable.
11787 11644 */
11788 11645
11789 11646 /*
11790 11647 * Must set lwp state to LWP_SYS before
11791 11648 * trying to acquire any adaptive lock
11792 11649 */
11793 11650 lwp = ttolwp(curthread);
11794 11651 ASSERT(lwp);
11795 11652 lwp_save_state = lwp->lwp_state;
11796 11653 lwp->lwp_state = LWP_SYS;
11797 11654
11798 11655 hatlockp = sfmmu_hat_enter(sfmmup);
11799 11656 retry:
11800 11657 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11801 11658 shsfmmup = scdp->scd_sfmmup;
11802 11659 ASSERT(shsfmmup != NULL);
11803 11660
11804 11661 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11805 11662 tsbinfop = tsbinfop->tsb_next) {
11806 11663 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11807 11664 /* drop the private hat lock */
11808 11665 sfmmu_hat_exit(hatlockp);
11809 11666 /* acquire the shared hat lock */
11810 11667 shatlockp = sfmmu_hat_enter(shsfmmup);
11811 11668 /*
11812 11669 * recheck to see if anything changed
11813 11670 * after we drop the private hat lock.
11814 11671 */
11815 11672 if (sfmmup->sfmmu_scdp == scdp &&
11816 11673 shsfmmup == scdp->scd_sfmmup) {
11817 11674 sfmmu_tsb_chk_reloc(shsfmmup,
11818 11675 shatlockp);
11819 11676 }
11820 11677 sfmmu_hat_exit(shatlockp);
11821 11678 hatlockp = sfmmu_hat_enter(sfmmup);
11822 11679 goto retry;
11823 11680 }
11824 11681 }
11825 11682 }
11826 11683
11827 11684 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11828 11685 tsbinfop = tsbinfop->tsb_next) {
11829 11686 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11830 11687 cv_wait(&sfmmup->sfmmu_tsb_cv,
11831 11688 HATLOCK_MUTEXP(hatlockp));
11832 11689 goto retry;
11833 11690 }
11834 11691 }
11835 11692
11836 11693 /*
11837 11694 * Wait for ISM maps to be updated.
11838 11695 */
11839 11696 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11840 11697 cv_wait(&sfmmup->sfmmu_tsb_cv,
11841 11698 HATLOCK_MUTEXP(hatlockp));
11842 11699 goto retry;
11843 11700 }
11844 11701
11845 11702 /* Is this process joining an SCD? */
11846 11703 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11847 11704 /*
11848 11705 * Flush private TSB and setup shared TSB.
11849 11706 * sfmmu_finish_join_scd() does not drop the
11850 11707 * hat lock.
11851 11708 */
11852 11709 sfmmu_finish_join_scd(sfmmup);
11853 11710 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11854 11711 }
11855 11712
11856 11713 /*
11857 11714 * If we're swapping in, get TSB(s). Note that we must do
11858 11715 * this before we get a ctx or load the MMU state. Once
11859 11716 * we swap in we have to recheck to make sure the TSB(s) and
11860 11717 * ISM mappings didn't change while we slept.
11861 11718 */
11862 11719 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11863 11720 sfmmu_tsb_swapin(sfmmup, hatlockp);
11864 11721 goto retry;
11865 11722 }
11866 11723
11867 11724 sfmmu_get_ctx(sfmmup);
11868 11725
11869 11726 sfmmu_hat_exit(hatlockp);
11870 11727 /*
11871 11728 * Must restore lwp_state if not calling
11872 11729 * trap() for further processing. Restore
11873 11730 * it anyway.
11874 11731 */
11875 11732 lwp->lwp_state = lwp_save_state;
11876 11733 return;
11877 11734 }
11878 11735 trap(rp, (caddr_t)tagaccess, traptype, 0);
11879 11736 }
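/*
 * Editor's note -- illustrative sketch, not part of hat_sfmmu.c: the
 * lazy resync in sfmmu_tsbmiss_exception() above compares the per-CPU
 * copy of the region bitmap with the hat's master copy one word at a
 * time, using XOR to find the words that differ and copying them
 * forward.  The standalone fragment below shows just that sync step
 * with made-up names; it deliberately omits the additional check the
 * kernel makes on whether the fault address falls in a newly added
 * region.
 */
#if 0	/* standalone illustration only */
#include <stddef.h>
#include <stdint.h>

/* returns 1 if any word had to be refreshed from the master copy */
static int
demo_sync_bitmap(uint64_t *local, const uint64_t *master, size_t nwords)
{
	size_t i;
	int updated = 0;

	for (i = 0; i < nwords; i++) {
		if (local[i] ^ master[i]) {	/* words differ */
			local[i] = master[i];
			updated = 1;
		}
	}
	return (updated);
}
#endif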
11880 11737
11881 11738 static void
11882 11739 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11883 11740 {
11884 11741 struct tsb_info *tp;
11885 11742
11886 11743 ASSERT(sfmmu_hat_lock_held(sfmmup));
11887 11744
11888 11745 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11889 11746 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11890 11747 cv_wait(&sfmmup->sfmmu_tsb_cv,
11891 11748 HATLOCK_MUTEXP(hatlockp));
11892 11749 break;
11893 11750 }
11894 11751 }
11895 11752 }
11896 11753
11897 11754 /*
11898 11755  * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11899 11756  * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
11900 11757 * rather than spinning to avoid send mondo timeouts with
11901 11758 * interrupts enabled. When the lock is acquired it is immediately
11902 11759 * released and we return back to sfmmu_vatopfn just after
11903 11760 * the GET_TTE call.
11904 11761 */
11905 11762 void
11906 11763 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11907 11764 {
11908 11765 struct page **pp;
11909 11766
11910 11767 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11911 11768 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11912 11769 }
11913 11770
11914 11771 /*
11915 11772 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and
11916 11773  * the TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
11917 11774 * cross traps which cannot be handled while spinning in the
11918 11775 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11919 11776 * mutex, which is held by the holder of the suspend bit, and then
11920 11777 * retry the trapped instruction after unwinding.
11921 11778 */
11922 11779 /*ARGSUSED*/
11923 11780 void
11924 11781 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11925 11782 {
11926 11783 ASSERT(curthread != kreloc_thread);
11927 11784 mutex_enter(&kpr_suspendlock);
11928 11785 mutex_exit(&kpr_suspendlock);
11929 11786 }
11930 11787
11931 11788 /*
11932 11789 * This routine could be optimized to reduce the number of xcalls by flushing
11933 11790  * the entire TLB if the region reference count is above some threshold, but the
11934 11791 * tradeoff will depend on the size of the TLB. So for now flush the specific
11935 11792 * page a context at a time.
11936 11793 *
11937 11794 * If uselocks is 0 then it's called after all cpus were captured and all the
11938 11795 * hat locks were taken. In this case don't take the region lock by relying on
11939 11796 * the order of list region update operations in hat_join_region(),
11940 11797 * hat_leave_region() and hat_dup_region(). The ordering in those routines
11941 11798 * guarantees that list is always forward walkable and reaches active sfmmus
11942 11799 * regardless of where xc_attention() captures a cpu.
11943 11800 */
11944 11801 cpuset_t
11945 11802 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11946 11803 struct hme_blk *hmeblkp, int uselocks)
11947 11804 {
11948 11805 sfmmu_t *sfmmup;
11949 11806 cpuset_t cpuset;
11950 11807 cpuset_t rcpuset;
11951 11808 hatlock_t *hatlockp;
11952 11809 uint_t rid = rgnp->rgn_id;
11953 11810 sf_rgn_link_t *rlink;
11954 11811 sf_scd_t *scdp;
11955 11812
11956 11813 ASSERT(hmeblkp->hblk_shared);
11957 11814 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11958 11815 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11959 11816
11960 11817 CPUSET_ZERO(rcpuset);
11961 11818 if (uselocks) {
11962 11819 mutex_enter(&rgnp->rgn_mutex);
11963 11820 }
11964 11821 sfmmup = rgnp->rgn_sfmmu_head;
11965 11822 while (sfmmup != NULL) {
11966 11823 if (uselocks) {
11967 11824 hatlockp = sfmmu_hat_enter(sfmmup);
11968 11825 }
11969 11826
11970 11827 /*
11971 11828 * When an SCD is created the SCD hat is linked on the sfmmu
11972 11829 * region lists for each hme region which is part of the
11973 11830 * SCD. If we find an SCD hat, when walking these lists,
11974 11831 * then we flush the shared TSBs, if we find a private hat,
11975 11832 		 * then we flush the shared TSBs; if we find a private hat
11976 11833 		 * which is part of an SCD, but where the region
11977 11834 		 * is not part of the SCD, then we flush the private TSBs.
11978 11835 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11979 11836 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11980 11837 scdp = sfmmup->sfmmu_scdp;
11981 11838 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11982 11839 if (uselocks) {
11983 11840 sfmmu_hat_exit(hatlockp);
11984 11841 }
11985 11842 goto next;
11986 11843 }
11987 11844 }
11988 11845
11989 11846 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11990 11847
11991 11848 kpreempt_disable();
11992 11849 cpuset = sfmmup->sfmmu_cpusran;
11993 11850 CPUSET_AND(cpuset, cpu_ready_set);
11994 11851 CPUSET_DEL(cpuset, CPU->cpu_id);
11995 11852 SFMMU_XCALL_STATS(sfmmup);
11996 11853 xt_some(cpuset, vtag_flushpage_tl1,
11997 11854 (uint64_t)addr, (uint64_t)sfmmup);
11998 11855 vtag_flushpage(addr, (uint64_t)sfmmup);
11999 11856 if (uselocks) {
12000 11857 sfmmu_hat_exit(hatlockp);
12001 11858 }
12002 11859 kpreempt_enable();
12003 11860 CPUSET_OR(rcpuset, cpuset);
12004 11861
12005 11862 next:
12006 11863 /* LINTED: constant in conditional context */
12007 11864 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12008 11865 ASSERT(rlink != NULL);
12009 11866 sfmmup = rlink->next;
12010 11867 }
12011 11868 if (uselocks) {
12012 11869 mutex_exit(&rgnp->rgn_mutex);
12013 11870 }
12014 11871 return (rcpuset);
12015 11872 }
12016 11873
12017 11874 /*
12018 11875  * This routine takes an sfmmu pointer and the va for an address in an
12019 11876 * ISM region as input and returns the corresponding region id in ism_rid.
12020 11877 * The return value of 1 indicates that a region has been found and ism_rid
12021 11878 * is valid, otherwise 0 is returned.
12022 11879 */
12023 11880 static int
12024 11881 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12025 11882 {
12026 11883 ism_blk_t *ism_blkp;
12027 11884 int i;
12028 11885 ism_map_t *ism_map;
12029 11886 #ifdef DEBUG
12030 11887 struct hat *ism_hatid;
12031 11888 #endif
12032 11889 ASSERT(sfmmu_hat_lock_held(sfmmup));
12033 11890
12034 11891 ism_blkp = sfmmup->sfmmu_iblk;
12035 11892 while (ism_blkp != NULL) {
12036 11893 ism_map = ism_blkp->iblk_maps;
12037 11894 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12038 11895 if ((va >= ism_start(ism_map[i])) &&
12039 11896 (va < ism_end(ism_map[i]))) {
12040 11897
12041 11898 *ism_rid = ism_map[i].imap_rid;
12042 11899 #ifdef DEBUG
12043 11900 ism_hatid = ism_map[i].imap_ismhat;
12044 11901 ASSERT(ism_hatid == ism_sfmmup);
12045 11902 ASSERT(ism_hatid->sfmmu_ismhat);
12046 11903 #endif
12047 11904 return (1);
12048 11905 }
12049 11906 }
12050 11907 ism_blkp = ism_blkp->iblk_next;
12051 11908 }
12052 11909 return (0);
12053 11910 }
12054 11911
12055 11912 /*
12056 11913 * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12057 11914 * This routine may be called with all cpu's captured. Therefore, the
12058 11915 * caller is responsible for holding all locks and disabling kernel
12059 11916 * preemption.
12060 11917 */
12061 11918 /* ARGSUSED */
12062 11919 static void
12063 11920 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12064 11921 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12065 11922 {
12066 11923 cpuset_t cpuset;
12067 11924 caddr_t va;
12068 11925 ism_ment_t *ment;
12069 11926 sfmmu_t *sfmmup;
12070 11927 #ifdef VAC
12071 11928 int vcolor;
12072 11929 #endif
12073 11930
12074 11931 sf_scd_t *scdp;
12075 11932 uint_t ism_rid;
12076 11933
12077 11934 ASSERT(!hmeblkp->hblk_shared);
12078 11935 /*
12079 11936 * Walk the ism_hat's mapping list and flush the page
12080 11937 * from every hat sharing this ism_hat. This routine
12081 11938 * may be called while all cpu's have been captured.
12082 11939 * Therefore we can't attempt to grab any locks. For now
12083 11940 * this means we will protect the ism mapping list under
12084 11941 * a single lock which will be grabbed by the caller.
12085 11942 	 * If hat_share/unshare scalability becomes a performance
12086 11943 * problem then we may need to re-think ism mapping list locking.
12087 11944 */
12088 11945 ASSERT(ism_sfmmup->sfmmu_ismhat);
12089 11946 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12090 11947 addr = addr - ISMID_STARTADDR;
12091 11948
12092 11949 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12093 11950
12094 11951 sfmmup = ment->iment_hat;
12095 11952
12096 11953 va = ment->iment_base_va;
12097 11954 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12098 11955
12099 11956 /*
12100 11957 * When an SCD is created the SCD hat is linked on the ism
12101 11958 * mapping lists for each ISM segment which is part of the
12102 11959 * SCD. If we find an SCD hat, when walking these lists,
12103 11960 		 * then we flush the shared TSBs; if we find a private hat
12104 11961 		 * which is part of an SCD, but where the region
12105 11962 		 * corresponding to this va is not part of the SCD, then we
12106 11963 		 * flush the private TSBs.
12107 11964 */
12108 11965 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12109 11966 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12110 11967 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12111 11968 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12112 11969 &ism_rid)) {
12113 11970 cmn_err(CE_PANIC,
12114 11971 "can't find matching ISM rid!");
12115 11972 }
12116 11973
12117 11974 scdp = sfmmup->sfmmu_scdp;
12118 11975 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12119 11976 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12120 11977 ism_rid)) {
12121 11978 continue;
12122 11979 }
12123 11980 }
12124 11981 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12125 11982
12126 11983 cpuset = sfmmup->sfmmu_cpusran;
12127 11984 CPUSET_AND(cpuset, cpu_ready_set);
12128 11985 CPUSET_DEL(cpuset, CPU->cpu_id);
12129 11986 SFMMU_XCALL_STATS(sfmmup);
12130 11987 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12131 11988 (uint64_t)sfmmup);
12132 11989 vtag_flushpage(va, (uint64_t)sfmmup);
12133 11990
12134 11991 #ifdef VAC
12135 11992 /*
12136 11993 * Flush D$
12137 11994 * When flushing D$ we must flush all
12138 11995 * cpu's. See sfmmu_cache_flush().
12139 11996 */
12140 11997 if (cache_flush_flag == CACHE_FLUSH) {
12141 11998 cpuset = cpu_ready_set;
12142 11999 CPUSET_DEL(cpuset, CPU->cpu_id);
12143 12000
12144 12001 SFMMU_XCALL_STATS(sfmmup);
12145 12002 vcolor = addr_to_vcolor(va);
12146 12003 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12147 12004 vac_flushpage(pfnum, vcolor);
12148 12005 }
12149 12006 #endif /* VAC */
12150 12007 }
12151 12008 }
12152 12009
12153 12010 /*
12154 12011  * Demaps the TSB entry, flushes the CPU caches, and flushes the TLBs on all
12155 12012  * CPUs for a particular virtual address and ctx. If noflush is set we do not
12156 12013 * flush the TLB/TSB. This function may or may not be called with the
12157 12014 * HAT lock held.
12158 12015 */
12159 12016 static void
12160 12017 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12161 12018 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12162 12019 int hat_lock_held)
12163 12020 {
12164 12021 #ifdef VAC
12165 12022 int vcolor;
12166 12023 #endif
12167 12024 cpuset_t cpuset;
12168 12025 hatlock_t *hatlockp;
12169 12026
12170 12027 ASSERT(!hmeblkp->hblk_shared);
12171 12028
12172 12029 #if defined(lint) && !defined(VAC)
12173 12030 pfnum = pfnum;
12174 12031 cpu_flag = cpu_flag;
12175 12032 cache_flush_flag = cache_flush_flag;
12176 12033 #endif
12177 12034
12178 12035 /*
12179 12036 * There is no longer a need to protect against ctx being
12180 12037 * stolen here since we don't store the ctx in the TSB anymore.
12181 12038 */
12182 12039 #ifdef VAC
12183 12040 vcolor = addr_to_vcolor(addr);
12184 12041 #endif
12185 12042
12186 12043 /*
12187 12044 * We must hold the hat lock during the flush of TLB,
12188 12045 * to avoid a race with sfmmu_invalidate_ctx(), where
12189 12046 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12190 12047 * causing TLB demap routine to skip flush on that MMU.
12191 12048 * If the context on a MMU has already been set to
12192 12049 * INVALID_CONTEXT, we just get an extra flush on
12193 12050 * that MMU.
12194 12051 */
12195 12052 if (!hat_lock_held && !tlb_noflush)
12196 12053 hatlockp = sfmmu_hat_enter(sfmmup);
12197 12054
12198 12055 kpreempt_disable();
12199 12056 if (!tlb_noflush) {
12200 12057 /*
12201 12058 * Flush the TSB and TLB.
12202 12059 */
12203 12060 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12204 12061
12205 12062 cpuset = sfmmup->sfmmu_cpusran;
12206 12063 CPUSET_AND(cpuset, cpu_ready_set);
12207 12064 CPUSET_DEL(cpuset, CPU->cpu_id);
12208 12065
12209 12066 SFMMU_XCALL_STATS(sfmmup);
12210 12067
12211 12068 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12212 12069 (uint64_t)sfmmup);
12213 12070
12214 12071 vtag_flushpage(addr, (uint64_t)sfmmup);
12215 12072 }
12216 12073
12217 12074 if (!hat_lock_held && !tlb_noflush)
12218 12075 sfmmu_hat_exit(hatlockp);
12219 12076
12220 12077 #ifdef VAC
12221 12078 /*
12222 12079 * Flush the D$
12223 12080 *
12224 12081 * Even if the ctx is stolen, we need to flush the
12225 12082 * cache. Our ctx stealer only flushes the TLBs.
12226 12083 */
12227 12084 if (cache_flush_flag == CACHE_FLUSH) {
12228 12085 if (cpu_flag & FLUSH_ALL_CPUS) {
12229 12086 cpuset = cpu_ready_set;
12230 12087 } else {
12231 12088 cpuset = sfmmup->sfmmu_cpusran;
12232 12089 CPUSET_AND(cpuset, cpu_ready_set);
12233 12090 }
12234 12091 CPUSET_DEL(cpuset, CPU->cpu_id);
12235 12092 SFMMU_XCALL_STATS(sfmmup);
12236 12093 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12237 12094 vac_flushpage(pfnum, vcolor);
12238 12095 }
12239 12096 #endif /* VAC */
12240 12097 kpreempt_enable();
12241 12098 }
12242 12099
12243 12100 /*
12244 12101 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12245 12102 * address and ctx. If noflush is set we do not currently do anything.
12246 12103 * This function may or may not be called with the HAT lock held.
12247 12104 */
12248 12105 static void
12249 12106 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12250 12107 int tlb_noflush, int hat_lock_held)
12251 12108 {
12252 12109 cpuset_t cpuset;
12253 12110 hatlock_t *hatlockp;
12254 12111
12255 12112 ASSERT(!hmeblkp->hblk_shared);
12256 12113
12257 12114 /*
12258 12115 * If the process is exiting we have nothing to do.
12259 12116 */
12260 12117 if (tlb_noflush)
12261 12118 return;
12262 12119
12263 12120 /*
12264 12121 * Flush TSB.
12265 12122 */
12266 12123 if (!hat_lock_held)
12267 12124 hatlockp = sfmmu_hat_enter(sfmmup);
12268 12125 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12269 12126
12270 12127 kpreempt_disable();
12271 12128
12272 12129 cpuset = sfmmup->sfmmu_cpusran;
12273 12130 CPUSET_AND(cpuset, cpu_ready_set);
12274 12131 CPUSET_DEL(cpuset, CPU->cpu_id);
12275 12132
12276 12133 SFMMU_XCALL_STATS(sfmmup);
12277 12134 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12278 12135
12279 12136 vtag_flushpage(addr, (uint64_t)sfmmup);
12280 12137
12281 12138 if (!hat_lock_held)
12282 12139 sfmmu_hat_exit(hatlockp);
12283 12140
12284 12141 kpreempt_enable();
12285 12142
12286 12143 }
12287 12144
12288 12145 /*
12289 12146 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12290 12147 * call handler that can flush a range of pages to save on xcalls.
12291 12148 */
12292 12149 static int sfmmu_xcall_save;
12293 12150
12294 12151 /*
12295 12152  * This routine is never used for demapping addresses backed by SRD hmeblks.
12296 12153 */
12297 12154 static void
12298 12155 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12299 12156 {
12300 12157 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12301 12158 hatlock_t *hatlockp;
12302 12159 cpuset_t cpuset;
12303 12160 uint64_t sfmmu_pgcnt;
12304 12161 pgcnt_t pgcnt = 0;
12305 12162 int pgunload = 0;
12306 12163 int dirtypg = 0;
12307 12164 caddr_t addr = dmrp->dmr_addr;
12308 12165 caddr_t eaddr;
12309 12166 uint64_t bitvec = dmrp->dmr_bitvec;
12310 12167
12311 12168 ASSERT(bitvec & 1);
12312 12169
12313 12170 /*
12314 12171 * Flush TSB and calculate number of pages to flush.
12315 12172 */
12316 12173 while (bitvec != 0) {
12317 12174 dirtypg = 0;
12318 12175 /*
12319 12176 * Find the first page to flush and then count how many
12320 12177 * pages there are after it that also need to be flushed.
12321 12178 * This way the number of TSB flushes is minimized.
12322 12179 */
12323 12180 while ((bitvec & 1) == 0) {
12324 12181 pgcnt++;
12325 12182 addr += MMU_PAGESIZE;
12326 12183 bitvec >>= 1;
12327 12184 }
12328 12185 while (bitvec & 1) {
12329 12186 dirtypg++;
12330 12187 bitvec >>= 1;
12331 12188 }
12332 12189 eaddr = addr + ptob(dirtypg);
12333 12190 hatlockp = sfmmu_hat_enter(sfmmup);
12334 12191 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12335 12192 sfmmu_hat_exit(hatlockp);
12336 12193 pgunload += dirtypg;
12337 12194 addr = eaddr;
12338 12195 pgcnt += dirtypg;
12339 12196 }
12340 12197
12341 12198 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12342 12199 if (sfmmup->sfmmu_free == 0) {
12343 12200 addr = dmrp->dmr_addr;
12344 12201 bitvec = dmrp->dmr_bitvec;
12345 12202
12346 12203 /*
12347 12204 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12348 12205 * as it will be used to pack argument for xt_some
12349 12206 */
12350 12207 ASSERT((pgcnt > 0) &&
12351 12208 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12352 12209
12353 12210 /*
12354 12211 		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12355 12212 		 * the low 6 bits of sfmmup. This is doable since pgcnt
12356 12213 		 * is always >= 1.
12357 12214 */
12358 12215 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12359 12216 sfmmu_pgcnt = (uint64_t)sfmmup |
12360 12217 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
12361 12218
12362 12219 /*
12363 12220 * We must hold the hat lock during the flush of TLB,
12364 12221 * to avoid a race with sfmmu_invalidate_ctx(), where
12365 12222 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12366 12223 * causing TLB demap routine to skip flush on that MMU.
12367 12224 * If the context on a MMU has already been set to
12368 12225 * INVALID_CONTEXT, we just get an extra flush on
12369 12226 * that MMU.
12370 12227 */
12371 12228 hatlockp = sfmmu_hat_enter(sfmmup);
12372 12229 kpreempt_disable();
12373 12230
12374 12231 cpuset = sfmmup->sfmmu_cpusran;
12375 12232 CPUSET_AND(cpuset, cpu_ready_set);
12376 12233 CPUSET_DEL(cpuset, CPU->cpu_id);
12377 12234
12378 12235 SFMMU_XCALL_STATS(sfmmup);
12379 12236 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12380 12237 sfmmu_pgcnt);
12381 12238
12382 12239 for (; bitvec != 0; bitvec >>= 1) {
12383 12240 if (bitvec & 1)
12384 12241 vtag_flushpage(addr, (uint64_t)sfmmup);
12385 12242 addr += MMU_PAGESIZE;
12386 12243 }
12387 12244 kpreempt_enable();
12388 12245 sfmmu_hat_exit(hatlockp);
12389 12246
12390 12247 sfmmu_xcall_save += (pgunload-1);
12391 12248 }
12392 12249 dmrp->dmr_bitvec = 0;
12393 12250 }
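/*
 * Editor's note -- illustrative sketch, not part of hat_sfmmu.c: the
 * range-demap cross-call above packs (pgcnt - 1) into the low
 * SFMMU_PGCNT_SHIFT bits of the sfmmu pointer, which works only because
 * the pointer is aligned well past those bits.  The standalone fragment
 * below shows the same pack/unpack trick with made-up names and an
 * assumed 6-bit count field.
 */
#if 0	/* standalone illustration only */
#include <assert.h>
#include <stdint.h>

#define	DEMO_CNT_SHIFT	6
#define	DEMO_CNT_MASK	((1u << DEMO_CNT_SHIFT) - 1)

static uint64_t
demo_pack(void *ptr, unsigned pgcnt)
{
	/* the pointer must have at least DEMO_CNT_SHIFT zero low bits */
	assert(((uint64_t)(uintptr_t)ptr & DEMO_CNT_MASK) == 0);
	assert(pgcnt >= 1 && pgcnt <= (1u << DEMO_CNT_SHIFT));
	return ((uint64_t)(uintptr_t)ptr | ((pgcnt - 1) & DEMO_CNT_MASK));
}

static void
demo_unpack(uint64_t packed, void **ptrp, unsigned *pgcntp)
{
	*ptrp = (void *)(uintptr_t)(packed & ~(uint64_t)DEMO_CNT_MASK);
	*pgcntp = (unsigned)(packed & DEMO_CNT_MASK) + 1;
}
#endif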
12394 12251
12395 12252 /*
12396 12253 * In cases where we need to synchronize with TLB/TSB miss trap
12397 12254 * handlers, _and_ need to flush the TLB, it's a lot easier to
12398 12255 * throw away the context from the process than to do a
12399 12256 * special song and dance to keep things consistent for the
12400 12257 * handlers.
12401 12258 *
12402 12259 * Since the process suddenly ends up without a context and our caller
12403 12260 * holds the hat lock, threads that fault after this function is called
12404 12261 * will pile up on the lock. We can then do whatever we need to
12405 12262 * atomically from the context of the caller. The first blocked thread
12406 12263 * to resume executing will get the process a new context, and the
12407 12264 * process will resume executing.
12408 12265 *
12409 12266 * One added advantage of this approach is that on MMUs that
12410 12267 * support a "flush all" operation, we will delay the flush until
12411 12268 * cnum wrap-around, and then flush the TLB one time. This
12412 12269 * is rather rare, so it's a lot less expensive than making 8000
12413 12270 * x-calls to flush the TLB 8000 times.
12414 12271 *
12415 12272 * A per-process (PP) lock is used to synchronize ctx allocations in
12416 12273 * resume() and ctx invalidations here.
12417 12274 */
12418 12275 static void
12419 12276 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12420 12277 {
12421 12278 cpuset_t cpuset;
12422 12279 int cnum, currcnum;
12423 12280 mmu_ctx_t *mmu_ctxp;
12424 12281 int i;
12425 12282 uint_t pstate_save;
12426 12283
12427 12284 SFMMU_STAT(sf_ctx_inv);
12428 12285
12429 12286 ASSERT(sfmmu_hat_lock_held(sfmmup));
12430 12287 ASSERT(sfmmup != ksfmmup);
12431 12288
12432 12289 kpreempt_disable();
12433 12290
12434 12291 mmu_ctxp = CPU_MMU_CTXP(CPU);
12435 12292 ASSERT(mmu_ctxp);
12436 12293 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12437 12294 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12438 12295
12439 12296 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12440 12297
12441 12298 pstate_save = sfmmu_disable_intrs();
12442 12299
12443 12300 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12444 12301 /* set HAT cnum invalid across all context domains. */
12445 12302 for (i = 0; i < max_mmu_ctxdoms; i++) {
12446 12303
12447 12304 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12448 12305 if (cnum == INVALID_CONTEXT) {
12449 12306 continue;
12450 12307 }
12451 12308
12452 12309 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12453 12310 }
12454 12311 membar_enter(); /* make sure globally visible to all CPUs */
12455 12312 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12456 12313
12457 12314 sfmmu_enable_intrs(pstate_save);
12458 12315
12459 12316 cpuset = sfmmup->sfmmu_cpusran;
12460 12317 CPUSET_DEL(cpuset, CPU->cpu_id);
12461 12318 CPUSET_AND(cpuset, cpu_ready_set);
12462 12319 if (!CPUSET_ISNULL(cpuset)) {
12463 12320 SFMMU_XCALL_STATS(sfmmup);
12464 12321 xt_some(cpuset, sfmmu_raise_tsb_exception,
12465 12322 (uint64_t)sfmmup, INVALID_CONTEXT);
12466 12323 xt_sync(cpuset);
12467 12324 SFMMU_STAT(sf_tsb_raise_exception);
12468 12325 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12469 12326 }
12470 12327
12471 12328 /*
12472 12329 * If the hat to-be-invalidated is the same as the current
12473 12330 * process on local CPU we need to invalidate
12474 12331 * this CPU context as well.
12475 12332 */
12476 12333 if ((sfmmu_getctx_sec() == currcnum) &&
12477 12334 (currcnum != INVALID_CONTEXT)) {
12478 12335 /* sets shared context to INVALID too */
12479 12336 sfmmu_setctx_sec(INVALID_CONTEXT);
12480 12337 sfmmu_clear_utsbinfo();
12481 12338 }
12482 12339
12483 12340 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12484 12341
12485 12342 kpreempt_enable();
12486 12343
12487 12344 /*
12488 12345 * we hold the hat lock, so nobody should allocate a context
12489 12346 * for us yet
12490 12347 */
12491 12348 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12492 12349 }
12493 12350
12494 12351 #ifdef VAC
12495 12352 /*
12496 12353  * We need to flush the cache on all cpus. It is possible that
12497 12354  * a process referenced a page as cacheable but has since exited
12498 12355  * and cleared the mapping list. We still need to flush it but have no
12499 12356  * state, so flushing all cpus is the only alternative.
12500 12357 */
12501 12358 void
12502 12359 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12503 12360 {
12504 12361 cpuset_t cpuset;
12505 12362
12506 12363 kpreempt_disable();
12507 12364 cpuset = cpu_ready_set;
12508 12365 CPUSET_DEL(cpuset, CPU->cpu_id);
12509 12366 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12510 12367 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12511 12368 xt_sync(cpuset);
12512 12369 vac_flushpage(pfnum, vcolor);
12513 12370 kpreempt_enable();
12514 12371 }
12515 12372
12516 12373 void
12517 12374 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12518 12375 {
12519 12376 cpuset_t cpuset;
12520 12377
12521 12378 ASSERT(vcolor >= 0);
12522 12379
12523 12380 kpreempt_disable();
12524 12381 cpuset = cpu_ready_set;
12525 12382 CPUSET_DEL(cpuset, CPU->cpu_id);
12526 12383 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12527 12384 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12528 12385 xt_sync(cpuset);
12529 12386 vac_flushcolor(vcolor, pfnum);
12530 12387 kpreempt_enable();
12531 12388 }
12532 12389 #endif /* VAC */
12533 12390
12534 12391 /*
12535 12392 * We need to prevent processes from accessing the TSB using a cached physical
12536 12393 * address. It's alright if they try to access the TSB via virtual address
12537 12394 * since they will just fault on that virtual address once the mapping has
12538 12395 * been suspended.
12539 12396 */
12540 12397 #pragma weak sendmondo_in_recover
12541 12398
12542 12399 /* ARGSUSED */
12543 12400 static int
12544 12401 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12545 12402 {
12546 12403 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12547 12404 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12548 12405 hatlock_t *hatlockp;
12549 12406 sf_scd_t *scdp;
12550 12407
12551 12408 if (flags != HAT_PRESUSPEND)
12552 12409 return (0);
12553 12410
12554 12411 /*
12555 12412 	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12556 12413 	 * be a shared hat; set the SCD's tsbinfo flag.
12557 12414 	 * If the tsb is not shared, sfmmup is a private hat; set
12558 12415 	 * its private tsbinfo's flag.
12559 12416 */
12560 12417 hatlockp = sfmmu_hat_enter(sfmmup);
12561 12418 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12562 12419
12563 12420 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12564 12421 sfmmu_tsb_inv_ctx(sfmmup);
12565 12422 sfmmu_hat_exit(hatlockp);
12566 12423 } else {
12567 12424 /* release lock on the shared hat */
12568 12425 sfmmu_hat_exit(hatlockp);
12569 12426 /* sfmmup is a shared hat */
12570 12427 ASSERT(sfmmup->sfmmu_scdhat);
12571 12428 scdp = sfmmup->sfmmu_scdp;
12572 12429 ASSERT(scdp != NULL);
12573 12430 /* get private hat from the scd list */
12574 12431 mutex_enter(&scdp->scd_mutex);
12575 12432 sfmmup = scdp->scd_sf_list;
12576 12433 while (sfmmup != NULL) {
12577 12434 hatlockp = sfmmu_hat_enter(sfmmup);
12578 12435 /*
12579 12436 * We do not call sfmmu_tsb_inv_ctx here because
12580 12437 * sendmondo_in_recover check is only needed for
12581 12438 * sun4u.
12582 12439 */
12583 12440 sfmmu_invalidate_ctx(sfmmup);
12584 12441 sfmmu_hat_exit(hatlockp);
12585 12442 sfmmup = sfmmup->sfmmu_scd_link.next;
12586 12443
12587 12444 }
12588 12445 mutex_exit(&scdp->scd_mutex);
12589 12446 }
12590 12447 return (0);
12591 12448 }
12592 12449
12593 12450 static void
12594 12451 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12595 12452 {
12596 12453 extern uint32_t sendmondo_in_recover;
12597 12454
12598 12455 ASSERT(sfmmu_hat_lock_held(sfmmup));
12599 12456
12600 12457 /*
12601 12458 * For Cheetah+ Erratum 25:
12602 12459 * Wait for any active recovery to finish. We can't risk
12603 12460 * relocating the TSB of the thread running mondo_recover_proc()
12604 12461 * since, if we did that, we would deadlock. The scenario we are
12605 12462 * trying to avoid is as follows:
12606 12463 *
12607 12464 * THIS CPU RECOVER CPU
12608 12465 * -------- -----------
12609 12466 * Begins recovery, walking through TSB
12610 12467 * hat_pagesuspend() TSB TTE
12611 12468 * TLB miss on TSB TTE, spins at TL1
12612 12469 * xt_sync()
12613 12470 * send_mondo_timeout()
12614 12471 * mondo_recover_proc()
12615 12472 * ((deadlocked))
12616 12473 *
12617 12474 * The second half of the workaround is that mondo_recover_proc()
12618 12475 * checks to see if the tsb_info has the RELOC flag set, and if it
12619 12476 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12620 12477 * and hence avoiding the TLB miss that could result in a deadlock.
12621 12478 */
12622 12479 if (&sendmondo_in_recover) {
12623 12480 membar_enter(); /* make sure RELOC flag visible */
12624 12481 while (sendmondo_in_recover) {
12625 12482 drv_usecwait(1);
12626 12483 membar_consumer();
12627 12484 }
12628 12485 }
12629 12486
12630 12487 sfmmu_invalidate_ctx(sfmmup);
12631 12488 }
12632 12489
12633 12490 /* ARGSUSED */
12634 12491 static int
12635 12492 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12636 12493 void *tsbinfo, pfn_t newpfn)
12637 12494 {
12638 12495 hatlock_t *hatlockp;
12639 12496 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12640 12497 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12641 12498
12642 12499 if (flags != HAT_POSTUNSUSPEND)
12643 12500 return (0);
12644 12501
12645 12502 hatlockp = sfmmu_hat_enter(sfmmup);
12646 12503
12647 12504 SFMMU_STAT(sf_tsb_reloc);
12648 12505
12649 12506 /*
12650 12507 * The process may have swapped out while we were relocating one
12651 12508 * of its TSBs. If so, don't bother doing the setup since the
12652 12509 * process can't be using the memory anymore.
12653 12510 */
12654 12511 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12655 12512 ASSERT(va == tsbinfop->tsb_va);
12656 12513 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12657 12514
12658 12515 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12659 12516 sfmmu_inv_tsb(tsbinfop->tsb_va,
12660 12517 TSB_BYTES(tsbinfop->tsb_szc));
12661 12518 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12662 12519 }
12663 12520 }
12664 12521
12665 12522 membar_exit();
12666 12523 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12667 12524 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12668 12525
12669 12526 sfmmu_hat_exit(hatlockp);
12670 12527
12671 12528 return (0);
12672 12529 }
12673 12530
12674 12531 /*
12675 12532 * Allocate and initialize a tsb_info structure. Note that we may or may not
12676 12533 * allocate a TSB here, depending on the flags passed in.
12677 12534 */
12678 12535 static int
12679 12536 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12680 12537 uint_t flags, sfmmu_t *sfmmup)
12681 12538 {
12682 12539 int err;
12683 12540
12684 12541 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12685 12542 sfmmu_tsbinfo_cache, KM_SLEEP);
12686 12543
12687 12544 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12688 12545 tsb_szc, flags, sfmmup)) != 0) {
12689 12546 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12690 12547 SFMMU_STAT(sf_tsb_allocfail);
12691 12548 *tsbinfopp = NULL;
12692 12549 return (err);
12693 12550 }
12694 12551 SFMMU_STAT(sf_tsb_alloc);
12695 12552
12696 12553 /*
12697 12554 * Bump the TSB size counters for this TSB size.
12698 12555 */
12699 12556 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12700 12557 return (0);
12701 12558 }
12702 12559
12703 12560 static void
12704 12561 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12705 12562 {
12706 12563 caddr_t tsbva = tsbinfo->tsb_va;
12707 12564 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12708 12565 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12709 12566 vmem_t *vmp = tsbinfo->tsb_vmp;
12710 12567
12711 12568 /*
12712 12569 * If we allocated this TSB from relocatable kernel memory, then we
12713 12570 * need to uninstall the callback handler.
12714 12571 */
12715 12572 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12716 12573 uintptr_t slab_mask;
12717 12574 caddr_t slab_vaddr;
12718 12575 page_t **ppl;
12719 12576 int ret;
12720 12577
12721 12578 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12722 12579 if (tsb_size > MMU_PAGESIZE4M)
12723 12580 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12724 12581 else
12725 12582 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12726 12583 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12727 12584
12728 12585 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12729 12586 ASSERT(ret == 0);
12730 12587 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12731 12588 0, NULL);
12732 12589 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12733 12590 }
12734 12591
12735 12592 if (kmem_cachep != NULL) {
12736 12593 kmem_cache_free(kmem_cachep, tsbva);
12737 12594 } else {
12738 12595 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12739 12596 }
12740 12597 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12741 12598 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12742 12599 }
12743 12600
12744 12601 static void
12745 12602 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12746 12603 {
12747 12604 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12748 12605 sfmmu_tsb_free(tsbinfo);
12749 12606 }
12750 12607 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12751 12608
12752 12609 }
12753 12610
12754 12611 /*
12755 12612 * Setup all the references to physical memory for this tsbinfo.
12756 12613 * The underlying page(s) must be locked.
12757 12614 */
12758 12615 static void
12759 12616 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12760 12617 {
12761 12618 ASSERT(pfn != PFN_INVALID);
12762 12619 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12763 12620
12764 12621 #ifndef sun4v
12765 12622 if (tsbinfo->tsb_szc == 0) {
12766 12623 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12767 12624 PROT_WRITE|PROT_READ, TTE8K);
12768 12625 } else {
12769 12626 /*
12770 12627 * Round down PA and use a large mapping; the handlers will
12771 12628 * compute the TSB pointer at the correct offset into the
12772 12629 * big virtual page. NOTE: this assumes all TSBs larger
12773 12630 * than 8K must come from physically contiguous slabs of
12774 12631 * size tsb_slab_size.
12775 12632 */
12776 12633 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12777 12634 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12778 12635 }
12779 12636 tsbinfo->tsb_pa = ptob(pfn);
12780 12637
12781 12638 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12782 12639 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12783 12640
12784 12641 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12785 12642 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12786 12643 #else /* sun4v */
12787 12644 tsbinfo->tsb_pa = ptob(pfn);
12788 12645 #endif /* sun4v */
12789 12646 }
12790 12647
12791 12648
12792 12649 /*
12793 12650 * Returns zero on success, ENOMEM if over the high water mark,
12794 12651 * or EAGAIN if the caller needs to retry with a smaller TSB
12795 12652 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12796 12653 *
12797 12654 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12798 12655 * is specified and the TSB requested is PAGESIZE, though it
12799 12656 * may sleep waiting for memory if sufficient memory is not
12800 12657 * available.
12801 12658 */
12802 12659 static int
12803 12660 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12804 12661 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12805 12662 {
12806 12663 caddr_t vaddr = NULL;
12807 12664 caddr_t slab_vaddr;
12808 12665 uintptr_t slab_mask;
12809 12666 int tsbbytes = TSB_BYTES(tsbcode);
12810 12667 int lowmem = 0;
12811 12668 struct kmem_cache *kmem_cachep = NULL;
12812 12669 vmem_t *vmp = NULL;
12813 12670 lgrp_id_t lgrpid = LGRP_NONE;
12814 12671 pfn_t pfn;
12815 12672 uint_t cbflags = HAC_SLEEP;
12816 12673 page_t **pplist;
12817 12674 int ret;
12818 12675
12819 12676 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12820 12677 if (tsbbytes > MMU_PAGESIZE4M)
12821 12678 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12822 12679 else
12823 12680 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12824 12681
12825 12682 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12826 12683 flags |= TSB_ALLOC;
12827 12684
12828 12685 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12829 12686
12830 12687 tsbinfo->tsb_sfmmu = sfmmup;
12831 12688
12832 12689 /*
12833 12690 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12834 12691 * return.
12835 12692 */
12836 12693 if ((flags & TSB_ALLOC) == 0) {
12837 12694 tsbinfo->tsb_szc = tsbcode;
12838 12695 tsbinfo->tsb_ttesz_mask = tteszmask;
12839 12696 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12840 12697 tsbinfo->tsb_pa = -1;
12841 12698 tsbinfo->tsb_tte.ll = 0;
12842 12699 tsbinfo->tsb_next = NULL;
12843 12700 tsbinfo->tsb_flags = TSB_SWAPPED;
12844 12701 tsbinfo->tsb_cache = NULL;
12845 12702 tsbinfo->tsb_vmp = NULL;
12846 12703 return (0);
12847 12704 }
12848 12705
12849 12706 #ifdef DEBUG
12850 12707 /*
12851 12708 * For debugging:
12852 12709 * Randomly force allocation failures every tsb_alloc_mtbf
12853 12710 * tries if TSB_FORCEALLOC is not specified. This will
12854 12711 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12855 12712 * it is even, to allow testing of both failure paths...
12856 12713 */
12857 12714 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12858 12715 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12859 12716 tsb_alloc_count = 0;
12860 12717 tsb_alloc_fail_mtbf++;
12861 12718 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12862 12719 }
12863 12720 #endif /* DEBUG */
12864 12721
12865 12722 /*
12866 12723 * Enforce high water mark if we are not doing a forced allocation
12867 12724 * and are not shrinking a process' TSB.
12868 12725 */
12869 12726 if ((flags & TSB_SHRINK) == 0 &&
12870 12727 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12871 12728 if ((flags & TSB_FORCEALLOC) == 0)
12872 12729 return (ENOMEM);
12873 12730 lowmem = 1;
12874 12731 }
12875 12732
12876 12733 /*
12877 12734 * Allocate from the correct location based upon the size of the TSB
12878 12735 * compared to the base page size, and what memory conditions dictate.
12879 12736 * Note we always do nonblocking allocations from the TSB arena since
12880 12737 * we don't want memory fragmentation to cause processes to block
12881 12738 * indefinitely waiting for memory; until the kernel algorithms that
12882 12739 * coalesce large pages are improved this is our best option.
12883 12740 *
12884 12741 * Algorithm:
12885 12742 * If allocating a "large" TSB (>8K), allocate from the
12886 12743 * appropriate kmem_tsb_default_arena vmem arena
12887 12744 * else if low on memory or the TSB_FORCEALLOC flag is set or
12888 12745 * tsb_forceheap is set
12889 12746 * Allocate from kernel heap via sfmmu_tsb8k_cache with
12890 12747 * KM_SLEEP (never fails)
12891 12748 * else
12892 12749 * Allocate from appropriate sfmmu_tsb_cache with
12893 12750 * KM_NOSLEEP
12894 12751 * endif
12895 12752 */
12896 12753 if (tsb_lgrp_affinity)
12897 12754 lgrpid = lgrp_home_id(curthread);
12898 12755 if (lgrpid == LGRP_NONE)
12899 12756 lgrpid = 0; /* use lgrp of boot CPU */
12900 12757
12901 12758 if (tsbbytes > MMU_PAGESIZE) {
12902 12759 if (tsbbytes > MMU_PAGESIZE4M) {
12903 12760 vmp = kmem_bigtsb_default_arena[lgrpid];
12904 12761 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12905 12762 0, 0, NULL, NULL, VM_NOSLEEP);
12906 12763 } else {
12907 12764 vmp = kmem_tsb_default_arena[lgrpid];
12908 12765 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12909 12766 0, 0, NULL, NULL, VM_NOSLEEP);
12910 12767 }
12911 12768 #ifdef DEBUG
12912 12769 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12913 12770 #else /* !DEBUG */
12914 12771 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12915 12772 #endif /* DEBUG */
12916 12773 kmem_cachep = sfmmu_tsb8k_cache;
12917 12774 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12918 12775 ASSERT(vaddr != NULL);
12919 12776 } else {
12920 12777 kmem_cachep = sfmmu_tsb_cache[lgrpid];
12921 12778 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12922 12779 }
12923 12780
12924 12781 tsbinfo->tsb_cache = kmem_cachep;
12925 12782 tsbinfo->tsb_vmp = vmp;
12926 12783
12927 12784 if (vaddr == NULL) {
12928 12785 return (EAGAIN);
12929 12786 }
12930 12787
12931 12788 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12932 12789 kmem_cachep = tsbinfo->tsb_cache;
12933 12790
12934 12791 /*
12935 12792 * If we are allocating from outside the cage, then we need to
12936 12793 * register a relocation callback handler. Note that for now
12937 12794 * since pseudo mappings always hang off of the slab's root page,
12938 12795 * we need only lock the first 8K of the TSB slab. This is a bit
12939 12796 * hacky but it is good for performance.
12940 12797 */
12941 12798 if (kmem_cachep != sfmmu_tsb8k_cache) {
12942 12799 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12943 12800 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12944 12801 ASSERT(ret == 0);
12945 12802 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12946 12803 cbflags, (void *)tsbinfo, &pfn, NULL);
12947 12804
12948 12805 /*
12949 12806 * Need to free up resources if we could not successfully
12950 12807 * add the callback function and return an error condition.
12951 12808 */
12952 12809 if (ret != 0) {
12953 12810 if (kmem_cachep) {
12954 12811 kmem_cache_free(kmem_cachep, vaddr);
12955 12812 } else {
12956 12813 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12957 12814 }
12958 12815 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12959 12816 S_WRITE);
12960 12817 return (EAGAIN);
12961 12818 }
12962 12819 } else {
12963 12820 /*
12964 12821 * Since allocation of 8K TSBs from heap is rare and occurs
12965 12822 		 * during memory pressure, we allocate them from permanent
12966 12823 * memory rather than using callbacks to get the PFN.
12967 12824 */
12968 12825 pfn = hat_getpfnum(kas.a_hat, vaddr);
12969 12826 }
12970 12827
12971 12828 tsbinfo->tsb_va = vaddr;
12972 12829 tsbinfo->tsb_szc = tsbcode;
12973 12830 tsbinfo->tsb_ttesz_mask = tteszmask;
12974 12831 tsbinfo->tsb_next = NULL;
12975 12832 tsbinfo->tsb_flags = 0;
12976 12833
12977 12834 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12978 12835
12979 12836 sfmmu_inv_tsb(vaddr, tsbbytes);
12980 12837
12981 12838 if (kmem_cachep != sfmmu_tsb8k_cache) {
12982 12839 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12983 12840 }
12984 12841
12985 12842 return (0);
12986 12843 }
12987 12844
12988 12845 /*
12989 12846 * Initialize per cpu tsb and per cpu tsbmiss_area
12990 12847 */
12991 12848 void
12992 12849 sfmmu_init_tsbs(void)
12993 12850 {
12994 12851 int i;
12995 12852 struct tsbmiss *tsbmissp;
12996 12853 struct kpmtsbm *kpmtsbmp;
12997 12854 #ifndef sun4v
12998 12855 extern int dcache_line_mask;
12999 12856 #endif /* sun4v */
13000 12857 extern uint_t vac_colors;
13001 12858
13002 12859 /*
13003 12860 * Init. tsb miss area.
13004 12861 */
13005 12862 tsbmissp = tsbmiss_area;
13006 12863
13007 12864 for (i = 0; i < NCPU; tsbmissp++, i++) {
13008 12865 /*
13009 12866 * initialize the tsbmiss area.
13010 12867 * Do this for all possible CPUs as some may be added
13011 12868 * while the system is running. There is no cost to this.
13012 12869 */
13013 12870 tsbmissp->ksfmmup = ksfmmup;
13014 12871 #ifndef sun4v
13015 12872 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13016 12873 #endif /* sun4v */
13017 12874 tsbmissp->khashstart =
13018 12875 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13019 12876 tsbmissp->uhashstart =
13020 12877 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13021 12878 tsbmissp->khashsz = khmehash_num;
13022 12879 tsbmissp->uhashsz = uhmehash_num;
13023 12880 }
13024 12881
13025 12882 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13026 12883 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13027 12884
13028 12885 if (kpm_enable == 0)
13029 12886 return;
13030 12887
13031 12888 /* -- Begin KPM specific init -- */
13032 12889
13033 12890 if (kpm_smallpages) {
13034 12891 /*
13035 12892 * If we're using base pagesize pages for seg_kpm
13036 12893 * mappings, we use the kernel TSB since we can't afford
13037 12894 * to allocate a second huge TSB for these mappings.
13038 12895 */
13039 12896 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13040 12897 kpm_tsbsz = ktsb_szcode;
13041 12898 kpmsm_tsbbase = kpm_tsbbase;
13042 12899 kpmsm_tsbsz = kpm_tsbsz;
13043 12900 } else {
13044 12901 /*
13045 12902 		 * In the VAC conflict case, just put the entries in the
13046 12903 * kernel 8K indexed TSB for now so we can find them.
13047 12904 * This could really be changed in the future if we feel
13048 12905 * the need...
13049 12906 */
13050 12907 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13051 12908 kpmsm_tsbsz = ktsb_szcode;
13052 12909 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13053 12910 kpm_tsbsz = ktsb4m_szcode;
13054 12911 }
13055 12912
13056 12913 kpmtsbmp = kpmtsbm_area;
13057 12914 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13058 12915 /*
13059 12916 * Initialize the kpmtsbm area.
13060 12917 * Do this for all possible CPUs as some may be added
13061 12918 * while the system is running. There is no cost to this.
13062 12919 */
13063 12920 kpmtsbmp->vbase = kpm_vbase;
13064 12921 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13065 12922 kpmtsbmp->sz_shift = kpm_size_shift;
13066 12923 kpmtsbmp->kpmp_shift = kpmp_shift;
13067 12924 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13068 12925 if (kpm_smallpages == 0) {
13069 12926 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13070 12927 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13071 12928 } else {
13072 12929 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13073 12930 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13074 12931 }
13075 12932 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13076 12933 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13077 12934 #ifdef DEBUG
13078 12935 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13079 12936 #endif /* DEBUG */
13080 12937 if (ktsb_phys)
13081 12938 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13082 12939 }
13083 12940
13084 12941 /* -- End KPM specific init -- */
13085 12942 }
13086 12943
13087 12944 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13088 12945 struct tsb_info ktsb_info[2];
13089 12946
13090 12947 /*
13091 12948 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13092 12949 */
13093 12950 void
13094 12951 sfmmu_init_ktsbinfo()
13095 12952 {
13096 12953 ASSERT(ksfmmup != NULL);
13097 12954 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13098 12955 /*
13099 12956 	 * Allocate tsbinfos for the kernel and copy in data
13100 12957 	 * to make debugging and sun4v setup easier.
13101 12958 */
13102 12959 ktsb_info[0].tsb_sfmmu = ksfmmup;
13103 12960 ktsb_info[0].tsb_szc = ktsb_szcode;
13104 12961 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13105 12962 ktsb_info[0].tsb_va = ktsb_base;
13106 12963 ktsb_info[0].tsb_pa = ktsb_pbase;
13107 12964 ktsb_info[0].tsb_flags = 0;
13108 12965 ktsb_info[0].tsb_tte.ll = 0;
13109 12966 ktsb_info[0].tsb_cache = NULL;
13110 12967
13111 12968 ktsb_info[1].tsb_sfmmu = ksfmmup;
13112 12969 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13113 12970 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13114 12971 ktsb_info[1].tsb_va = ktsb4m_base;
13115 12972 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13116 12973 ktsb_info[1].tsb_flags = 0;
13117 12974 ktsb_info[1].tsb_tte.ll = 0;
13118 12975 ktsb_info[1].tsb_cache = NULL;
13119 12976
13120 12977 /* Link them into ksfmmup. */
13121 12978 ktsb_info[0].tsb_next = &ktsb_info[1];
13122 12979 ktsb_info[1].tsb_next = NULL;
13123 12980 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13124 12981
13125 12982 sfmmu_setup_tsbinfo(ksfmmup);
13126 12983 }
13127 12984
13128 12985 /*
13129 12986 * Cache the last value returned from va_to_pa(). If the VA specified
13130 12987 * in the current call to cached_va_to_pa() maps to the same Page (as the
13131 12988 * previous call to cached_va_to_pa()), then compute the PA using
13132 12989 * cached info, else call va_to_pa().
13133 12990 *
13134 12991 * Note: this function is neither MT-safe nor consistent in the presence
13135 12992 * of multiple, interleaved threads. This function was created to enable
13136 12993 * an optimization used during boot (at a point when there's only one thread
13137 12994 * executing on the "boot CPU", and before startup_vm() has been called).
13138 12995 */
13139 12996 static uint64_t
13140 12997 cached_va_to_pa(void *vaddr)
13141 12998 {
13142 12999 static uint64_t prev_vaddr_base = 0;
13143 13000 static uint64_t prev_pfn = 0;
13144 13001
13145 13002 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13146 13003 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13147 13004 } else {
13148 13005 uint64_t pa = va_to_pa(vaddr);
13149 13006
13150 13007 if (pa != ((uint64_t)-1)) {
13151 13008 /*
13152 13009 * Computed physical address is valid. Cache its
13153 13010 * related info for the next cached_va_to_pa() call.
13154 13011 */
13155 13012 prev_pfn = pa & MMU_PAGEMASK;
13156 13013 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13157 13014 }
13158 13015
13159 13016 return (pa);
13160 13017 }
13161 13018 }
13162 13019
13163 13020 /*
13164 13021  * Carve up our nucleus hblk region. We may allocate more hblks than
13165 13022  * asked for due to rounding errors, but we are guaranteed to have at least
13166 13023 * enough space to allocate the requested number of hblk8's and hblk1's.
13167 13024 */
13168 13025 void
13169 13026 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13170 13027 {
13171 13028 struct hme_blk *hmeblkp;
13172 13029 size_t hme8blk_sz, hme1blk_sz;
13173 13030 size_t i;
13174 13031 size_t hblk8_bound;
13175 13032 ulong_t j = 0, k = 0;
13176 13033
13177 13034 ASSERT(addr != NULL && size != 0);
13178 13035
13179 13036 /* Need to use proper structure alignment */
13180 13037 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13181 13038 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13182 13039
13183 13040 nucleus_hblk8.list = (void *)addr;
13184 13041 nucleus_hblk8.index = 0;
13185 13042
13186 13043 /*
13187 13044 * Use as much memory as possible for hblk8's since we
13188 13045 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13189 13046 * We need to hold back enough space for the hblk1's which
13190 13047 * we'll allocate next.
13191 13048 */
13192 13049 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13193 13050 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13194 13051 hmeblkp = (struct hme_blk *)addr;
13195 13052 addr += hme8blk_sz;
13196 13053 hmeblkp->hblk_nuc_bit = 1;
13197 13054 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13198 13055 }
13199 13056 nucleus_hblk8.len = j;
13200 13057 ASSERT(j >= nhblk8);
13201 13058 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13202 13059
13203 13060 nucleus_hblk1.list = (void *)addr;
13204 13061 nucleus_hblk1.index = 0;
13205 13062 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13206 13063 hmeblkp = (struct hme_blk *)addr;
13207 13064 addr += hme1blk_sz;
13208 13065 hmeblkp->hblk_nuc_bit = 1;
13209 13066 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13210 13067 }
13211 13068 ASSERT(k >= nhblk1);
13212 13069 nucleus_hblk1.len = k;
13213 13070 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13214 13071 }
13215 13072
13216 13073 /*
13217 13074 * This function is currently not supported on this platform. For what
13218 13075 * it's supposed to do, see hat.c and hat_srmmu.c
13219 13076 */
13220 13077 /* ARGSUSED */
13221 13078 faultcode_t
13222 13079 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13223 13080 uint_t flags)
13224 13081 {
13225 13082 return (FC_NOSUPPORT);
13226 13083 }
13227 13084
13228 13085 /*
13229 13086  * Searches the mapping list of the page for a mapping of the same size. If none
13230 13087  * is found, the corresponding bit is cleared in the p_index field. When large
13231 13088 * pages are more prevalent in the system, we can maintain the mapping list
13232 13089 * in order and we don't have to traverse the list each time. Just check the
13233 13090 * next and prev entries, and if both are of different size, we clear the bit.
13234 13091 */
13235 13092 static void
13236 13093 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13237 13094 {
13238 13095 struct sf_hment *sfhmep;
13239 13096 struct hme_blk *hmeblkp;
13240 13097 int index;
13241 13098 pgcnt_t npgs;
13242 13099
13243 13100 ASSERT(ttesz > TTE8K);
13244 13101
13245 13102 ASSERT(sfmmu_mlist_held(pp));
13246 13103
13247 13104 ASSERT(PP_ISMAPPED_LARGE(pp));
13248 13105
13249 13106 /*
13250 13107 	 * Traverse the mapping list looking for another mapping of the same
13251 13108 	 * size, since we only want to clear the index field if all mappings of
13252 13109 * that size are gone.
13253 13110 */
13254 13111
13255 13112 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13256 13113 if (IS_PAHME(sfhmep))
13257 13114 continue;
13258 13115 hmeblkp = sfmmu_hmetohblk(sfhmep);
13259 13116 if (hme_size(sfhmep) == ttesz) {
13260 13117 /*
13261 13118 			 * Another mapping of the same size exists; don't clear the index.
13262 13119 */
13263 13120 return;
13264 13121 }
13265 13122 }
13266 13123
13267 13124 /*
13268 13125 * Clear the p_index bit for large page.
13269 13126 */
13270 13127 index = PAGESZ_TO_INDEX(ttesz);
13271 13128 npgs = TTEPAGES(ttesz);
13272 13129 while (npgs-- > 0) {
13273 13130 ASSERT(pp->p_index & index);
13274 13131 pp->p_index &= ~index;
13275 13132 pp = PP_PAGENEXT(pp);
13276 13133 }
13277 13134 }
13278 13135
13279 13136 /*
13280 13137 * return supported features
13281 13138 */
13282 13139 /* ARGSUSED */
13283 13140 int
13284 13141 hat_supported(enum hat_features feature, void *arg)
13285 13142 {
13286 13143 switch (feature) {
13287 13144 case HAT_SHARED_PT:
13288 13145 case HAT_DYNAMIC_ISM_UNMAP:
13289 13146 case HAT_VMODSORT:
13290 13147 return (1);
13291 13148 case HAT_SHARED_REGIONS:
13292 13149 if (shctx_on)
13293 13150 return (1);
13294 13151 else
13295 13152 return (0);
13296 13153 default:
13297 13154 return (0);
13298 13155 }
13299 13156 }
13300 13157
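/*
 * hat_enter()/hat_exit() serialize hat operations for a user hat by
 * taking and dropping the hat lock chosen by TSB_HASH(); the kernel
 * hat (ksfmmup) is never locked here.
 */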
13301 13158 void
13302 13159 hat_enter(struct hat *hat)
13303 13160 {
13304 13161 hatlock_t *hatlockp;
13305 13162
13306 13163 if (hat != ksfmmup) {
13307 13164 hatlockp = TSB_HASH(hat);
13308 13165 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13309 13166 }
13310 13167 }
13311 13168
13312 13169 void
13313 13170 hat_exit(struct hat *hat)
13314 13171 {
13315 13172 hatlock_t *hatlockp;
13316 13173
13317 13174 if (hat != ksfmmup) {
13318 13175 hatlockp = TSB_HASH(hat);
13319 13176 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13320 13177 }
13321 13178 }
13322 13179
13323 13180 /*ARGSUSED*/
13324 13181 void
13325 13182 hat_reserve(struct as *as, caddr_t addr, size_t len)
13326 13183 {
13327 13184 }
13328 13185
13329 13186 static void
13330 13187 hat_kstat_init(void)
13331 13188 {
13332 13189 kstat_t *ksp;
13333 13190
13334 13191 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13335 13192 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13336 13193 KSTAT_FLAG_VIRTUAL);
13337 13194 if (ksp) {
13338 13195 ksp->ks_data = (void *) &sfmmu_global_stat;
13339 13196 kstat_install(ksp);
13340 13197 }
13341 13198 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13342 13199 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13343 13200 KSTAT_FLAG_VIRTUAL);
13344 13201 if (ksp) {
13345 13202 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13346 13203 kstat_install(ksp);
13347 13204 }
13348 13205 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13349 13206 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13350 13207 KSTAT_FLAG_WRITABLE);
13351 13208 if (ksp) {
13352 13209 ksp->ks_update = sfmmu_kstat_percpu_update;
13353 13210 kstat_install(ksp);
13354 13211 }
13355 13212 }
13356 13213
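/*
 * kstat update routine for the per-CPU hat statistics: on KSTAT_READ,
 * derive the per-CPU TLB/TSB miss counts from the tsbmiss and kpmtsbm
 * areas; on KSTAT_WRITE, clear the underlying counters.
 */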
13357 13214 /* ARGSUSED */
13358 13215 static int
13359 13216 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13360 13217 {
13361 13218 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13362 13219 struct tsbmiss *tsbm = tsbmiss_area;
13363 13220 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13364 13221 int i;
13365 13222
13366 13223 ASSERT(cpu_kstat);
13367 13224 if (rw == KSTAT_READ) {
13368 13225 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13369 13226 cpu_kstat->sf_itlb_misses = 0;
13370 13227 cpu_kstat->sf_dtlb_misses = 0;
13371 13228 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13372 13229 tsbm->uprot_traps;
13373 13230 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13374 13231 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13375 13232 cpu_kstat->sf_tsb_hits = 0;
13376 13233 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13377 13234 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13378 13235 }
13379 13236 } else {
13380 13237 /* KSTAT_WRITE is used to clear stats */
13381 13238 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13382 13239 tsbm->utsb_misses = 0;
13383 13240 tsbm->ktsb_misses = 0;
13384 13241 tsbm->uprot_traps = 0;
13385 13242 tsbm->kprot_traps = 0;
13386 13243 kpmtsbm->kpm_dtlb_misses = 0;
13387 13244 kpmtsbm->kpm_tsb_misses = 0;
13388 13245 }
13389 13246 }
13390 13247 return (0);
13391 13248 }
13392 13249
13393 13250 #ifdef DEBUG
13394 13251
13395 13252 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13396 13253
13397 13254 /*
13398 13255 * A tte checker. *orig_old is the value we read before cas.
13399 13256 * *cur is the value returned by cas.
13400 13257 * *new is the desired value when we do the cas.
13401 13258 *
13402 13259 * *hmeblkp is currently unused.
13403 13260 */
13404 13261
13405 13262 /* ARGSUSED */
13406 13263 void
13407 13264 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13408 13265 {
13409 13266 pfn_t i, j, k;
13410 13267 int cpuid = CPU->cpu_id;
13411 13268
13412 13269 gorig[cpuid] = orig_old;
13413 13270 gcur[cpuid] = cur;
13414 13271 gnew[cpuid] = new;
13415 13272
13416 13273 #ifdef lint
13417 13274 hmeblkp = hmeblkp;
13418 13275 #endif
13419 13276
13420 13277 if (TTE_IS_VALID(orig_old)) {
13421 13278 if (TTE_IS_VALID(cur)) {
13422 13279 i = TTE_TO_TTEPFN(orig_old);
13423 13280 j = TTE_TO_TTEPFN(cur);
13424 13281 k = TTE_TO_TTEPFN(new);
13425 13282 if (i != j) {
13426 13283 /* remap error? */
13427 13284 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13428 13285 }
13429 13286
13430 13287 if (i != k) {
13431 13288 /* remap error? */
13432 13289 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13433 13290 }
13434 13291 } else {
13435 13292 if (TTE_IS_VALID(new)) {
13436 13293 panic("chk_tte: invalid cur? ");
13437 13294 }
13438 13295
13439 13296 i = TTE_TO_TTEPFN(orig_old);
13440 13297 k = TTE_TO_TTEPFN(new);
13441 13298 if (i != k) {
13442 13299 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13443 13300 }
13444 13301 }
13445 13302 } else {
13446 13303 if (TTE_IS_VALID(cur)) {
13447 13304 j = TTE_TO_TTEPFN(cur);
13448 13305 if (TTE_IS_VALID(new)) {
13449 13306 k = TTE_TO_TTEPFN(new);
13450 13307 if (j != k) {
13451 13308 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13452 13309 j, k);
13453 13310 }
13454 13311 } else {
13455 13312 panic("chk_tte: why here?");
13456 13313 }
13457 13314 } else {
13458 13315 if (!TTE_IS_VALID(new)) {
13459 13316 panic("chk_tte: why here2 ?");
13460 13317 }
13461 13318 }
13462 13319 }
13463 13320 }
13464 13321
13465 13322 #endif /* DEBUG */
13466 13323
13467 13324 extern void prefetch_tsbe_read(struct tsbe *);
13468 13325 extern void prefetch_tsbe_write(struct tsbe *);
13469 13326
13470 13327
13471 13328 /*
13472 13329 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13473 13330 * us optimal performance on Cheetah+. You can only have 8 outstanding
13474 13331 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13475 13332  * prefetch to make the best use of the prefetch capability.
13476 13333 */
13477 13334 #define TSBE_PREFETCH_STRIDE (7)
13478 13335
13479 13336 void
13480 13337 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13481 13338 {
13482 13339 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13483 13340 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13484 13341 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13485 13342 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13486 13343 struct tsbe *old;
13487 13344 struct tsbe *new;
13488 13345 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13489 13346 uint64_t va;
13490 13347 int new_offset;
13491 13348 int i;
13492 13349 int vpshift;
13493 13350 int last_prefetch;
13494 13351
13495 13352 if (old_bytes == new_bytes) {
13496 13353 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13497 13354 } else {
13498 13355
13499 13356 /*
13500 13357 		 * A TSBE is 16 bytes, which means there are four TSBEs per
13501 13358 		 * P$ line (64 bytes), so we issue a prefetch every 4 TSBEs.
13502 13359 */
13503 13360 old = (struct tsbe *)old_tsbinfo->tsb_va;
13504 13361 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13505 13362 for (i = 0; i < old_entries; i++, old++) {
13506 13363 if (((i & (4-1)) == 0) && (i < last_prefetch))
13507 13364 prefetch_tsbe_read(old);
13508 13365 if (!old->tte_tag.tag_invalid) {
13509 13366 /*
13510 13367 * We have a valid TTE to remap. Check the
13511 13368 * size. We won't remap 64K or 512K TTEs
13512 13369 * because they span more than one TSB entry
13513 13370 * and are indexed using an 8K virt. page.
13514 13371 * Ditto for 32M and 256M TTEs.
13515 13372 */
13516 13373 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13517 13374 TTE_CSZ(&old->tte_data) == TTE512K)
13518 13375 continue;
13519 13376 if (mmu_page_sizes == max_mmu_page_sizes) {
13520 13377 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13521 13378 TTE_CSZ(&old->tte_data) == TTE256M)
13522 13379 continue;
13523 13380 }
13524 13381
13525 13382 /* clear the lower 22 bits of the va */
13526 13383 va = *(uint64_t *)old << 22;
13527 13384 /* turn va into a virtual pfn */
13528 13385 va >>= 22 - TSB_START_SIZE;
13529 13386 /*
13530 13387 				 * OR in bits from the offset in the tsb
13531 13388 				 * to get the real virtual pfn. These
13532 13389 				 * correspond to bits [21:13] in the va.
13533 13390 */
13534 13391 vpshift =
13535 13392 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13536 13393 0x1ff;
13537 13394 va |= (i << vpshift);
13538 13395 va >>= vpshift;
13539 13396 new_offset = va & (new_entries - 1);
13540 13397 new = new_base + new_offset;
13541 13398 prefetch_tsbe_write(new);
13542 13399 *new = *old;
13543 13400 }
13544 13401 }
13545 13402 }
13546 13403 }
13547 13404
13548 13405 /*
13549 13406 * unused in sfmmu
13550 13407 */
13551 13408 void
13552 13409 hat_dump(void)
13553 13410 {
13554 13411 }
13555 13412
13556 13413 /*
13557 13414 * Called when a thread is exiting and we have switched to the kernel address
13558 13415 * space. Perform the same VM initialization resume() uses when switching
13559 13416 * processes.
13560 13417 *
13561 13418 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13562 13419 * we call it anyway in case the semantics change in the future.
13563 13420 */
13564 13421 /*ARGSUSED*/
13565 13422 void
13566 13423 hat_thread_exit(kthread_t *thd)
13567 13424 {
13568 13425 uint_t pgsz_cnum;
13569 13426 uint_t pstate_save;
13570 13427
13571 13428 ASSERT(thd->t_procp->p_as == &kas);
13572 13429
13573 13430 pgsz_cnum = KCONTEXT;
13574 13431 #ifdef sun4u
13575 13432 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13576 13433 #endif
13577 13434
13578 13435 /*
13579 13436 * Note that sfmmu_load_mmustate() is currently a no-op for
13580 13437 * kernel threads. We need to disable interrupts here,
13581 13438 * simply because otherwise sfmmu_load_mmustate() would panic
13582 13439 * if the caller does not disable interrupts.
13583 13440 */
13584 13441 pstate_save = sfmmu_disable_intrs();
13585 13442
13586 13443 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13587 13444 sfmmu_setctx_sec(pgsz_cnum);
13588 13445 sfmmu_load_mmustate(ksfmmup);
13589 13446 sfmmu_enable_intrs(pstate_save);
13590 13447 }
13591 13448
13592 13449
13593 13450 /*
13594 13451 * SRD support
13595 13452 */
13596 13453 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13597 13454 (((uintptr_t)(vp)) >> 11)) & \
13598 13455 srd_hashmask)
13599 13456
13600 13457 /*
13601 13458 * Attach the process to the srd struct associated with the exec vnode
13602 13459 * from which the process is started.
13603 13460 */
13604 13461 void
13605 13462 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13606 13463 {
13607 13464 uint_t hash = SRD_HASH_FUNCTION(evp);
13608 13465 sf_srd_t *srdp;
13609 13466 sf_srd_t *newsrdp;
13610 13467
13611 13468 ASSERT(sfmmup != ksfmmup);
13612 13469 ASSERT(sfmmup->sfmmu_srdp == NULL);
13613 13470
13614 13471 if (!shctx_on) {
13615 13472 return;
13616 13473 }
13617 13474
13618 13475 VN_HOLD(evp);
13619 13476
13620 13477 if (srd_buckets[hash].srdb_srdp != NULL) {
13621 13478 mutex_enter(&srd_buckets[hash].srdb_lock);
13622 13479 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13623 13480 srdp = srdp->srd_hash) {
13624 13481 if (srdp->srd_evp == evp) {
13625 13482 ASSERT(srdp->srd_refcnt >= 0);
13626 13483 sfmmup->sfmmu_srdp = srdp;
13627 13484 atomic_inc_32(
13628 13485 (volatile uint_t *)&srdp->srd_refcnt);
13629 13486 mutex_exit(&srd_buckets[hash].srdb_lock);
13630 13487 return;
13631 13488 }
13632 13489 }
13633 13490 mutex_exit(&srd_buckets[hash].srdb_lock);
13634 13491 }
13635 13492 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13636 13493 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13637 13494
13638 13495 newsrdp->srd_evp = evp;
13639 13496 newsrdp->srd_refcnt = 1;
13640 13497 newsrdp->srd_hmergnfree = NULL;
13641 13498 newsrdp->srd_ismrgnfree = NULL;
13642 13499
13643 13500 mutex_enter(&srd_buckets[hash].srdb_lock);
13644 13501 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13645 13502 srdp = srdp->srd_hash) {
13646 13503 if (srdp->srd_evp == evp) {
13647 13504 ASSERT(srdp->srd_refcnt >= 0);
13648 13505 sfmmup->sfmmu_srdp = srdp;
13649 13506 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13650 13507 mutex_exit(&srd_buckets[hash].srdb_lock);
13651 13508 kmem_cache_free(srd_cache, newsrdp);
13652 13509 return;
13653 13510 }
13654 13511 }
13655 13512 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13656 13513 srd_buckets[hash].srdb_srdp = newsrdp;
13657 13514 sfmmup->sfmmu_srdp = newsrdp;
13658 13515
13659 13516 mutex_exit(&srd_buckets[hash].srdb_lock);
13660 13517
13661 13518 }
13662 13519
13663 13520 static void
13664 13521 sfmmu_leave_srd(sfmmu_t *sfmmup)
13665 13522 {
13666 13523 vnode_t *evp;
13667 13524 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13668 13525 uint_t hash;
13669 13526 sf_srd_t **prev_srdpp;
13670 13527 sf_region_t *rgnp;
13671 13528 sf_region_t *nrgnp;
13672 13529 #ifdef DEBUG
13673 13530 int rgns = 0;
13674 13531 #endif
13675 13532 int i;
13676 13533
13677 13534 ASSERT(sfmmup != ksfmmup);
13678 13535 ASSERT(srdp != NULL);
13679 13536 ASSERT(srdp->srd_refcnt > 0);
13680 13537 ASSERT(sfmmup->sfmmu_scdp == NULL);
13681 13538 ASSERT(sfmmup->sfmmu_free == 1);
13682 13539
13683 13540 sfmmup->sfmmu_srdp = NULL;
13684 13541 evp = srdp->srd_evp;
13685 13542 ASSERT(evp != NULL);
13686 13543 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13687 13544 VN_RELE(evp);
13688 13545 return;
13689 13546 }
13690 13547
13691 13548 hash = SRD_HASH_FUNCTION(evp);
13692 13549 mutex_enter(&srd_buckets[hash].srdb_lock);
13693 13550 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13694 13551 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13695 13552 if (srdp->srd_evp == evp) {
13696 13553 break;
13697 13554 }
13698 13555 }
13699 13556 if (srdp == NULL || srdp->srd_refcnt) {
13700 13557 mutex_exit(&srd_buckets[hash].srdb_lock);
13701 13558 VN_RELE(evp);
13702 13559 return;
13703 13560 }
13704 13561 *prev_srdpp = srdp->srd_hash;
13705 13562 mutex_exit(&srd_buckets[hash].srdb_lock);
13706 13563
13707 13564 ASSERT(srdp->srd_refcnt == 0);
13708 13565 VN_RELE(evp);
13709 13566
13710 13567 #ifdef DEBUG
13711 13568 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13712 13569 ASSERT(srdp->srd_rgnhash[i] == NULL);
13713 13570 }
13714 13571 #endif /* DEBUG */
13715 13572
13716 13573 	/* free each hme region in the srd */
13717 13574 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13718 13575 nrgnp = rgnp->rgn_next;
13719 13576 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13720 13577 ASSERT(rgnp->rgn_refcnt == 0);
13721 13578 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13722 13579 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13723 13580 ASSERT(rgnp->rgn_hmeflags == 0);
13724 13581 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13725 13582 #ifdef DEBUG
13726 13583 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13727 13584 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13728 13585 }
13729 13586 rgns++;
13730 13587 #endif /* DEBUG */
13731 13588 kmem_cache_free(region_cache, rgnp);
13732 13589 }
13733 13590 ASSERT(rgns == srdp->srd_next_hmerid);
13734 13591
13735 13592 #ifdef DEBUG
13736 13593 rgns = 0;
13737 13594 #endif
13738 13595 	/* free each ism region in the srd */
13739 13596 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13740 13597 nrgnp = rgnp->rgn_next;
13741 13598 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13742 13599 ASSERT(rgnp->rgn_refcnt == 0);
13743 13600 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13744 13601 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13745 13602 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13746 13603 #ifdef DEBUG
13747 13604 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13748 13605 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13749 13606 }
13750 13607 rgns++;
13751 13608 #endif /* DEBUG */
13752 13609 kmem_cache_free(region_cache, rgnp);
13753 13610 }
13754 13611 ASSERT(rgns == srdp->srd_next_ismrid);
13755 13612 ASSERT(srdp->srd_ismbusyrgns == 0);
13756 13613 ASSERT(srdp->srd_hmebusyrgns == 0);
13757 13614
13758 13615 srdp->srd_next_ismrid = 0;
13759 13616 srdp->srd_next_hmerid = 0;
13760 13617
13761 13618 bzero((void *)srdp->srd_ismrgnp,
13762 13619 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13763 13620 bzero((void *)srdp->srd_hmergnp,
13764 13621 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13765 13622
13766 13623 ASSERT(srdp->srd_scdp == NULL);
13767 13624 kmem_cache_free(srd_cache, srdp);
13768 13625 }
13769 13626
13770 13627 /* ARGSUSED */
13771 13628 static int
13772 13629 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13773 13630 {
13774 13631 sf_srd_t *srdp = (sf_srd_t *)buf;
13775 13632 bzero(buf, sizeof (*srdp));
13776 13633
13777 13634 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13778 13635 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13779 13636 return (0);
13780 13637 }
13781 13638
13782 13639 /* ARGSUSED */
13783 13640 static void
13784 13641 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13785 13642 {
13786 13643 sf_srd_t *srdp = (sf_srd_t *)buf;
13787 13644
13788 13645 mutex_destroy(&srdp->srd_mutex);
13789 13646 mutex_destroy(&srdp->srd_scd_mutex);
13790 13647 }
13791 13648
13792 13649 /*
13793 13650 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13794 13651 * at the same time for the same process and address range. This is ensured by
13795 13652 * the fact that address space is locked as writer when a process joins the
13796 13653 * regions. Therefore there's no need to hold an srd lock during the entire
13797 13654 * execution of hat_join_region()/hat_leave_region().
13798 13655 */
13799 13656
13800 13657 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13801 13658 (((uintptr_t)(obj)) >> 11)) & \
13802 13659 srd_rgn_hashmask)
13803 13660 /*
13804 13661 * This routine implements the shared context functionality required when
13805 13662 * attaching a segment to an address space. It must be called from
13806 13663 * hat_share() for D(ISM) segments and from segvn_create() for segments
13807 13664 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13808 13665 * which is saved in the private segment data for hme segments and
13809 13666 * the ism_map structure for ism segments.
13810 13667 */
13811 13668 hat_region_cookie_t
13812 13669 hat_join_region(struct hat *sfmmup,
13813 13670 caddr_t r_saddr,
13814 13671 size_t r_size,
13815 13672 void *r_obj,
13816 13673 u_offset_t r_objoff,
13817 13674 uchar_t r_perm,
13818 13675 uchar_t r_pgszc,
13819 13676 hat_rgn_cb_func_t r_cb_function,
13820 13677 uint_t flags)
13821 13678 {
13822 13679 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13823 13680 uint_t rhash;
13824 13681 uint_t rid;
13825 13682 hatlock_t *hatlockp;
13826 13683 sf_region_t *rgnp;
13827 13684 sf_region_t *new_rgnp = NULL;
13828 13685 int i;
13829 13686 uint16_t *nextidp;
13830 13687 sf_region_t **freelistp;
13831 13688 int maxids;
13832 13689 sf_region_t **rarrp;
13833 13690 uint16_t *busyrgnsp;
13834 13691 ulong_t rttecnt;
13835 13692 uchar_t tteflag;
13836 13693 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13837 13694 int text = (r_type == HAT_REGION_TEXT);
13838 13695
13839 13696 if (srdp == NULL || r_size == 0) {
13840 13697 return (HAT_INVALID_REGION_COOKIE);
13841 13698 }
13842 13699
13843 13700 ASSERT(sfmmup != ksfmmup);
13844 13701 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
13845 13702 ASSERT(srdp->srd_refcnt > 0);
13846 13703 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13847 13704 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13848 13705 ASSERT(r_pgszc < mmu_page_sizes);
13849 13706 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13850 13707 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13851 13708 panic("hat_join_region: region addr or size is not aligned\n");
13852 13709 }
13853 13710
13854 13711
13855 13712 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13856 13713 SFMMU_REGION_HME;
13857 13714 /*
13858 13715 	 * Currently we only support shared hmes for the read-only main text
13859 13716 * region.
13860 13717 */
13861 13718 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13862 13719 (r_perm & PROT_WRITE))) {
13863 13720 return (HAT_INVALID_REGION_COOKIE);
13864 13721 }
13865 13722
13866 13723 rhash = RGN_HASH_FUNCTION(r_obj);
13867 13724
13868 13725 if (r_type == SFMMU_REGION_ISM) {
13869 13726 nextidp = &srdp->srd_next_ismrid;
13870 13727 freelistp = &srdp->srd_ismrgnfree;
13871 13728 maxids = SFMMU_MAX_ISM_REGIONS;
13872 13729 rarrp = srdp->srd_ismrgnp;
13873 13730 busyrgnsp = &srdp->srd_ismbusyrgns;
13874 13731 } else {
13875 13732 nextidp = &srdp->srd_next_hmerid;
13876 13733 freelistp = &srdp->srd_hmergnfree;
13877 13734 maxids = SFMMU_MAX_HME_REGIONS;
13878 13735 rarrp = srdp->srd_hmergnp;
13879 13736 busyrgnsp = &srdp->srd_hmebusyrgns;
13880 13737 }
13881 13738
13882 13739 mutex_enter(&srdp->srd_mutex);
13883 13740
13884 13741 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13885 13742 rgnp = rgnp->rgn_hash) {
13886 13743 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13887 13744 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13888 13745 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13889 13746 break;
13890 13747 }
13891 13748 }
13892 13749
13893 13750 rfound:
13894 13751 if (rgnp != NULL) {
13895 13752 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13896 13753 ASSERT(rgnp->rgn_cb_function == r_cb_function);
13897 13754 ASSERT(rgnp->rgn_refcnt >= 0);
13898 13755 rid = rgnp->rgn_id;
13899 13756 ASSERT(rid < maxids);
13900 13757 ASSERT(rarrp[rid] == rgnp);
13901 13758 ASSERT(rid < *nextidp);
13902 13759 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13903 13760 mutex_exit(&srdp->srd_mutex);
13904 13761 if (new_rgnp != NULL) {
13905 13762 kmem_cache_free(region_cache, new_rgnp);
13906 13763 }
13907 13764 if (r_type == SFMMU_REGION_HME) {
13908 13765 int myjoin =
13909 13766 (sfmmup == astosfmmu(curthread->t_procp->p_as));
13910 13767
13911 13768 sfmmu_link_to_hmeregion(sfmmup, rgnp);
13912 13769 /*
13913 13770 			 * The bitmap should be updated after linking the sfmmu
13914 13771 			 * onto the region list so that pageunload() doesn't skip
13915 13772 			 * the TSB/TLB flush. As soon as the bitmap is updated,
13916 13773 			 * another thread in this process can start accessing
13917 13774 			 * this region.
13918 13775 */
13919 13776 /*
13920 13777 * Normally ttecnt accounting is done as part of
13921 13778 * pagefault handling. But a process may not take any
13922 13779 * pagefaults on shared hmeblks created by some other
13923 13780 * process. To compensate for this assume that the
13924 13781 * entire region will end up faulted in using
13925 13782 * the region's pagesize.
13926 13783 *
13927 13784 */
13928 13785 if (r_pgszc > TTE8K) {
13929 13786 tteflag = 1 << r_pgszc;
13930 13787 if (disable_large_pages & tteflag) {
13931 13788 tteflag = 0;
13932 13789 }
13933 13790 } else {
13934 13791 tteflag = 0;
13935 13792 }
13936 13793 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13937 13794 hatlockp = sfmmu_hat_enter(sfmmup);
13938 13795 sfmmup->sfmmu_rtteflags |= tteflag;
13939 13796 sfmmu_hat_exit(hatlockp);
13940 13797 }
13941 13798 hatlockp = sfmmu_hat_enter(sfmmup);
13942 13799
13943 13800 /*
13944 13801 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
13945 13802 * region to allow for large page allocation failure.
13946 13803 */
13947 13804 if (r_pgszc >= TTE4M) {
13948 13805 sfmmup->sfmmu_tsb0_4minflcnt +=
13949 13806 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13950 13807 }
13951 13808
13952 13809 /* update sfmmu_ttecnt with the shme rgn ttecnt */
13953 13810 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13954 13811 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13955 13812 rttecnt);
13956 13813
13957 13814 if (text && r_pgszc >= TTE4M &&
13958 13815 (tteflag || ((disable_large_pages >> TTE4M) &
13959 13816 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13960 13817 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13961 13818 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13962 13819 }
13963 13820
13964 13821 sfmmu_hat_exit(hatlockp);
13965 13822 /*
13966 13823 			 * On Panther we need to make sure the TLB is programmed
13967 13824 			 * to accept 32M/256M pages. Call
13968 13825 			 * sfmmu_check_page_sizes() now to make sure the TLB is
13969 13826 			 * set up before making hmeregions visible to other
13970 13827 * threads.
13971 13828 */
13972 13829 sfmmu_check_page_sizes(sfmmup, 1);
13973 13830 hatlockp = sfmmu_hat_enter(sfmmup);
13974 13831 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13975 13832
13976 13833 /*
13977 13834 			 * If the context is invalid, the tsb miss exception
13978 13835 			 * code will call sfmmu_check_page_sizes() and update
13979 13836 			 * the tsbmiss area later.
13980 13837 */
13981 13838 kpreempt_disable();
13982 13839 if (myjoin &&
13983 13840 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13984 13841 != INVALID_CONTEXT)) {
13985 13842 struct tsbmiss *tsbmp;
13986 13843
13987 13844 tsbmp = &tsbmiss_area[CPU->cpu_id];
13988 13845 ASSERT(sfmmup == tsbmp->usfmmup);
13989 13846 BT_SET(tsbmp->shmermap, rid);
13990 13847 if (r_pgszc > TTE64K) {
13991 13848 tsbmp->uhat_rtteflags |= tteflag;
13992 13849 }
13993 13850
13994 13851 }
13995 13852 kpreempt_enable();
13996 13853
13997 13854 sfmmu_hat_exit(hatlockp);
13998 13855 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
13999 13856 HAT_INVALID_REGION_COOKIE);
14000 13857 } else {
14001 13858 hatlockp = sfmmu_hat_enter(sfmmup);
14002 13859 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14003 13860 sfmmu_hat_exit(hatlockp);
14004 13861 }
14005 13862 ASSERT(rid < maxids);
14006 13863
14007 13864 if (r_type == SFMMU_REGION_ISM) {
14008 13865 sfmmu_find_scd(sfmmup);
14009 13866 }
14010 13867 return ((hat_region_cookie_t)((uint64_t)rid));
14011 13868 }
14012 13869
14013 13870 ASSERT(new_rgnp == NULL);
14014 13871
14015 13872 if (*busyrgnsp >= maxids) {
14016 13873 mutex_exit(&srdp->srd_mutex);
14017 13874 return (HAT_INVALID_REGION_COOKIE);
14018 13875 }
14019 13876
14020 13877 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14021 13878 if (*freelistp != NULL) {
14022 13879 rgnp = *freelistp;
14023 13880 *freelistp = rgnp->rgn_next;
14024 13881 ASSERT(rgnp->rgn_id < *nextidp);
14025 13882 ASSERT(rgnp->rgn_id < maxids);
14026 13883 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14027 13884 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14028 13885 == r_type);
14029 13886 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14030 13887 ASSERT(rgnp->rgn_hmeflags == 0);
14031 13888 } else {
14032 13889 /*
14033 13890 * release local locks before memory allocation.
14034 13891 */
14035 13892 mutex_exit(&srdp->srd_mutex);
14036 13893
14037 13894 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14038 13895
14039 13896 mutex_enter(&srdp->srd_mutex);
14040 13897 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14041 13898 rgnp = rgnp->rgn_hash) {
14042 13899 if (rgnp->rgn_saddr == r_saddr &&
14043 13900 rgnp->rgn_size == r_size &&
14044 13901 rgnp->rgn_obj == r_obj &&
14045 13902 rgnp->rgn_objoff == r_objoff &&
14046 13903 rgnp->rgn_perm == r_perm &&
14047 13904 rgnp->rgn_pgszc == r_pgszc) {
14048 13905 break;
14049 13906 }
14050 13907 }
14051 13908 if (rgnp != NULL) {
14052 13909 goto rfound;
14053 13910 }
14054 13911
14055 13912 if (*nextidp >= maxids) {
14056 13913 mutex_exit(&srdp->srd_mutex);
14057 13914 goto fail;
14058 13915 }
14059 13916 rgnp = new_rgnp;
14060 13917 new_rgnp = NULL;
14061 13918 rgnp->rgn_id = (*nextidp)++;
14062 13919 ASSERT(rgnp->rgn_id < maxids);
14063 13920 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14064 13921 rarrp[rgnp->rgn_id] = rgnp;
14065 13922 }
14066 13923
14067 13924 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14068 13925 ASSERT(rgnp->rgn_hmeflags == 0);
14069 13926 #ifdef DEBUG
14070 13927 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14071 13928 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14072 13929 }
14073 13930 #endif
14074 13931 rgnp->rgn_saddr = r_saddr;
14075 13932 rgnp->rgn_size = r_size;
14076 13933 rgnp->rgn_obj = r_obj;
14077 13934 rgnp->rgn_objoff = r_objoff;
14078 13935 rgnp->rgn_perm = r_perm;
14079 13936 rgnp->rgn_pgszc = r_pgszc;
14080 13937 rgnp->rgn_flags = r_type;
14081 13938 rgnp->rgn_refcnt = 0;
14082 13939 rgnp->rgn_cb_function = r_cb_function;
14083 13940 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14084 13941 srdp->srd_rgnhash[rhash] = rgnp;
14085 13942 (*busyrgnsp)++;
14086 13943 ASSERT(*busyrgnsp <= maxids);
14087 13944 goto rfound;
14088 13945
14089 13946 fail:
14090 13947 ASSERT(new_rgnp != NULL);
14091 13948 kmem_cache_free(region_cache, new_rgnp);
14092 13949 return (HAT_INVALID_REGION_COOKIE);
14093 13950 }
14094 13951
14095 13952 /*
14096 13953 * This function implements the shared context functionality required
14097 13954 * when detaching a segment from an address space. It must be called
14098 13955  * from hat_unshare() for all D(ISM) segments and from segvn_unmap()
14099 13956  * for segments with a valid region_cookie.
14100 13957  * It will also be called from all seg_vn routines which change a
14101 13958  * segment's attributes, such as segvn_setprot(), segvn_setpagesize(),
14102 13959  * segvn_clrszc() and segvn_advise(), as well as in the case of a COW
14103 13960  * fault from segvn_fault().
14104 13961 */
14105 13962 void
14106 13963 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14107 13964 {
14108 13965 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14109 13966 sf_scd_t *scdp;
14110 13967 uint_t rhash;
14111 13968 uint_t rid = (uint_t)((uint64_t)rcookie);
14112 13969 hatlock_t *hatlockp = NULL;
14113 13970 sf_region_t *rgnp;
14114 13971 sf_region_t **prev_rgnpp;
14115 13972 sf_region_t *cur_rgnp;
14116 13973 void *r_obj;
14117 13974 int i;
14118 13975 caddr_t r_saddr;
14119 13976 caddr_t r_eaddr;
14120 13977 size_t r_size;
14121 13978 uchar_t r_pgszc;
14122 13979 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14123 13980
14124 13981 ASSERT(sfmmup != ksfmmup);
14125 13982 ASSERT(srdp != NULL);
14126 13983 ASSERT(srdp->srd_refcnt > 0);
14127 13984 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14128 13985 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14129 13986 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14130 13987
14131 13988 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14132 13989 SFMMU_REGION_HME;
14133 13990
14134 13991 if (r_type == SFMMU_REGION_ISM) {
14135 13992 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14136 13993 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14137 13994 rgnp = srdp->srd_ismrgnp[rid];
14138 13995 } else {
14139 13996 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14140 13997 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14141 13998 rgnp = srdp->srd_hmergnp[rid];
14142 13999 }
14143 14000 ASSERT(rgnp != NULL);
14144 14001 ASSERT(rgnp->rgn_id == rid);
14145 14002 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14146 14003 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14147 14004 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14148 14005
14149 14006 if (sfmmup->sfmmu_free) {
14150 14007 ulong_t rttecnt;
14151 14008 r_pgszc = rgnp->rgn_pgszc;
14152 14009 r_size = rgnp->rgn_size;
14153 14010
14154 14011 ASSERT(sfmmup->sfmmu_scdp == NULL);
14155 14012 if (r_type == SFMMU_REGION_ISM) {
14156 14013 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14157 14014 } else {
14158 14015 /* update shme rgns ttecnt in sfmmu_ttecnt */
14159 14016 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14160 14017 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14161 14018
14162 14019 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14163 14020 -rttecnt);
14164 14021
14165 14022 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14166 14023 }
14167 14024 } else if (r_type == SFMMU_REGION_ISM) {
14168 14025 hatlockp = sfmmu_hat_enter(sfmmup);
14169 14026 ASSERT(rid < srdp->srd_next_ismrid);
14170 14027 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14171 14028 scdp = sfmmup->sfmmu_scdp;
14172 14029 if (scdp != NULL &&
14173 14030 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14174 14031 sfmmu_leave_scd(sfmmup, r_type);
14175 14032 ASSERT(sfmmu_hat_lock_held(sfmmup));
14176 14033 }
14177 14034 sfmmu_hat_exit(hatlockp);
14178 14035 } else {
14179 14036 ulong_t rttecnt;
14180 14037 r_pgszc = rgnp->rgn_pgszc;
14181 14038 r_saddr = rgnp->rgn_saddr;
14182 14039 r_size = rgnp->rgn_size;
14183 14040 r_eaddr = r_saddr + r_size;
14184 14041
14185 14042 ASSERT(r_type == SFMMU_REGION_HME);
14186 14043 hatlockp = sfmmu_hat_enter(sfmmup);
14187 14044 ASSERT(rid < srdp->srd_next_hmerid);
14188 14045 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14189 14046
14190 14047 /*
14191 14048 		 * If the region is part of an SCD, call sfmmu_leave_scd().
14192 14049 		 * Otherwise, if the process is not exiting and has a valid
14193 14050 		 * context, just drop the context on the floor to lose stale TLB
14194 14051 		 * entries and force the update of the tsb miss area to reflect
14195 14052 		 * the new region map. After that, clean our TSB entries.
14196 14053 */
14197 14054 scdp = sfmmup->sfmmu_scdp;
14198 14055 if (scdp != NULL &&
14199 14056 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14200 14057 sfmmu_leave_scd(sfmmup, r_type);
14201 14058 ASSERT(sfmmu_hat_lock_held(sfmmup));
14202 14059 }
14203 14060 sfmmu_invalidate_ctx(sfmmup);
14204 14061
14205 14062 i = TTE8K;
14206 14063 while (i < mmu_page_sizes) {
14207 14064 if (rgnp->rgn_ttecnt[i] != 0) {
14208 14065 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14209 14066 r_eaddr, i);
14210 14067 if (i < TTE4M) {
14211 14068 i = TTE4M;
14212 14069 continue;
14213 14070 } else {
14214 14071 break;
14215 14072 }
14216 14073 }
14217 14074 i++;
14218 14075 }
14219 14076 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14220 14077 if (r_pgszc >= TTE4M) {
14221 14078 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14222 14079 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14223 14080 rttecnt);
14224 14081 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14225 14082 }
14226 14083
14227 14084 /* update shme rgns ttecnt in sfmmu_ttecnt */
14228 14085 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14229 14086 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14230 14087 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14231 14088
14232 14089 sfmmu_hat_exit(hatlockp);
14233 14090 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14234 14091 /* sfmmup left the scd, grow private tsb */
14235 14092 sfmmu_check_page_sizes(sfmmup, 1);
14236 14093 } else {
14237 14094 sfmmu_check_page_sizes(sfmmup, 0);
14238 14095 }
14239 14096 }
14240 14097
14241 14098 if (r_type == SFMMU_REGION_HME) {
14242 14099 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14243 14100 }
14244 14101
14245 14102 r_obj = rgnp->rgn_obj;
14246 14103 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14247 14104 return;
14248 14105 }
14249 14106
14250 14107 /*
14251 14108 * looks like nobody uses this region anymore. Free it.
14252 14109 */
14253 14110 rhash = RGN_HASH_FUNCTION(r_obj);
14254 14111 mutex_enter(&srdp->srd_mutex);
14255 14112 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14256 14113 (cur_rgnp = *prev_rgnpp) != NULL;
14257 14114 prev_rgnpp = &cur_rgnp->rgn_hash) {
14258 14115 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14259 14116 break;
14260 14117 }
14261 14118 }
14262 14119
14263 14120 if (cur_rgnp == NULL) {
14264 14121 mutex_exit(&srdp->srd_mutex);
14265 14122 return;
14266 14123 }
14267 14124
14268 14125 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14269 14126 *prev_rgnpp = rgnp->rgn_hash;
14270 14127 if (r_type == SFMMU_REGION_ISM) {
14271 14128 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14272 14129 ASSERT(rid < srdp->srd_next_ismrid);
14273 14130 rgnp->rgn_next = srdp->srd_ismrgnfree;
14274 14131 srdp->srd_ismrgnfree = rgnp;
14275 14132 ASSERT(srdp->srd_ismbusyrgns > 0);
14276 14133 srdp->srd_ismbusyrgns--;
14277 14134 mutex_exit(&srdp->srd_mutex);
14278 14135 return;
14279 14136 }
14280 14137 mutex_exit(&srdp->srd_mutex);
14281 14138
14282 14139 /*
14283 14140 * Destroy region's hmeblks.
14284 14141 */
14285 14142 sfmmu_unload_hmeregion(srdp, rgnp);
14286 14143
14287 14144 rgnp->rgn_hmeflags = 0;
14288 14145
14289 14146 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14290 14147 ASSERT(rgnp->rgn_id == rid);
14291 14148 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14292 14149 rgnp->rgn_ttecnt[i] = 0;
14293 14150 }
14294 14151 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14295 14152 mutex_enter(&srdp->srd_mutex);
14296 14153 ASSERT(rid < srdp->srd_next_hmerid);
14297 14154 rgnp->rgn_next = srdp->srd_hmergnfree;
14298 14155 srdp->srd_hmergnfree = rgnp;
14299 14156 ASSERT(srdp->srd_hmebusyrgns > 0);
14300 14157 srdp->srd_hmebusyrgns--;
14301 14158 mutex_exit(&srdp->srd_mutex);
14302 14159 }
14303 14160
14304 14161 /*
14305 14162 * For now only called for hmeblk regions and not for ISM regions.
14306 14163 */
14307 14164 void
14308 14165 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14309 14166 {
14310 14167 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14311 14168 uint_t rid = (uint_t)((uint64_t)rcookie);
14312 14169 sf_region_t *rgnp;
14313 14170 sf_rgn_link_t *rlink;
14314 14171 sf_rgn_link_t *hrlink;
14315 14172 ulong_t rttecnt;
14316 14173
14317 14174 ASSERT(sfmmup != ksfmmup);
14318 14175 ASSERT(srdp != NULL);
14319 14176 ASSERT(srdp->srd_refcnt > 0);
14320 14177
14321 14178 ASSERT(rid < srdp->srd_next_hmerid);
14322 14179 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14323 14180 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14324 14181
14325 14182 rgnp = srdp->srd_hmergnp[rid];
14326 14183 ASSERT(rgnp->rgn_refcnt > 0);
14327 14184 ASSERT(rgnp->rgn_id == rid);
14328 14185 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14329 14186 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14330 14187
14331 14188 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14332 14189
14333 14190 /* LINTED: constant in conditional context */
14334 14191 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14335 14192 ASSERT(rlink != NULL);
14336 14193 mutex_enter(&rgnp->rgn_mutex);
14337 14194 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14338 14195 /* LINTED: constant in conditional context */
14339 14196 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14340 14197 ASSERT(hrlink != NULL);
14341 14198 ASSERT(hrlink->prev == NULL);
14342 14199 rlink->next = rgnp->rgn_sfmmu_head;
14343 14200 rlink->prev = NULL;
14344 14201 hrlink->prev = sfmmup;
14345 14202 /*
14346 14203 * make sure rlink's next field is correct
14347 14204 * before making this link visible.
14348 14205 */
14349 14206 membar_stst();
14350 14207 rgnp->rgn_sfmmu_head = sfmmup;
14351 14208 mutex_exit(&rgnp->rgn_mutex);
14352 14209
14353 14210 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14354 14211 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14355 14212 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14356 14213 /* update tsb0 inflation count */
14357 14214 if (rgnp->rgn_pgszc >= TTE4M) {
14358 14215 sfmmup->sfmmu_tsb0_4minflcnt +=
14359 14216 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14360 14217 }
14361 14218 /*
14362 14219 * Update regionid bitmask without hat lock since no other thread
14363 14220 * can update this region bitmask right now.
14364 14221 */
14365 14222 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14366 14223 }
14367 14224
14368 14225 /* ARGSUSED */
14369 14226 static int
14370 14227 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14371 14228 {
14372 14229 sf_region_t *rgnp = (sf_region_t *)buf;
14373 14230 bzero(buf, sizeof (*rgnp));
14374 14231
14375 14232 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14376 14233
14377 14234 return (0);
14378 14235 }
14379 14236
14380 14237 /* ARGSUSED */
14381 14238 static void
14382 14239 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14383 14240 {
14384 14241 sf_region_t *rgnp = (sf_region_t *)buf;
14385 14242 mutex_destroy(&rgnp->rgn_mutex);
14386 14243 }
14387 14244
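/*
 * The two helpers below return 1 if no bits are set in the given region
 * bitmap (i.e. the map references no regions), and 0 otherwise.
 */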
14388 14245 static int
14389 14246 sfrgnmap_isnull(sf_region_map_t *map)
14390 14247 {
14391 14248 int i;
14392 14249
14393 14250 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14394 14251 if (map->bitmap[i] != 0) {
14395 14252 return (0);
14396 14253 }
14397 14254 }
14398 14255 return (1);
14399 14256 }
14400 14257
14401 14258 static int
14402 14259 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14403 14260 {
14404 14261 int i;
14405 14262
14406 14263 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14407 14264 if (map->bitmap[i] != 0) {
14408 14265 return (0);
14409 14266 }
14410 14267 }
14411 14268 return (1);
14412 14269 }
14413 14270
14414 14271 #ifdef DEBUG
14415 14272 static void
14416 14273 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14417 14274 {
14418 14275 sfmmu_t *sp;
14419 14276 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14420 14277
14421 14278 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14422 14279 ASSERT(srdp == sp->sfmmu_srdp);
14423 14280 if (sp == sfmmup) {
14424 14281 if (onlist) {
14425 14282 return;
14426 14283 } else {
14427 14284 panic("shctx: sfmmu 0x%p found on scd"
14428 14285 "list 0x%p", (void *)sfmmup,
14429 14286 (void *)*headp);
14430 14287 }
14431 14288 }
14432 14289 }
14433 14290 if (onlist) {
14434 14291 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14435 14292 (void *)sfmmup, (void *)*headp);
14436 14293 } else {
14437 14294 return;
14438 14295 }
14439 14296 }
14440 14297 #else /* DEBUG */
14441 14298 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14442 14299 #endif /* DEBUG */
14443 14300
14444 14301 /*
14445 14302 * Removes an sfmmu from the SCD sfmmu list.
14446 14303 */
14447 14304 static void
14448 14305 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14449 14306 {
14450 14307 ASSERT(sfmmup->sfmmu_srdp != NULL);
14451 14308 check_scd_sfmmu_list(headp, sfmmup, 1);
14452 14309 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14453 14310 ASSERT(*headp != sfmmup);
14454 14311 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14455 14312 sfmmup->sfmmu_scd_link.next;
14456 14313 } else {
14457 14314 ASSERT(*headp == sfmmup);
14458 14315 *headp = sfmmup->sfmmu_scd_link.next;
14459 14316 }
14460 14317 if (sfmmup->sfmmu_scd_link.next != NULL) {
14461 14318 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14462 14319 sfmmup->sfmmu_scd_link.prev;
14463 14320 }
14464 14321 }
14465 14322
14466 14323
14467 14324 /*
14468 14325 * Adds an sfmmu to the start of the queue.
14469 14326 */
14470 14327 static void
14471 14328 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14472 14329 {
14473 14330 check_scd_sfmmu_list(headp, sfmmup, 0);
14474 14331 sfmmup->sfmmu_scd_link.prev = NULL;
14475 14332 sfmmup->sfmmu_scd_link.next = *headp;
14476 14333 if (*headp != NULL)
14477 14334 (*headp)->sfmmu_scd_link.prev = sfmmup;
14478 14335 *headp = sfmmup;
14479 14336 }
14480 14337
14481 14338 /*
14482 14339  * Remove an scd from the queue.
14483 14340 */
14484 14341 static void
14485 14342 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14486 14343 {
14487 14344 if (scdp->scd_prev != NULL) {
14488 14345 ASSERT(*headp != scdp);
14489 14346 scdp->scd_prev->scd_next = scdp->scd_next;
14490 14347 } else {
14491 14348 ASSERT(*headp == scdp);
14492 14349 *headp = scdp->scd_next;
14493 14350 }
14494 14351
14495 14352 if (scdp->scd_next != NULL) {
14496 14353 scdp->scd_next->scd_prev = scdp->scd_prev;
14497 14354 }
14498 14355 }
14499 14356
14500 14357 /*
14501 14358 * Add an scd to the start of the queue.
14502 14359 */
14503 14360 static void
14504 14361 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14505 14362 {
14506 14363 scdp->scd_prev = NULL;
14507 14364 scdp->scd_next = *headp;
14508 14365 if (*headp != NULL) {
14509 14366 (*headp)->scd_prev = scdp;
14510 14367 }
14511 14368 *headp = scdp;
14512 14369 }
14513 14370
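/*
 * Allocate the TSBs for an SCD: walk the SCD's region bitmap, summing the
 * expected 8K- and 4M-indexed TTE counts contributed by its HME and ISM
 * regions, then allocate a shared 8K-indexed TSB (and a second 4M-indexed
 * TSB if any 4M mappings are expected), retrying at the 4M size code
 * before giving up. Returns TSB_SUCCESS or TSB_ALLOCFAIL.
 */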
14514 14371 static int
14515 14372 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14516 14373 {
14517 14374 uint_t rid;
14518 14375 uint_t i;
14519 14376 uint_t j;
14520 14377 ulong_t w;
14521 14378 sf_region_t *rgnp;
14522 14379 ulong_t tte8k_cnt = 0;
14523 14380 ulong_t tte4m_cnt = 0;
14524 14381 uint_t tsb_szc;
14525 14382 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14526 14383 sfmmu_t *ism_hatid;
14527 14384 struct tsb_info *newtsb;
14528 14385 int szc;
14529 14386
14530 14387 ASSERT(srdp != NULL);
14531 14388
14532 14389 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14533 14390 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14534 14391 continue;
14535 14392 }
14536 14393 j = 0;
14537 14394 while (w) {
14538 14395 if (!(w & 0x1)) {
14539 14396 j++;
14540 14397 w >>= 1;
14541 14398 continue;
14542 14399 }
14543 14400 rid = (i << BT_ULSHIFT) | j;
14544 14401 j++;
14545 14402 w >>= 1;
14546 14403
14547 14404 if (rid < SFMMU_MAX_HME_REGIONS) {
14548 14405 rgnp = srdp->srd_hmergnp[rid];
14549 14406 ASSERT(rgnp->rgn_id == rid);
14550 14407 ASSERT(rgnp->rgn_refcnt > 0);
14551 14408
14552 14409 if (rgnp->rgn_pgszc < TTE4M) {
14553 14410 tte8k_cnt += rgnp->rgn_size >>
14554 14411 TTE_PAGE_SHIFT(TTE8K);
14555 14412 } else {
14556 14413 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14557 14414 tte4m_cnt += rgnp->rgn_size >>
14558 14415 TTE_PAGE_SHIFT(TTE4M);
14559 14416 /*
14560 14417 * Inflate SCD tsb0 by preallocating
14561 14418 * 1/4 8k ttecnt for 4M regions to
14562 14419 * allow for lgpg alloc failure.
14563 14420 */
14564 14421 tte8k_cnt += rgnp->rgn_size >>
14565 14422 (TTE_PAGE_SHIFT(TTE8K) + 2);
14566 14423 }
14567 14424 } else {
14568 14425 rid -= SFMMU_MAX_HME_REGIONS;
14569 14426 rgnp = srdp->srd_ismrgnp[rid];
14570 14427 ASSERT(rgnp->rgn_id == rid);
14571 14428 ASSERT(rgnp->rgn_refcnt > 0);
14572 14429
14573 14430 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14574 14431 ASSERT(ism_hatid->sfmmu_ismhat);
14575 14432
14576 14433 for (szc = 0; szc < TTE4M; szc++) {
14577 14434 tte8k_cnt +=
14578 14435 ism_hatid->sfmmu_ttecnt[szc] <<
14579 14436 TTE_BSZS_SHIFT(szc);
14580 14437 }
14581 14438
14582 14439 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14583 14440 if (rgnp->rgn_pgszc >= TTE4M) {
14584 14441 tte4m_cnt += rgnp->rgn_size >>
14585 14442 TTE_PAGE_SHIFT(TTE4M);
14586 14443 }
14587 14444 }
14588 14445 }
14589 14446 }
14590 14447
14591 14448 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14592 14449
14593 14450 /* Allocate both the SCD TSBs here. */
14594 14451 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14595 14452 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14596 14453 (tsb_szc <= TSB_4M_SZCODE ||
14597 14454 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14598 14455 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14599 14456 TSB_ALLOC, scsfmmup))) {
14600 14457
14601 14458 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14602 14459 return (TSB_ALLOCFAIL);
14603 14460 } else {
14604 14461 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14605 14462
14606 14463 if (tte4m_cnt) {
14607 14464 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14608 14465 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14609 14466 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14610 14467 (tsb_szc <= TSB_4M_SZCODE ||
14611 14468 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14612 14469 TSB4M|TSB32M|TSB256M,
14613 14470 TSB_ALLOC, scsfmmup))) {
14614 14471 /*
14615 14472 * If we fail to allocate the 2nd shared tsb,
14616 14473 				 * just free the 1st tsb and return failure.
14617 14474 */
14618 14475 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14619 14476 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14620 14477 return (TSB_ALLOCFAIL);
14621 14478 } else {
14622 14479 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14623 14480 newtsb->tsb_flags |= TSB_SHAREDCTX;
14624 14481 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14625 14482 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14626 14483 }
14627 14484 }
14628 14485 SFMMU_STAT(sf_scd_1sttsb_alloc);
14629 14486 }
14630 14487 return (TSB_SUCCESS);
14631 14488 }
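/*
 * A rough worked example of the sizing above, assuming the usual 8K and
 * 4M page sizes (TTE_PAGE_SHIFT(TTE8K) == 13, TTE_PAGE_SHIFT(TTE4M) == 22):
 * a single 256MB hme region mapped with 4M pages contributes
 *
 *	tte4m_cnt += 256MB >> 22 = 64		(sizes the 4M-indexed TSB)
 *	tte8k_cnt += 256MB >> (13 + 2) = 8192	(1/4 of its 8K pages, to
 *						 cover lgpg alloc failure)
 *
 * The first (8K-indexed) TSB is then sized from tte8k_cnt, and the second
 * TSB is only allocated when tte4m_cnt is non-zero.
 */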
14632 14489
14633 14490 static void
14634 14491 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14635 14492 {
14636 14493 while (scd_sfmmu->sfmmu_tsb != NULL) {
14637 14494 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14638 14495 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14639 14496 scd_sfmmu->sfmmu_tsb = next;
14640 14497 }
14641 14498 }
14642 14499
14643 14500 /*
14644 14501 * Link the sfmmu onto the hme region list.
14645 14502 */
14646 14503 void
14647 14504 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14648 14505 {
14649 14506 uint_t rid;
14650 14507 sf_rgn_link_t *rlink;
14651 14508 sfmmu_t *head;
14652 14509 sf_rgn_link_t *hrlink;
14653 14510
14654 14511 rid = rgnp->rgn_id;
14655 14512 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14656 14513
14657 14514 /* LINTED: constant in conditional context */
14658 14515 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14659 14516 ASSERT(rlink != NULL);
14660 14517 mutex_enter(&rgnp->rgn_mutex);
14661 14518 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14662 14519 rlink->next = NULL;
14663 14520 rlink->prev = NULL;
14664 14521 /*
14665 14522 * make sure rlink's next field is NULL
14666 14523 * before making this link visible.
14667 14524 */
14668 14525 membar_stst();
14669 14526 rgnp->rgn_sfmmu_head = sfmmup;
14670 14527 } else {
14671 14528 /* LINTED: constant in conditional context */
14672 14529 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14673 14530 ASSERT(hrlink != NULL);
14674 14531 ASSERT(hrlink->prev == NULL);
14675 14532 rlink->next = head;
14676 14533 rlink->prev = NULL;
14677 14534 hrlink->prev = sfmmup;
14678 14535 /*
14679 14536 * make sure rlink's next field is correct
14680 14537 * before making this link visible.
14681 14538 */
14682 14539 membar_stst();
14683 14540 rgnp->rgn_sfmmu_head = sfmmup;
14684 14541 }
14685 14542 mutex_exit(&rgnp->rgn_mutex);
14686 14543 }
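/*
 * The store ordering above matters because readers may walk this list
 * without taking rgn_mutex; a rough sketch of such a forward walk (the
 * real one is in sfmmu_rgntlb_demap() and differs in detail):
 *
 *	for (sp = rgnp->rgn_sfmmu_head; sp != NULL; sp = rl->next) {
 *		SFMMU_HMERID2RLINKP(sp, rid, rl, 0, 0);
 *		(demap on behalf of sp)
 *	}
 *
 * The membar_stst() guarantees the new entry's rlink fields are
 * initialized before the entry becomes reachable through rgn_sfmmu_head,
 * so a concurrent walker never follows an uninitialized next pointer.
 */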
14687 14544
14688 14545 /*
14689 14546 * Unlink the sfmmu from the hme region list.
14690 14547 */
14691 14548 void
14692 14549 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14693 14550 {
14694 14551 uint_t rid;
14695 14552 sf_rgn_link_t *rlink;
14696 14553
14697 14554 rid = rgnp->rgn_id;
14698 14555 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14699 14556
14700 14557 /* LINTED: constant in conditional context */
14701 14558 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14702 14559 ASSERT(rlink != NULL);
14703 14560 mutex_enter(&rgnp->rgn_mutex);
14704 14561 if (rgnp->rgn_sfmmu_head == sfmmup) {
14705 14562 sfmmu_t *next = rlink->next;
14706 14563 rgnp->rgn_sfmmu_head = next;
14707 14564 /*
14708 14565 * if we are stopped by xc_attention() after this
14709 14566 * point the forward link walking in
14710 14567 * sfmmu_rgntlb_demap() will work correctly since the
14711 14568 * head correctly points to the next element.
14712 14569 */
14713 14570 membar_stst();
14714 14571 rlink->next = NULL;
14715 14572 ASSERT(rlink->prev == NULL);
14716 14573 if (next != NULL) {
14717 14574 sf_rgn_link_t *nrlink;
14718 14575 /* LINTED: constant in conditional context */
14719 14576 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14720 14577 ASSERT(nrlink != NULL);
14721 14578 ASSERT(nrlink->prev == sfmmup);
14722 14579 nrlink->prev = NULL;
14723 14580 }
14724 14581 } else {
14725 14582 sfmmu_t *next = rlink->next;
14726 14583 sfmmu_t *prev = rlink->prev;
14727 14584 sf_rgn_link_t *prlink;
14728 14585
14729 14586 ASSERT(prev != NULL);
14730 14587 /* LINTED: constant in conditional context */
14731 14588 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14732 14589 ASSERT(prlink != NULL);
14733 14590 ASSERT(prlink->next == sfmmup);
14734 14591 prlink->next = next;
14735 14592 /*
14736 14593 * if we are stopped by xc_attention()
14737 14594 * after this point the forward link walking
14738 14595 * will work correctly since the prev element
14739 14596 * correctly points to the next element.
14740 14597 */
14741 14598 membar_stst();
14742 14599 rlink->next = NULL;
14743 14600 rlink->prev = NULL;
14744 14601 if (next != NULL) {
14745 14602 sf_rgn_link_t *nrlink;
14746 14603 /* LINTED: constant in conditional context */
14747 14604 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14748 14605 ASSERT(nrlink != NULL);
14749 14606 ASSERT(nrlink->prev == sfmmup);
14750 14607 nrlink->prev = prev;
14751 14608 }
14752 14609 }
14753 14610 mutex_exit(&rgnp->rgn_mutex);
14754 14611 }
14755 14612
14756 14613 /*
14757 14614 * Link scd sfmmu onto ism or hme region list for each region in the
14758 14615 * scd region map.
14759 14616 */
14760 14617 void
14761 14618 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14762 14619 {
14763 14620 uint_t rid;
14764 14621 uint_t i;
14765 14622 uint_t j;
14766 14623 ulong_t w;
14767 14624 sf_region_t *rgnp;
14768 14625 sfmmu_t *scsfmmup;
14769 14626
14770 14627 scsfmmup = scdp->scd_sfmmup;
14771 14628 ASSERT(scsfmmup->sfmmu_scdhat);
14772 14629 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14773 14630 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14774 14631 continue;
14775 14632 }
14776 14633 j = 0;
14777 14634 while (w) {
14778 14635 if (!(w & 0x1)) {
14779 14636 j++;
14780 14637 w >>= 1;
14781 14638 continue;
14782 14639 }
14783 14640 rid = (i << BT_ULSHIFT) | j;
14784 14641 j++;
14785 14642 w >>= 1;
14786 14643
14787 14644 if (rid < SFMMU_MAX_HME_REGIONS) {
14788 14645 rgnp = srdp->srd_hmergnp[rid];
14789 14646 ASSERT(rgnp->rgn_id == rid);
14790 14647 ASSERT(rgnp->rgn_refcnt > 0);
14791 14648 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14792 14649 } else {
14793 14650 sfmmu_t *ism_hatid = NULL;
14794 14651 ism_ment_t *ism_ment;
14795 14652 rid -= SFMMU_MAX_HME_REGIONS;
14796 14653 rgnp = srdp->srd_ismrgnp[rid];
14797 14654 ASSERT(rgnp->rgn_id == rid);
14798 14655 ASSERT(rgnp->rgn_refcnt > 0);
14799 14656
14800 14657 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14801 14658 ASSERT(ism_hatid->sfmmu_ismhat);
14802 14659 ism_ment = &scdp->scd_ism_links[rid];
14803 14660 ism_ment->iment_hat = scsfmmup;
14804 14661 ism_ment->iment_base_va = rgnp->rgn_saddr;
14805 14662 mutex_enter(&ism_mlist_lock);
14806 14663 iment_add(ism_ment, ism_hatid);
14807 14664 mutex_exit(&ism_mlist_lock);
14808 14665
14809 14666 }
14810 14667 }
14811 14668 }
14812 14669 }
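/*
 * For reference, the region id decoding used by the bitmap walks above
 * and in sfmmu_alloc_scd_tsbs() is rid = (word_index << BT_ULSHIFT) |
 * bit_index; assuming 64-bit bitmap words (BT_ULSHIFT == 6), word 1
 * bit 3 decodes to rid 67. Ids below SFMMU_MAX_HME_REGIONS index
 * srd_hmergnp[], while larger ids are ISM regions and index
 * srd_ismrgnp[] once SFMMU_MAX_HME_REGIONS has been subtracted.
 */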
14813 14670 /*
14814 14671 * Unlink scd sfmmu from ism or hme region list for each region in the
14815 14672 * scd region map.
14816 14673 */
14817 14674 void
14818 14675 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14819 14676 {
14820 14677 uint_t rid;
14821 14678 uint_t i;
14822 14679 uint_t j;
14823 14680 ulong_t w;
14824 14681 sf_region_t *rgnp;
14825 14682 sfmmu_t *scsfmmup;
14826 14683
14827 14684 scsfmmup = scdp->scd_sfmmup;
14828 14685 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14829 14686 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14830 14687 continue;
14831 14688 }
14832 14689 j = 0;
14833 14690 while (w) {
14834 14691 if (!(w & 0x1)) {
14835 14692 j++;
14836 14693 w >>= 1;
14837 14694 continue;
14838 14695 }
14839 14696 rid = (i << BT_ULSHIFT) | j;
14840 14697 j++;
14841 14698 w >>= 1;
14842 14699
14843 14700 if (rid < SFMMU_MAX_HME_REGIONS) {
14844 14701 rgnp = srdp->srd_hmergnp[rid];
14845 14702 ASSERT(rgnp->rgn_id == rid);
14846 14703 ASSERT(rgnp->rgn_refcnt > 0);
14847 14704 sfmmu_unlink_from_hmeregion(scsfmmup,
14848 14705 rgnp);
14849 14706
14850 14707 } else {
14851 14708 sfmmu_t *ism_hatid = NULL;
14852 14709 ism_ment_t *ism_ment;
14853 14710 rid -= SFMMU_MAX_HME_REGIONS;
14854 14711 rgnp = srdp->srd_ismrgnp[rid];
14855 14712 ASSERT(rgnp->rgn_id == rid);
14856 14713 ASSERT(rgnp->rgn_refcnt > 0);
14857 14714
14858 14715 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14859 14716 ASSERT(ism_hatid->sfmmu_ismhat);
14860 14717 ism_ment = &scdp->scd_ism_links[rid];
14861 14718 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14862 14719 ASSERT(ism_ment->iment_base_va ==
14863 14720 rgnp->rgn_saddr);
14864 14721 mutex_enter(&ism_mlist_lock);
14865 14722 iment_sub(ism_ment, ism_hatid);
14866 14723 mutex_exit(&ism_mlist_lock);
14867 14724
14868 14725 }
14869 14726 }
14870 14727 }
14871 14728 }
14872 14729 /*
14873 14730  * Allocates and initialises a new SCD structure. This is called with
14874 14731 * the srd_scd_mutex held and returns with the reference count
14875 14732 * initialised to 1.
14876 14733 */
14877 14734 static sf_scd_t *
14878 14735 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14879 14736 {
14880 14737 sf_scd_t *new_scdp;
14881 14738 sfmmu_t *scsfmmup;
14882 14739 int i;
14883 14740
14884 14741 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14885 14742 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14886 14743
14887 14744 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14888 14745 new_scdp->scd_sfmmup = scsfmmup;
14889 14746 scsfmmup->sfmmu_srdp = srdp;
14890 14747 scsfmmup->sfmmu_scdp = new_scdp;
14891 14748 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14892 14749 scsfmmup->sfmmu_scdhat = 1;
14893 14750 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14894 14751 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14895 14752
14896 14753 ASSERT(max_mmu_ctxdoms > 0);
14897 14754 for (i = 0; i < max_mmu_ctxdoms; i++) {
14898 14755 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14899 14756 scsfmmup->sfmmu_ctxs[i].gnum = 0;
14900 14757 }
14901 14758
14902 14759 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14903 14760 new_scdp->scd_rttecnt[i] = 0;
14904 14761 }
14905 14762
14906 14763 new_scdp->scd_region_map = *new_map;
14907 14764 new_scdp->scd_refcnt = 1;
14908 14765 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14909 14766 kmem_cache_free(scd_cache, new_scdp);
14910 14767 kmem_cache_free(sfmmuid_cache, scsfmmup);
14911 14768 return (NULL);
14912 14769 }
14913 14770 if (&mmu_init_scd) {
14914 14771 mmu_init_scd(new_scdp);
14915 14772 }
14916 14773 return (new_scdp);
14917 14774 }
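/*
 * The "if (&mmu_init_scd)" test above presumably guards an optional,
 * platform-provided hook: the address is expected to be non-zero only
 * when the platform actually supplies mmu_init_scd(), so the generic
 * code skips the call otherwise.
 */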
14918 14775
14919 14776 /*
14920 14777 * The first phase of a process joining an SCD. The hat structure is
14921 14778 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14922 14779 * and a cross-call with context invalidation is used to cause the
14923 14780 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14924 14781 * routine.
14925 14782 */
14926 14783 static void
14927 14784 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14928 14785 {
14929 14786 hatlock_t *hatlockp;
14930 14787 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14931 14788 int i;
14932 14789 sf_scd_t *old_scdp;
14933 14790
14934 14791 ASSERT(srdp != NULL);
14935 14792 ASSERT(scdp != NULL);
14936 14793 ASSERT(scdp->scd_refcnt > 0);
14937 14794 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14938 14795
14939 14796 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14940 14797 ASSERT(old_scdp != scdp);
14941 14798
14942 14799 mutex_enter(&old_scdp->scd_mutex);
14943 14800 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14944 14801 mutex_exit(&old_scdp->scd_mutex);
14945 14802 /*
14946 14803 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14947 14804 * include the shme rgn ttecnt for rgns that
14948 14805 * were in the old SCD
14949 14806 */
14950 14807 for (i = 0; i < mmu_page_sizes; i++) {
14951 14808 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14952 14809 old_scdp->scd_rttecnt[i]);
14953 14810 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14954 14811 sfmmup->sfmmu_scdrttecnt[i]);
14955 14812 }
14956 14813 }
14957 14814
14958 14815 /*
14959 14816 * Move sfmmu to the scd lists.
14960 14817 */
14961 14818 mutex_enter(&scdp->scd_mutex);
14962 14819 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14963 14820 mutex_exit(&scdp->scd_mutex);
14964 14821 SF_SCD_INCR_REF(scdp);
14965 14822
14966 14823 hatlockp = sfmmu_hat_enter(sfmmup);
14967 14824 /*
14968 14825 * For a multi-thread process, we must stop
14969 14826 * all the other threads before joining the scd.
14970 14827 */
14971 14828
14972 14829 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14973 14830
14974 14831 sfmmu_invalidate_ctx(sfmmup);
14975 14832 sfmmup->sfmmu_scdp = scdp;
14976 14833
14977 14834 /*
14978 14835 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
14979 14836 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
14980 14837 */
14981 14838 for (i = 0; i < mmu_page_sizes; i++) {
14982 14839 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14983 14840 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14984 14841 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14985 14842 -sfmmup->sfmmu_scdrttecnt[i]);
14986 14843 }
14987 14844 /* update tsb0 inflation count */
14988 14845 if (old_scdp != NULL) {
14989 14846 sfmmup->sfmmu_tsb0_4minflcnt +=
14990 14847 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14991 14848 }
14992 14849 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14993 14850 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
14994 14851 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14995 14852
14996 14853 sfmmu_hat_exit(hatlockp);
14997 14854
14998 14855 if (old_scdp != NULL) {
14999 14856 SF_SCD_DECR_REF(srdp, old_scdp);
15000 14857 }
15001 14858
15002 14859 }
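/*
 * Together with sfmmu_finish_join_scd() below, the join is two-phased:
 * here, with the address space write-locked, the hat is put on
 * scd_sf_list, HAT_JOIN_SCD is set and the process's contexts are
 * invalidated; later, when a thread of the process next misses in the
 * TSB, sfmmu_tsbmiss_exception() sees HAT_JOIN_SCD under the hat lock
 * and calls sfmmu_finish_join_scd() to invalidate the private TSBs and
 * mark the SCD's ISM maps with HAT_CTX1_FLAG.
 */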
15003 14860
15004 14861 /*
15005 14862 * This routine is called by a process to become part of an SCD. It is called
15006 14863 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15007 14864 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15008 14865 */
15009 14866 static void
15010 14867 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15011 14868 {
15012 14869 struct tsb_info *tsbinfop;
15013 14870
15014 14871 ASSERT(sfmmu_hat_lock_held(sfmmup));
15015 14872 ASSERT(sfmmup->sfmmu_scdp != NULL);
15016 14873 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15017 14874 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15018 14875 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15019 14876
15020 14877 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15021 14878 tsbinfop = tsbinfop->tsb_next) {
15022 14879 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15023 14880 continue;
15024 14881 }
15025 14882 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15026 14883
15027 14884 sfmmu_inv_tsb(tsbinfop->tsb_va,
15028 14885 TSB_BYTES(tsbinfop->tsb_szc));
15029 14886 }
15030 14887
15031 14888 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15032 14889 sfmmu_ism_hatflags(sfmmup, 1);
15033 14890
15034 14891 SFMMU_STAT(sf_join_scd);
15035 14892 }
15036 14893
15037 14894 /*
15038 14895  * This routine is called to check whether there is an SCD which matches
15039 14896  * the process's region map; if not, a new SCD may be created.
15040 14897 */
15041 14898 static void
15042 14899 sfmmu_find_scd(sfmmu_t *sfmmup)
15043 14900 {
15044 14901 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15045 14902 sf_scd_t *scdp, *new_scdp;
15046 14903 int ret;
15047 14904
15048 14905 ASSERT(srdp != NULL);
15049 14906 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15050 14907
15051 14908 mutex_enter(&srdp->srd_scd_mutex);
15052 14909 for (scdp = srdp->srd_scdp; scdp != NULL;
15053 14910 scdp = scdp->scd_next) {
15054 14911 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15055 14912 &sfmmup->sfmmu_region_map, ret);
15056 14913 if (ret == 1) {
15057 14914 SF_SCD_INCR_REF(scdp);
15058 14915 mutex_exit(&srdp->srd_scd_mutex);
15059 14916 sfmmu_join_scd(scdp, sfmmup);
15060 14917 ASSERT(scdp->scd_refcnt >= 2);
15061 14918 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15062 14919 return;
15063 14920 } else {
15064 14921 /*
15065 14922 * If the sfmmu region map is a subset of the scd
15066 14923 * region map, then the assumption is that this process
15067 14924 * will continue attaching to ISM segments until the
15068 14925 * region maps are equal.
15069 14926 */
15070 14927 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15071 14928 &sfmmup->sfmmu_region_map, ret);
15072 14929 if (ret == 1) {
15073 14930 mutex_exit(&srdp->srd_scd_mutex);
15074 14931 return;
15075 14932 }
15076 14933 }
15077 14934 }
15078 14935
15079 14936 ASSERT(scdp == NULL);
15080 14937 /*
15081 14938 * No matching SCD has been found, create a new one.
15082 14939 */
15083 14940 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15084 14941 NULL) {
15085 14942 mutex_exit(&srdp->srd_scd_mutex);
15086 14943 return;
15087 14944 }
15088 14945
15089 14946 /*
15090 14947 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15091 14948 */
15092 14949
15093 14950 /* Set scd_rttecnt for shme rgns in SCD */
15094 14951 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15095 14952
15096 14953 /*
15097 14954 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15098 14955 */
15099 14956 sfmmu_link_scd_to_regions(srdp, new_scdp);
15100 14957 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15101 14958 SFMMU_STAT_ADD(sf_create_scd, 1);
15102 14959
15103 14960 mutex_exit(&srdp->srd_scd_mutex);
15104 14961 sfmmu_join_scd(new_scdp, sfmmup);
15105 14962 ASSERT(new_scdp->scd_refcnt >= 2);
15106 14963 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15107 14964 }
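/*
 * Conceptually (assuming the natural bitmap semantics of the
 * SF_RGNMAP_EQUAL and SF_RGNMAP_IS_SUBSET macros), the two tests above
 * reduce to per-word comparisons of the region bitmaps:
 *
 *	equal:	a->bitmap[i] == b->bitmap[i]			for every word i
 *	subset:	(sub->bitmap[i] & ~super->bitmap[i]) == 0	for every word i
 *
 * A process whose map is still a strict subset of an SCD's map is left
 * alone here on the expectation that it keeps attaching regions until
 * the maps match.
 */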
15108 14965
15109 14966 /*
15110 14967 * This routine is called by a process to remove itself from an SCD. It is
15111 14968  * either called when the process has detached from a segment or from
15112 14969 * hat_free_start() as a result of calling exit.
15113 14970 */
15114 14971 static void
15115 14972 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15116 14973 {
15117 14974 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15118 14975 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15119 14976 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15120 14977 int i;
15121 14978
15122 14979 ASSERT(scdp != NULL);
15123 14980 ASSERT(srdp != NULL);
15124 14981
15125 14982 if (sfmmup->sfmmu_free) {
15126 14983 /*
15127 14984 * If the process is part of an SCD the sfmmu is unlinked
15128 14985 * from scd_sf_list.
15129 14986 */
15130 14987 mutex_enter(&scdp->scd_mutex);
15131 14988 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15132 14989 mutex_exit(&scdp->scd_mutex);
15133 14990 /*
15134 14991 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15135 14992 * are about to leave the SCD
15136 14993 */
15137 14994 for (i = 0; i < mmu_page_sizes; i++) {
15138 14995 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15139 14996 scdp->scd_rttecnt[i]);
15140 14997 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15141 14998 sfmmup->sfmmu_scdrttecnt[i]);
15142 14999 sfmmup->sfmmu_scdrttecnt[i] = 0;
15143 15000 }
15144 15001 sfmmup->sfmmu_scdp = NULL;
15145 15002
15146 15003 SF_SCD_DECR_REF(srdp, scdp);
15147 15004 return;
15148 15005 }
15149 15006
15150 15007 ASSERT(r_type != SFMMU_REGION_ISM ||
15151 15008 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15152 15009 ASSERT(scdp->scd_refcnt);
15153 15010 ASSERT(!sfmmup->sfmmu_free);
15154 15011 ASSERT(sfmmu_hat_lock_held(sfmmup));
15155 15012 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15156 15013
15157 15014 /*
15158 15015 * Wait for ISM maps to be updated.
15159 15016 */
15160 15017 if (r_type != SFMMU_REGION_ISM) {
15161 15018 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15162 15019 sfmmup->sfmmu_scdp != NULL) {
15163 15020 cv_wait(&sfmmup->sfmmu_tsb_cv,
15164 15021 HATLOCK_MUTEXP(hatlockp));
15165 15022 }
15166 15023
15167 15024 if (sfmmup->sfmmu_scdp == NULL) {
15168 15025 sfmmu_hat_exit(hatlockp);
15169 15026 return;
15170 15027 }
15171 15028 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15172 15029 }
15173 15030
15174 15031 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15175 15032 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15176 15033 /*
15177 15034 * Since HAT_JOIN_SCD was set our context
15178 15035 * is still invalid.
15179 15036 */
15180 15037 } else {
15181 15038 /*
15182 15039 * For a multi-thread process, we must stop
15183 15040 * all the other threads before leaving the scd.
15184 15041 */
15185 15042
15186 15043 sfmmu_invalidate_ctx(sfmmup);
15187 15044 }
15188 15045
15189 15046 /* Clear all the rid's for ISM, delete flags, etc */
15190 15047 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15191 15048 sfmmu_ism_hatflags(sfmmup, 0);
15192 15049
15193 15050 /*
15194 15051 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15195 15052 * are in SCD before this sfmmup leaves the SCD.
15196 15053 */
15197 15054 for (i = 0; i < mmu_page_sizes; i++) {
15198 15055 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15199 15056 scdp->scd_rttecnt[i]);
15200 15057 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15201 15058 sfmmup->sfmmu_scdrttecnt[i]);
15202 15059 sfmmup->sfmmu_scdrttecnt[i] = 0;
15203 15060 /* update ismttecnt to include SCD ism before hat leaves SCD */
15204 15061 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15205 15062 sfmmup->sfmmu_scdismttecnt[i] = 0;
15206 15063 }
15207 15064 /* update tsb0 inflation count */
15208 15065 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15209 15066
15210 15067 if (r_type != SFMMU_REGION_ISM) {
15211 15068 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15212 15069 }
15213 15070 sfmmup->sfmmu_scdp = NULL;
15214 15071
15215 15072 sfmmu_hat_exit(hatlockp);
15216 15073
15217 15074 /*
15218 15075 	 * Unlink sfmmu from scd_sf_list; this can be done without holding
15219 15076 * the hat lock as we hold the sfmmu_as lock which prevents
15220 15077 * hat_join_region from adding this thread to the scd again. Other
15221 15078 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15222 15079 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15223 15080 * while holding the hat lock.
15224 15081 */
15225 15082 mutex_enter(&scdp->scd_mutex);
15226 15083 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15227 15084 mutex_exit(&scdp->scd_mutex);
15228 15085 SFMMU_STAT(sf_leave_scd);
15229 15086
15230 15087 SF_SCD_DECR_REF(srdp, scdp);
15231 15088 hatlockp = sfmmu_hat_enter(sfmmup);
15232 15089
15233 15090 }
15234 15091
15235 15092 /*
15236 15093 * Unlink and free up an SCD structure with a reference count of 0.
15237 15094 */
15238 15095 static void
15239 15096 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15240 15097 {
15241 15098 sfmmu_t *scsfmmup;
15242 15099 sf_scd_t *sp;
15243 15100 hatlock_t *shatlockp;
15244 15101 int i, ret;
15245 15102
15246 15103 mutex_enter(&srdp->srd_scd_mutex);
15247 15104 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15248 15105 if (sp == scdp)
15249 15106 break;
15250 15107 }
15251 15108 if (sp == NULL || sp->scd_refcnt) {
15252 15109 mutex_exit(&srdp->srd_scd_mutex);
15253 15110 return;
15254 15111 }
15255 15112
15256 15113 /*
15257 15114 * It is possible that the scd has been freed and reallocated with a
15258 15115 * different region map while we've been waiting for the srd_scd_mutex.
15259 15116 */
15260 15117 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15261 15118 if (ret != 1) {
15262 15119 mutex_exit(&srdp->srd_scd_mutex);
15263 15120 return;
15264 15121 }
15265 15122
15266 15123 ASSERT(scdp->scd_sf_list == NULL);
15267 15124 /*
15268 15125 * Unlink scd from srd_scdp list.
15269 15126 */
15270 15127 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15271 15128 mutex_exit(&srdp->srd_scd_mutex);
15272 15129
15273 15130 sfmmu_unlink_scd_from_regions(srdp, scdp);
15274 15131
15275 15132 /* Clear shared context tsb and release ctx */
15276 15133 scsfmmup = scdp->scd_sfmmup;
15277 15134
15278 15135 /*
15279 15136 * create a barrier so that scd will not be destroyed
15280 15137 	 * if another thread still holds the same shared hat lock.
15281 15138 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15282 15139 * shared hat lock before checking the shared tsb reloc flag.
15283 15140 */
15284 15141 shatlockp = sfmmu_hat_enter(scsfmmup);
15285 15142 sfmmu_hat_exit(shatlockp);
15286 15143
15287 15144 sfmmu_free_scd_tsbs(scsfmmup);
15288 15145
15289 15146 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15290 15147 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15291 15148 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15292 15149 SFMMU_L2_HMERLINKS_SIZE);
15293 15150 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15294 15151 }
15295 15152 }
15296 15153 kmem_cache_free(sfmmuid_cache, scsfmmup);
15297 15154 kmem_cache_free(scd_cache, scdp);
15298 15155 SFMMU_STAT(sf_destroy_scd);
15299 15156 }
15300 15157
15301 15158 /*
15302 15159 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15303 15160  * bits which are set in the SCD's scd_ismregion_map. This flag indicates to
15304 15161  * the tsbmiss handler that mappings for these segments should be loaded using
15305 15162 * the shared context.
15306 15163 */
15307 15164 static void
15308 15165 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15309 15166 {
15310 15167 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15311 15168 ism_blk_t *ism_blkp;
15312 15169 ism_map_t *ism_map;
15313 15170 int i, rid;
15314 15171
15315 15172 ASSERT(sfmmup->sfmmu_iblk != NULL);
15316 15173 ASSERT(scdp != NULL);
15317 15174 /*
15318 15175 * Note that the caller either set HAT_ISMBUSY flag or checked
15319 15176 * under hat lock that HAT_ISMBUSY was not set by another thread.
15320 15177 */
15321 15178 ASSERT(sfmmu_hat_lock_held(sfmmup));
15322 15179
15323 15180 ism_blkp = sfmmup->sfmmu_iblk;
15324 15181 while (ism_blkp != NULL) {
15325 15182 ism_map = ism_blkp->iblk_maps;
15326 15183 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15327 15184 rid = ism_map[i].imap_rid;
15328 15185 if (rid == SFMMU_INVALID_ISMRID) {
15329 15186 continue;
15330 15187 }
15331 15188 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15332 15189 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15333 15190 addflag) {
15334 15191 ism_map[i].imap_hatflags |=
15335 15192 HAT_CTX1_FLAG;
15336 15193 } else {
15337 15194 ism_map[i].imap_hatflags &=
15338 15195 ~HAT_CTX1_FLAG;
15339 15196 }
15340 15197 }
15341 15198 ism_blkp = ism_blkp->iblk_next;
15342 15199 }
15343 15200 }
15344 15201
15345 15202 static int
15346 15203 sfmmu_srd_lock_held(sf_srd_t *srdp)
15347 15204 {
15348 15205 return (MUTEX_HELD(&srdp->srd_mutex));
15349 15206 }
15350 15207
15351 15208 /* ARGSUSED */
15352 15209 static int
15353 15210 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15354 15211 {
15355 15212 sf_scd_t *scdp = (sf_scd_t *)buf;
15356 15213
15357 15214 bzero(buf, sizeof (sf_scd_t));
15358 15215 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15359 15216 return (0);
15360 15217 }
15361 15218
15362 15219 /* ARGSUSED */
15363 15220 static void
15364 15221 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15365 15222 {
15366 15223 sf_scd_t *scdp = (sf_scd_t *)buf;
15367 15224
15368 15225 mutex_destroy(&scdp->scd_mutex);
15369 15226 }
15370 15227
15371 15228 /*
15372 15229 * The listp parameter is a pointer to a list of hmeblks which are partially
15373 15230  * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the
15374 15231 * freeing process is to cross-call all cpus to ensure that there are no
15375 15232 * remaining cached references.
15376 15233 *
15377 15234 * If the local generation number is less than the global then we can free
15378 15235 * hmeblks which are already on the pending queue as another cpu has completed
15379 15236 * the cross-call.
15380 15237 *
15381 15238 * We cross-call to make sure that there are no threads on other cpus accessing
15382 15239  * these hmeblks and then complete the process of freeing them under the
15383 15240 * following conditions:
15384 15241 * The total number of pending hmeblks is greater than the threshold
15385 15242 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15386 15243  * It has been at least 1 second since the last time we cross-called
15387 15244 *
15388 15245 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15389 15246 */
15390 15247 static void
15391 15248 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15392 15249 {
15393 15250 struct hme_blk *hblkp, *pr_hblkp = NULL;
15394 15251 int count = 0;
15395 15252 cpuset_t cpuset = cpu_ready_set;
15396 15253 cpu_hme_pend_t *cpuhp;
15397 15254 timestruc_t now;
15398 15255 int one_second_expired = 0;
15399 15256
15400 15257 gethrestime_lasttick(&now);
15401 15258
15402 15259 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15403 15260 ASSERT(hblkp->hblk_shw_bit == 0);
15404 15261 ASSERT(hblkp->hblk_shared == 0);
15405 15262 count++;
15406 15263 pr_hblkp = hblkp;
15407 15264 }
15408 15265
15409 15266 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15410 15267 mutex_enter(&cpuhp->chp_mutex);
15411 15268
15412 15269 if ((cpuhp->chp_count + count) == 0) {
15413 15270 mutex_exit(&cpuhp->chp_mutex);
15414 15271 return;
15415 15272 }
15416 15273
15417 15274 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15418 15275 one_second_expired = 1;
15419 15276 }
15420 15277
15421 15278 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15422 15279 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15423 15280 one_second_expired)) {
15424 15281 /* Append global list to local */
15425 15282 if (pr_hblkp == NULL) {
15426 15283 *listp = cpuhp->chp_listp;
15427 15284 } else {
15428 15285 pr_hblkp->hblk_next = cpuhp->chp_listp;
15429 15286 }
15430 15287 cpuhp->chp_listp = NULL;
15431 15288 cpuhp->chp_count = 0;
15432 15289 cpuhp->chp_timestamp = now.tv_sec;
15433 15290 mutex_exit(&cpuhp->chp_mutex);
15434 15291
15435 15292 kpreempt_disable();
15436 15293 CPUSET_DEL(cpuset, CPU->cpu_id);
15437 15294 xt_sync(cpuset);
15438 15295 xt_sync(cpuset);
15439 15296 kpreempt_enable();
15440 15297
15441 15298 /*
15442 15299 * At this stage we know that no trap handlers on other
15443 15300 * cpus can have references to hmeblks on the list.
15444 15301 */
15445 15302 sfmmu_hblk_free(listp);
15446 15303 } else if (*listp != NULL) {
15447 15304 pr_hblkp->hblk_next = cpuhp->chp_listp;
15448 15305 cpuhp->chp_listp = *listp;
15449 15306 cpuhp->chp_count += count;
15450 15307 *listp = NULL;
15451 15308 mutex_exit(&cpuhp->chp_mutex);
15452 15309 } else {
15453 15310 mutex_exit(&cpuhp->chp_mutex);
15454 15311 }
15455 15312 }
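/*
 * A sketched, typical usage pattern (assuming a caller that has just
 * unlinked some hmeblks with sfmmu_hblk_hash_rm() under the hash lock):
 *
 *	struct hme_blk *list = NULL;
 *	...
 *	sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
 *	...
 *	SFMMU_HASH_UNLOCK(hmebp);
 *	sfmmu_hblks_list_purge(&list, 0);
 *
 * Removals are thus batched on a local list and either queued on this
 * cpu's pending list or cross-called and freed in one pass here.
 */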
15456 15313
15457 15314 /*
15458 15315  * Add an hmeblk to the hash list.
15459 15316 */
15460 15317 void
15461 15318 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15462 15319 uint64_t hblkpa)
15463 15320 {
15464 15321 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15465 15322 #ifdef DEBUG
15466 15323 if (hmebp->hmeblkp == NULL) {
15467 15324 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15468 15325 }
15469 15326 #endif /* DEBUG */
15470 15327
15471 15328 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15472 15329 /*
15473 15330 * Since the TSB miss handler now does not lock the hash chain before
15474 15331  * walking it, make sure that the hmeblk's nextpa is globally visible
15475 15332 * before we make the hmeblk globally visible by updating the chain root
15476 15333 * pointer in the hash bucket.
15477 15334 */
15478 15335 membar_producer();
15479 15336 hmebp->hmeh_nextpa = hblkpa;
15480 15337 hmeblkp->hblk_next = hmebp->hmeblkp;
15481 15338 hmebp->hmeblkp = hmeblkp;
15482 15339
15483 15340 }
15484 15341
15485 15342 /*
15486 15343 * This function is the first part of a 2 part process to remove an hmeblk
15487 15344 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15488 15345 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15489 15346 * a per-cpu pending list using the virtual address pointer.
15490 15347 *
15491 15348 * TSB miss trap handlers that start after this phase will no longer see
15492 15349 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
15493 15350 * can still use it for further chain traversal because we haven't yet modifed
15494 15351  * can still use it for further chain traversal because we haven't yet modified
15495 15352 *
15496 15353 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15497 15354 * we reuse or free this hmeblk. This will make sure all lingering references to
15498 15355 * the hmeblk after first phase disappear before we finally reclaim it.
15499 15356  * the hmeblk after the first phase disappear before we finally reclaim it.
15500 15357 * during their traversal.
15501 15358 *
15502 15359 * The hmehash_mutex must be held when calling this function.
15503 15360 *
15504 15361 * Input:
15505 15362 * hmebp - hme hash bucket pointer
15506 15363 * hmeblkp - address of hmeblk to be removed
15507 15364 * pr_hblk - virtual address of previous hmeblkp
15508 15365 * listp - pointer to list of hmeblks linked by virtual address
15509 15366 * free_now flag - indicates that a complete removal from the hash chains
15510 15367 * is necessary.
15511 15368 *
15512 15369 * It is inefficient to use the free_now flag as a cross-call is required to
15513 15370 * remove a single hmeblk from the hash chain but is necessary when hmeblks are
15514 15371 * in short supply.
15515 15372 */
15516 15373 void
15517 15374 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15518 15375 struct hme_blk *pr_hblk, struct hme_blk **listp,
15519 15376 int free_now)
15520 15377 {
15521 15378 int shw_size, vshift;
15522 15379 struct hme_blk *shw_hblkp;
15523 15380 uint_t shw_mask, newshw_mask;
15524 15381 caddr_t vaddr;
15525 15382 int size;
15526 15383 cpuset_t cpuset = cpu_ready_set;
15527 15384
15528 15385 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15529 15386
15530 15387 if (hmebp->hmeblkp == hmeblkp) {
15531 15388 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15532 15389 hmebp->hmeblkp = hmeblkp->hblk_next;
15533 15390 } else {
15534 15391 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15535 15392 pr_hblk->hblk_next = hmeblkp->hblk_next;
15536 15393 }
15537 15394
15538 15395 size = get_hblk_ttesz(hmeblkp);
15539 15396 shw_hblkp = hmeblkp->hblk_shadow;
15540 15397 if (shw_hblkp) {
15541 15398 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15542 15399 ASSERT(!hmeblkp->hblk_shared);
15543 15400 #ifdef DEBUG
15544 15401 if (mmu_page_sizes == max_mmu_page_sizes) {
15545 15402 ASSERT(size < TTE256M);
15546 15403 } else {
15547 15404 ASSERT(size < TTE4M);
15548 15405 }
15549 15406 #endif /* DEBUG */
15550 15407
15551 15408 shw_size = get_hblk_ttesz(shw_hblkp);
15552 15409 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15553 15410 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15554 15411 ASSERT(vshift < 8);
15555 15412 /*
15556 15413 * Atomically clear shadow mask bit
15557 15414 */
15558 15415 do {
15559 15416 shw_mask = shw_hblkp->hblk_shw_mask;
15560 15417 ASSERT(shw_mask & (1 << vshift));
15561 15418 newshw_mask = shw_mask & ~(1 << vshift);
15562 15419 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15563 15420 shw_mask, newshw_mask);
15564 15421 } while (newshw_mask != shw_mask);
15565 15422 hmeblkp->hblk_shadow = NULL;
15566 15423 }
15567 15424 hmeblkp->hblk_shw_bit = 0;
15568 15425
15569 15426 if (hmeblkp->hblk_shared) {
15570 15427 #ifdef DEBUG
15571 15428 sf_srd_t *srdp;
15572 15429 sf_region_t *rgnp;
15573 15430 uint_t rid;
15574 15431
15575 15432 srdp = hblktosrd(hmeblkp);
15576 15433 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15577 15434 rid = hmeblkp->hblk_tag.htag_rid;
15578 15435 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15579 15436 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15580 15437 rgnp = srdp->srd_hmergnp[rid];
15581 15438 ASSERT(rgnp != NULL);
15582 15439 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15583 15440 #endif /* DEBUG */
15584 15441 hmeblkp->hblk_shared = 0;
15585 15442 }
15586 15443 if (free_now) {
15587 15444 kpreempt_disable();
15588 15445 CPUSET_DEL(cpuset, CPU->cpu_id);
15589 15446 xt_sync(cpuset);
15590 15447 xt_sync(cpuset);
15591 15448 kpreempt_enable();
15592 15449
15593 15450 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15594 15451 hmeblkp->hblk_next = NULL;
15595 15452 } else {
15596 15453 /* Append hmeblkp to listp for processing later. */
15597 15454 hmeblkp->hblk_next = *listp;
15598 15455 *listp = hmeblkp;
15599 15456 }
15600 15457 }
15601 15458
15602 15459 /*
15603 15460 * This routine is called when memory is in short supply and returns a free
15604 15461 * hmeblk of the requested size from the cpu pending lists.
15605 15462 */
15606 15463 static struct hme_blk *
15607 15464 sfmmu_check_pending_hblks(int size)
15608 15465 {
15609 15466 int i;
15610 15467 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15611 15468 int found_hmeblk;
15612 15469 cpuset_t cpuset = cpu_ready_set;
15613 15470 cpu_hme_pend_t *cpuhp;
15614 15471
15615 15472 /* Flush cpu hblk pending queues */
15616 15473 for (i = 0; i < NCPU; i++) {
15617 15474 cpuhp = &cpu_hme_pend[i];
15618 15475 if (cpuhp->chp_listp != NULL) {
15619 15476 mutex_enter(&cpuhp->chp_mutex);
15620 15477 if (cpuhp->chp_listp == NULL) {
15621 15478 mutex_exit(&cpuhp->chp_mutex);
15622 15479 continue;
15623 15480 }
15624 15481 found_hmeblk = 0;
15625 15482 last_hmeblkp = NULL;
15626 15483 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15627 15484 hmeblkp = hmeblkp->hblk_next) {
15628 15485 if (get_hblk_ttesz(hmeblkp) == size) {
15629 15486 if (last_hmeblkp == NULL) {
15630 15487 cpuhp->chp_listp =
15631 15488 hmeblkp->hblk_next;
15632 15489 } else {
15633 15490 last_hmeblkp->hblk_next =
15634 15491 hmeblkp->hblk_next;
15635 15492 }
15636 15493 ASSERT(cpuhp->chp_count > 0);
15637 15494 cpuhp->chp_count--;
15638 15495 found_hmeblk = 1;
15639 15496 break;
15640 15497 } else {
15641 15498 last_hmeblkp = hmeblkp;
15642 15499 }
15643 15500 }
15644 15501 mutex_exit(&cpuhp->chp_mutex);
15645 15502
15646 15503 if (found_hmeblk) {
15647 15504 kpreempt_disable();
15648 15505 CPUSET_DEL(cpuset, CPU->cpu_id);
15649 15506 xt_sync(cpuset);
15650 15507 xt_sync(cpuset);
15651 15508 kpreempt_enable();
15652 15509 return (hmeblkp);
15653 15510 }
15654 15511 }
15655 15512 }
15656 15513 return (NULL);
15657 15514 }
5834 lines elided