1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 */
27
28 /*
29 * VM - Hardware Address Translation management for Spitfire MMU.
30 *
31 * This file implements the machine specific hardware translation
32 * needed by the VM system. The machine independent interface is
33 * described in <vm/hat.h> while the machine dependent interface
34 * and data structures are described in <vm/hat_sfmmu.h>.
35 *
36 * The hat layer manages the address translation hardware as a cache
37 * driven by calls from the higher levels in the VM system.
38 */
39
40 #include <sys/types.h>
41 #include <sys/kstat.h>
42 #include <vm/hat.h>
43 #include <vm/hat_sfmmu.h>
44 #include <vm/page.h>
45 #include <sys/pte.h>
46 #include <sys/systm.h>
47 #include <sys/mman.h>
48 #include <sys/sysmacros.h>
49 #include <sys/machparam.h>
50 #include <sys/vtrace.h>
51 #include <sys/kmem.h>
52 #include <sys/mmu.h>
53 #include <sys/cmn_err.h>
54 #include <sys/cpu.h>
55 #include <sys/cpuvar.h>
56 #include <sys/debug.h>
57 #include <sys/lgrp.h>
58 #include <sys/archsystm.h>
59 #include <sys/machsystm.h>
60 #include <sys/vmsystm.h>
61 #include <vm/as.h>
62 #include <vm/seg.h>
63 #include <vm/seg_kp.h>
64 #include <vm/seg_kmem.h>
65 #include <vm/seg_kpm.h>
66 #include <vm/rm.h>
67 #include <sys/t_lock.h>
68 #include <sys/obpdefs.h>
69 #include <sys/vm_machparam.h>
70 #include <sys/var.h>
71 #include <sys/trap.h>
72 #include <sys/machtrap.h>
73 #include <sys/scb.h>
74 #include <sys/bitmap.h>
75 #include <sys/machlock.h>
76 #include <sys/membar.h>
77 #include <sys/atomic.h>
78 #include <sys/cpu_module.h>
79 #include <sys/prom_debug.h>
80 #include <sys/ksynch.h>
81 #include <sys/mem_config.h>
82 #include <sys/mem_cage.h>
83 #include <vm/vm_dep.h>
84 #include <sys/fpu/fpusystm.h>
85 #include <vm/mach_kpm.h>
86 #include <sys/callb.h>
87
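/*
 * DEBUG-only consistency checks for shared-region (SRD) bookkeeping.
 * SFMMU_VALIDATE_HMERID asserts, for a hat and HME region id, that the
 * region is a live HME region of the hat's SRD and that the range
 * [saddr, saddr + len) falls entirely within it.  SFMMU_VALIDATE_SHAREDHBLK
 * asserts that a shared hmeblk lies within its region and uses a tte size
 * the region permits.  Both macros compile to nothing on non-DEBUG kernels.
 */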
88 #ifdef DEBUG
89 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
90 if (SFMMU_IS_SHMERID_VALID(rid)) { \
91 caddr_t _eaddr = (saddr) + (len); \
92 sf_srd_t *_srdp; \
93 sf_region_t *_rgnp; \
94 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
95 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
96 ASSERT((hat) != ksfmmup); \
97 _srdp = (hat)->sfmmu_srdp; \
98 ASSERT(_srdp != NULL); \
99 ASSERT(_srdp->srd_refcnt != 0); \
100 _rgnp = _srdp->srd_hmergnp[(rid)]; \
101 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
102 ASSERT(_rgnp->rgn_refcnt != 0); \
103 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
104 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
105 SFMMU_REGION_HME); \
106 ASSERT((saddr) >= _rgnp->rgn_saddr); \
107 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
108 ASSERT(_eaddr > _rgnp->rgn_saddr); \
109 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
110 }
111
112 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
113 { \
114 caddr_t _hsva; \
115 caddr_t _heva; \
116 caddr_t _rsva; \
117 caddr_t _reva; \
118 int _ttesz = get_hblk_ttesz(hmeblkp); \
119 int _flagtte; \
120 ASSERT((srdp)->srd_refcnt != 0); \
121 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
122 ASSERT((rgnp)->rgn_id == rid); \
123 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
124 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
125 SFMMU_REGION_HME); \
126 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
127 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
128 _heva = get_hblk_endaddr(hmeblkp); \
129 _rsva = (caddr_t)P2ALIGN( \
130 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
131 _reva = (caddr_t)P2ROUNDUP( \
132 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
133 HBLK_MIN_BYTES); \
134 ASSERT(_hsva >= _rsva); \
135 ASSERT(_hsva < _reva); \
136 ASSERT(_heva > _rsva); \
137 ASSERT(_heva <= _reva); \
138 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
139 _ttesz; \
140 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
141 }
142
143 #else /* DEBUG */
144 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
145 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
146 #endif /* DEBUG */
147
148 #if defined(SF_ERRATA_57)
149 extern caddr_t errata57_limit;
150 #endif
151
152 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
153 (sizeof (int64_t)))
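/*
 * HME8BLK_SZ_RND is the size of an 8-hment hme_blk expressed as a count of
 * int64_t's (rounded up); it sizes the statically allocated hblk_reserve
 * buffer below, which HBLK_RESERVE overlays as a struct hme_blk.
 */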
154 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
155
156 #define HBLK_RESERVE_CNT 128
157 #define HBLK_RESERVE_MIN 20
158
159 static struct hme_blk *freehblkp;
160 static kmutex_t freehblkp_lock;
161 static int freehblkcnt;
162
163 static int64_t hblk_reserve[HME8BLK_SZ_RND];
164 static kmutex_t hblk_reserve_lock;
165 static kthread_t *hblk_reserve_thread;
166
167 static nucleus_hblk8_info_t nucleus_hblk8;
168 static nucleus_hblk1_info_t nucleus_hblk1;
169
170 /*
 * Data to manage the per-cpu hmeblk pending queues. Hmeblks are queued here
 * after the initial phase of removing an hmeblk from the hash chain; see
 * the detailed comment in sfmmu_hblk_hash_rm() for further details.
174 */
175 static cpu_hme_pend_t *cpu_hme_pend;
176 static uint_t cpu_hme_pend_thresh;
177 /*
178 * SFMMU specific hat functions
179 */
180 void hat_pagecachectl(struct page *, int);
181
182 /* flags for hat_pagecachectl */
183 #define HAT_CACHE 0x1
184 #define HAT_UNCACHE 0x2
185 #define HAT_TMPNC 0x4
186
187 /*
188 * Flag to allow the creation of non-cacheable translations
189 * to system memory. It is off by default. At the moment this
190 * flag is used by the ecache error injector. The error injector
191 * will turn it on when creating such a translation then shut it
192 * off when it's finished.
193 */
194
195 int sfmmu_allow_nc_trans = 0;
196
197 /*
198 * Flag to disable large page support.
 * A value of 1 => disable all large pages.
 * Bits 1, 2, and 3 disable 64K, 512K and 4M pages, respectively.
201 *
202 * For example, use the value 0x4 to disable 512K pages.
203 *
204 */
205 #define LARGE_PAGES_OFF 0x1
206
207 /*
208 * The disable_large_pages and disable_ism_large_pages variables control
209 * hat_memload_array and the page sizes to be used by ISM and the kernel.
210 *
211 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
212 * are only used to control which OOB pages to use at upper VM segment creation
213 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
214 * Their values may come from platform or CPU specific code to disable page
215 * sizes that should not be used.
216 *
217 * WARNING: 512K pages are currently not supported for ISM/DISM.
218 */
219 uint_t disable_large_pages = 0;
220 uint_t disable_ism_large_pages = (1 << TTE512K);
221 uint_t disable_auto_data_large_pages = 0;
222 uint_t disable_auto_text_large_pages = 0;
223
224 /*
225 * Private sfmmu data structures for hat management
226 */
227 static struct kmem_cache *sfmmuid_cache;
228 static struct kmem_cache *mmuctxdom_cache;
229
230 /*
231 * Private sfmmu data structures for tsb management
232 */
233 static struct kmem_cache *sfmmu_tsbinfo_cache;
234 static struct kmem_cache *sfmmu_tsb8k_cache;
235 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
236 static vmem_t *kmem_bigtsb_arena;
237 static vmem_t *kmem_tsb_arena;
238
239 /*
240 * sfmmu static variables for hmeblk resource management.
241 */
242 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
243 static struct kmem_cache *sfmmu8_cache;
244 static struct kmem_cache *sfmmu1_cache;
245 static struct kmem_cache *pa_hment_cache;
246
247 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
248 /*
249 * private data for ism
250 */
251 static struct kmem_cache *ism_blk_cache;
252 static struct kmem_cache *ism_ment_cache;
253 #define ISMID_STARTADDR NULL
254
255 /*
256 * Region management data structures and function declarations.
257 */
258
259 static void sfmmu_leave_srd(sfmmu_t *);
260 static int sfmmu_srdcache_constructor(void *, void *, int);
261 static void sfmmu_srdcache_destructor(void *, void *);
262 static int sfmmu_rgncache_constructor(void *, void *, int);
263 static void sfmmu_rgncache_destructor(void *, void *);
264 static int sfrgnmap_isnull(sf_region_map_t *);
265 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
266 static int sfmmu_scdcache_constructor(void *, void *, int);
267 static void sfmmu_scdcache_destructor(void *, void *);
268 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
269 size_t, void *, u_offset_t);
270
271 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
272 static sf_srd_bucket_t *srd_buckets;
273 static struct kmem_cache *srd_cache;
274 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
275 static struct kmem_cache *region_cache;
276 static struct kmem_cache *scd_cache;
277
278 #ifdef sun4v
279 int use_bigtsb_arena = 1;
280 #else
281 int use_bigtsb_arena = 0;
282 #endif
283
284 /* External /etc/system tunable, for turning on&off the shctx support */
285 int disable_shctx = 0;
286 /* Internal variable, set by MD if the HW supports shctx feature */
287 int shctx_on = 0;
288
289 #ifdef DEBUG
290 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
291 #endif
292 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
293 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
294
295 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
296 static void sfmmu_find_scd(sfmmu_t *);
297 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
298 static void sfmmu_finish_join_scd(sfmmu_t *);
299 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
300 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
301 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
302 static void sfmmu_free_scd_tsbs(sfmmu_t *);
303 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
304 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
305 static void sfmmu_ism_hatflags(sfmmu_t *, int);
306 static int sfmmu_srd_lock_held(sf_srd_t *);
307 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
308 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
309 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
310 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
311 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
312 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
313
314 /*
315 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
316 * HAT flags, synchronizing TLB/TSB coherency, and context management.
317 * The lock is hashed on the sfmmup since the case where we need to lock
318 * all processes is rare but does occur (e.g. we need to unload a shared
319 * mapping from all processes using the mapping). We have a lot of buckets,
320 * and each slab of sfmmu_t's can use about a quarter of them, giving us
321 * a fairly good distribution without wasting too much space and overhead
322 * when we have to grab them all.
323 */
324 #define SFMMU_NUM_LOCK 128 /* must be power of two */
325 hatlock_t hat_lock[SFMMU_NUM_LOCK];
326
327 /*
328 * Hash algorithm optimized for a small number of slabs.
329 * 7 is (highbit((sizeof sfmmu_t)) - 1)
330 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
331 * kmem_cache, and thus they will be sequential within that cache. In
332 * addition, each new slab will have a different "color" up to cache_maxcolor
333 * which will skew the hashing for each successive slab which is allocated.
334 * If the size of sfmmu_t changed to a larger size, this algorithm may need
335 * to be revisited.
336 */
337 #define TSB_HASH_SHIFT_BITS (7)
338 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
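/*
 * Illustration: since sizeof (sfmmu_t) is on the order of 2^7 bytes,
 * shifting the pointer right by TSB_HASH_SHIFT_BITS makes sfmmu_t's that
 * are sequential within a slab hash to roughly sequential values, which
 * TSB_HASH() below then masks with (SFMMU_NUM_LOCK - 1) to pick one of
 * the 128 hat_lock buckets.
 */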
339
340 #ifdef DEBUG
341 int tsb_hash_debug = 0;
342 #define TSB_HASH(sfmmup) \
343 (tsb_hash_debug ? &hat_lock[0] : \
344 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
345 #else /* DEBUG */
346 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
347 #endif /* DEBUG */
348
349
350 /* sfmmu_replace_tsb() return codes. */
351 typedef enum tsb_replace_rc {
352 TSB_SUCCESS,
353 TSB_ALLOCFAIL,
354 TSB_LOSTRACE,
355 TSB_ALREADY_SWAPPED,
356 TSB_CANTGROW
357 } tsb_replace_rc_t;
358
359 /*
360 * Flags for TSB allocation routines.
361 */
362 #define TSB_ALLOC 0x01
363 #define TSB_FORCEALLOC 0x02
364 #define TSB_GROW 0x04
365 #define TSB_SHRINK 0x08
366 #define TSB_SWAPIN 0x10
367
368 /*
369 * Support for HAT callbacks.
370 */
371 #define SFMMU_MAX_RELOC_CALLBACKS 10
372 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
373 static id_t sfmmu_cb_nextid = 0;
374 static id_t sfmmu_tsb_cb_id;
375 struct sfmmu_callback *sfmmu_cb_table;
376
377 kmutex_t kpr_mutex;
378 kmutex_t kpr_suspendlock;
379 kthread_t *kreloc_thread;
380
381 /*
382 * Enable VA->PA translation sanity checking on DEBUG kernels.
383 * Disabled by default. This is incompatible with some
384 * drivers (error injector, RSM) so if it breaks you get
385 * to keep both pieces.
386 */
387 int hat_check_vtop = 0;
388
389 /*
390 * Private sfmmu routines (prototypes)
391 */
392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
395 uint_t);
396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
397 caddr_t, demap_range_t *, uint_t);
398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
399 caddr_t, int);
400 static void sfmmu_hblk_free(struct hme_blk **);
401 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
402 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
403 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
404 static struct hme_blk *sfmmu_hblk_steal(int);
405 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
406 struct hme_blk *, uint64_t, struct hme_blk *);
407 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
408
409 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
410 struct page **, uint_t, uint_t, uint_t);
411 static void hat_do_memload(struct hat *, caddr_t, struct page *,
412 uint_t, uint_t, uint_t);
413 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
414 uint_t, uint_t, pgcnt_t, uint_t);
415 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
416 uint_t);
417 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
418 uint_t, uint_t);
419 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
420 caddr_t, int, uint_t);
421 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
422 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
423 uint_t);
424 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
425 caddr_t, page_t **, uint_t, uint_t);
426 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
427
428 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
429 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
430 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
431 #ifdef VAC
432 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
433 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
434 int tst_tnc(page_t *pp, pgcnt_t);
435 void conv_tnc(page_t *pp, int);
436 #endif
437
438 static void sfmmu_get_ctx(sfmmu_t *);
439 static void sfmmu_free_sfmmu(sfmmu_t *);
440
441 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
442 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
443
444 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
445 static void hat_pagereload(struct page *, struct page *);
446 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
447 #ifdef VAC
448 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
449 static void sfmmu_page_cache(page_t *, int, int, int);
450 #endif
451
452 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
453 struct hme_blk *, int);
454 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
455 pfn_t, int, int, int, int);
456 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
457 pfn_t, int);
458 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
459 static void sfmmu_tlb_range_demap(demap_range_t *);
460 static void sfmmu_invalidate_ctx(sfmmu_t *);
461 static void sfmmu_sync_mmustate(sfmmu_t *);
462
463 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
464 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
465 sfmmu_t *);
466 static void sfmmu_tsb_free(struct tsb_info *);
467 static void sfmmu_tsbinfo_free(struct tsb_info *);
468 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
469 sfmmu_t *);
470 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
471 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
472 static int sfmmu_select_tsb_szc(pgcnt_t);
473 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
474 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
475 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
476 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
477 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
478 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
479 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
480 hatlock_t *, uint_t);
481 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
482
483 #ifdef VAC
484 void sfmmu_cache_flush(pfn_t, int);
485 void sfmmu_cache_flushcolor(int, pfn_t);
486 #endif
487 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
488 caddr_t, demap_range_t *, uint_t, int);
489
490 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
491 static uint_t sfmmu_ptov_attr(tte_t *);
492 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
493 caddr_t, demap_range_t *, uint_t);
494 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
495 static int sfmmu_idcache_constructor(void *, void *, int);
496 static void sfmmu_idcache_destructor(void *, void *);
497 static int sfmmu_hblkcache_constructor(void *, void *, int);
498 static void sfmmu_hblkcache_destructor(void *, void *);
499 static void sfmmu_hblkcache_reclaim(void *);
500 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
501 struct hmehash_bucket *);
502 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
503 struct hme_blk *, struct hme_blk **, int);
504 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
505 uint64_t);
506 static struct hme_blk *sfmmu_check_pending_hblks(int);
507 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
508 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
509 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
510 int, caddr_t *);
511 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
512
513 static void sfmmu_rm_large_mappings(page_t *, int);
514
515 static void hat_lock_init(void);
516 static void hat_kstat_init(void);
517 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
518 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
519 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
520 static void sfmmu_check_page_sizes(sfmmu_t *, int);
521 int fnd_mapping_sz(page_t *);
522 static void iment_add(struct ism_ment *, struct hat *);
523 static void iment_sub(struct ism_ment *, struct hat *);
524 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
525 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
526 extern void sfmmu_clear_utsbinfo(void);
527
528 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
529
530 extern int vpm_enable;
531
532 /* kpm globals */
533 #ifdef DEBUG
534 /*
535 * Enable trap level tsbmiss handling
536 */
537 int kpm_tsbmtl = 1;
538
539 /*
540 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
541 * required TLB shootdowns in this case, so handle w/ care. Off by default.
542 */
543 int kpm_tlb_flush;
544 #endif /* DEBUG */
545
546 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
547
548 #ifdef DEBUG
549 static void sfmmu_check_hblk_flist();
550 #endif
551
552 /*
 * Semi-private sfmmu data structures. Some of them are initialized in
 * startup or in hat_init. Some of them are private but are accessed by
 * assembly code or by mach_sfmmu.c.
556 */
557 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
558 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
559 uint64_t uhme_hash_pa; /* PA of uhme_hash */
560 uint64_t khme_hash_pa; /* PA of khme_hash */
561 int uhmehash_num; /* # of buckets in user hash table */
562 int khmehash_num; /* # of buckets in kernel hash table */
563
564 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
565 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
566 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
567
568 #define DEFAULT_NUM_CTXS_PER_MMU 8192
569 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
570
571 int cache; /* describes system cache */
572
573 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
574 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
575 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
576 int ktsb_sz; /* kernel 8k-indexed tsb size */
577
578 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
579 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
580 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
581 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
582
583 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
584 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
585 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
586 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
587
588 #ifndef sun4v
589 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
590 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
591 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
592 caddr_t utsb_vabase; /* reserved kernel virtual memory */
593 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
594 #endif /* sun4v */
595 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
596 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
597 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
598
599 /*
600 * Size to use for TSB slabs. Future platforms that support page sizes
601 * larger than 4M may wish to change these values, and provide their own
602 * assembly macros for building and decoding the TSB base register contents.
603 * Note disable_large_pages will override the value set here.
604 */
605 static uint_t tsb_slab_ttesz = TTE4M;
606 size_t tsb_slab_size = MMU_PAGESIZE4M;
607 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
608 /* PFN mask for TTE */
609 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
610
611 /*
 * Size to use for big TSB slabs. These are used only when 256M TSB arenas
 * exist.
614 */
615 static uint_t bigtsb_slab_ttesz = TTE256M;
616 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
617 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
618 /* 256M page alignment for 8K pfn */
619 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
620
621 /* largest TSB size to grow to, will be smaller on smaller memory systems */
622 static int tsb_max_growsize = 0;
623
624 /*
625 * Tunable parameters dealing with TSB policies.
626 */
627
628 /*
629 * This undocumented tunable forces all 8K TSBs to be allocated from
630 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
631 */
632 #ifdef DEBUG
633 int tsb_forceheap = 0;
634 #endif /* DEBUG */
635
636 /*
637 * Decide whether to use per-lgroup arenas, or one global set of
638 * TSB arenas. The default is not to break up per-lgroup, since
639 * most platforms don't recognize any tangible benefit from it.
640 */
641 int tsb_lgrp_affinity = 0;
642
643 /*
644 * Used for growing the TSB based on the process RSS.
645 * tsb_rss_factor is based on the smallest TSB, and is
646 * shifted by the TSB size to determine if we need to grow.
647 * The default will grow the TSB if the number of TTEs for
648 * this page size exceeds 75% of the number of TSB entries,
649 * which should _almost_ eliminate all conflict misses
650 * (at the expense of using up lots and lots of memory).
651 */
652 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
653 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
654 #define SELECT_TSB_SIZECODE(pgcnt) ( \
655 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
656 default_tsb_size)
657 #define TSB_OK_SHRINK() \
658 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
659 #define TSB_OK_GROW() \
660 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
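/*
 * Worked example (illustrative, assuming the minimum TSB holds 512
 * 16-byte entries): TSB_RSS_FACTOR is then 512 * 0.75 = 384, so a TSB of
 * size code N is a candidate for growth once the resident TTE count for a
 * page size exceeds SFMMU_RSS_TSBSIZE(N) = 384 << N, i.e. 75% of that
 * TSB's capacity.
 */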
661
662 int enable_tsb_rss_sizing = 1;
663 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
664
665 /* which TSB size code to use for new address spaces or if rss sizing off */
666 int default_tsb_size = TSB_8K_SZCODE;
667
668 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
669 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
670 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
671
672 #ifdef DEBUG
673 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
674 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
675 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
676 static int tsb_alloc_fail_mtbf = 0;
677 static int tsb_alloc_count = 0;
678 #endif /* DEBUG */
679
680 /* if set to 1, will remap valid TTEs when growing TSB. */
681 int tsb_remap_ttes = 1;
682
683 /*
684 * If we have more than this many mappings, allocate a second TSB.
685 * This default is chosen because the I/D fully associative TLBs are
686 * assumed to have at least 8 available entries. Platforms with a
687 * larger fully-associative TLB could probably override the default.
688 */
689
690 #ifdef sun4v
691 int tsb_sectsb_threshold = 0;
692 #else
693 int tsb_sectsb_threshold = 8;
694 #endif
695
696 /*
697 * kstat data
698 */
699 struct sfmmu_global_stat sfmmu_global_stat;
700 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
701
702 /*
703 * Global data
704 */
705 sfmmu_t *ksfmmup; /* kernel's hat id */
706
707 #ifdef DEBUG
708 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
709 #endif
710
711 /* sfmmu locking operations */
712 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
713 static int sfmmu_mlspl_held(struct page *, int);
714
715 kmutex_t *sfmmu_page_enter(page_t *);
716 void sfmmu_page_exit(kmutex_t *);
717 int sfmmu_page_spl_held(struct page *);
718
719 /* sfmmu internal locking operations - accessed directly */
720 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
721 kmutex_t **, kmutex_t **);
722 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
723 static hatlock_t *
724 sfmmu_hat_enter(sfmmu_t *);
725 static hatlock_t *
726 sfmmu_hat_tryenter(sfmmu_t *);
727 static void sfmmu_hat_exit(hatlock_t *);
728 static void sfmmu_hat_lock_all(void);
729 static void sfmmu_hat_unlock_all(void);
730 static void sfmmu_ismhat_enter(sfmmu_t *, int);
731 static void sfmmu_ismhat_exit(sfmmu_t *, int);
732
733 kpm_hlk_t *kpmp_table;
734 uint_t kpmp_table_sz; /* must be a power of 2 */
735 uchar_t kpmp_shift;
736
737 kpm_shlk_t *kpmp_stable;
738 uint_t kpmp_stable_sz; /* must be a power of 2 */
739
740 /*
741 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
742 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
743 */
744 #if ((2*NCPU_P2) > 128)
745 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
746 #else
747 #define SPL_SHIFT 7U
748 #endif
749 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
750 #define SPL_MASK (SPL_TABLE_SIZE - 1)
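/*
 * For example (illustrative): with NCPU_P2 of 256, 2 * NCPU_P2 exceeds
 * 128, so SPL_SHIFT is NCPU_LOG2 + 1 = 9 and the sfmmu_page_lock table
 * below holds 512 entries; on smaller machines it bottoms out at
 * 2^7 = 128 entries.
 */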
751
752 /*
753 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
754 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
755 */
756 #define SPL_INDEX(pp) \
757 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
758 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
761 SPL_MASK)
762
763 #define SPL_HASH(pp) \
764 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
765
766 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
767
768 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
769
770 #define MML_TABLE_SIZE SPL_TABLE_SIZE
771 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
772
773 static pad_mutex_t mml_table[MML_TABLE_SIZE];
774
775 /*
776 * hat_unload_callback() will group together callbacks in order
777 * to avoid xt_sync() calls. This is the maximum size of the group.
778 */
779 #define MAX_CB_ADDR 32
780
781 tte_t hw_tte;
782 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
783
784 static char *mmu_ctx_kstat_names[] = {
785 "mmu_ctx_tsb_exceptions",
786 "mmu_ctx_tsb_raise_exception",
787 "mmu_ctx_wrap_around",
788 };
789
790 /*
791 * Wrapper for vmem_xalloc since vmem_create only allows limited
792 * parameters for vm_source_alloc functions. This function allows us
793 * to specify alignment consistent with the size of the object being
794 * allocated.
795 */
796 static void *
797 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
798 {
799 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
800 }
801
802 /* Common code for setting tsb_alloc_hiwater. */
803 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
804 ptob(pages) / tsb_alloc_hiwater_factor
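/*
 * For example (illustrative): on a machine with 4GB of physical memory
 * and the default tsb_alloc_hiwater_factor of 32, this sets the high
 * water mark to 4GB / 32 = 128MB of TSB slab memory; beyond that point
 * TSB allocations fall back to the kernel heap (see hat_init()).
 */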
805
806 /*
807 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
808 * a single TSB. physmem is the number of physical pages so we need physmem 8K
809 * TTEs to represent all those physical pages. We round this up by using
810 * 1<<highbit(). To figure out which size code to use, remember that the size
811 * code is just an amount to shift the smallest TSB size to get the size of
812 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
813 * highbit() - 1) to get the size code for the smallest TSB that can represent
814 * all of physical memory, while erring on the side of too much.
815 *
816 * Restrict tsb_max_growsize to make sure that:
817 * 1) TSBs can't grow larger than the TSB slab size
818 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
819 */
820 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
821 int _i, _szc, _slabszc, _tsbszc; \
822 \
823 _i = highbit(pages); \
824 if ((1 << (_i - 1)) == (pages)) \
825 _i--; /* 2^n case, round down */ \
826 _szc = _i - TSB_START_SIZE; \
827 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
828 _tsbszc = MIN(_szc, _slabszc); \
829 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
830 }
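/*
 * Worked example (illustrative): with physmem = 2^21 pages (16GB of 8K
 * pages), highbit(pages) is 22 and the exact power-of-two case rounds _i
 * down to 21, so _szc = 21 - TSB_START_SIZE is the smallest size code
 * whose TSB holds one entry per physical page; it is then clamped to the
 * slab size code and to UTSB_MAX_SZCODE.
 */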
831
832 /*
833 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
834 * tsb_info which handles that TTE size.
835 */
836 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
837 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
838 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
839 sfmmu_hat_lock_held(sfmmup)); \
840 if ((tte_szc) >= TTE4M) { \
841 ASSERT((tsbinfop) != NULL); \
842 (tsbinfop) = (tsbinfop)->tsb_next; \
843 } \
844 }
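/*
 * In other words, the first tsb_info on sfmmu_tsb covers TTE sizes below
 * 4M, and when the requested TTE size is 4M or larger the macro steps to
 * the second tsb_info on the list (which must exist in that case).
 */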
845
846 /*
847 * Macro to use to unload entries from the TSB.
848 * It has knowledge of which page sizes get replicated in the TSB
849 * and will call the appropriate unload routine for the appropriate size.
850 */
851 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
852 { \
853 int ttesz = get_hblk_ttesz(hmeblkp); \
854 if (ttesz == TTE8K || ttesz == TTE4M) { \
855 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
856 } else { \
857 caddr_t sva = ismhat ? addr : \
858 (caddr_t)get_hblk_base(hmeblkp); \
859 caddr_t eva = sva + get_hblk_span(hmeblkp); \
860 ASSERT(addr >= sva && addr < eva); \
861 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
862 } \
863 }
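/*
 * Note: 8K and 4M translations occupy a single TSB index, so one
 * sfmmu_unload_tsb() call suffices; the other page sizes are replicated
 * across their hmeblk's span as smaller TSB entries, so the whole span
 * (or, for ISM, the span starting at addr) is swept with
 * sfmmu_unload_tsb_range().
 */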
864
865
866 /* Update tsb_alloc_hiwater after memory is configured. */
867 /*ARGSUSED*/
868 static void
869 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
870 {
871 /* Assumes physmem has already been updated. */
872 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
873 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
874 }
875
876 /*
877 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
878 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
879 * deleted.
880 */
881 /*ARGSUSED*/
882 static int
883 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
884 {
885 return (0);
886 }
887
888 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
889 /*ARGSUSED*/
890 static void
891 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
892 {
893 /*
894 * Whether the delete was cancelled or not, just go ahead and update
895 * tsb_alloc_hiwater and tsb_max_growsize.
896 */
897 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
898 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
899 }
900
901 static kphysm_setup_vector_t sfmmu_update_vec = {
902 KPHYSM_SETUP_VECTOR_VERSION, /* version */
903 sfmmu_update_post_add, /* post_add */
904 sfmmu_update_pre_del, /* pre_del */
905 sfmmu_update_post_del /* post_del */
906 };
907
908
909 /*
910 * HME_BLK HASH PRIMITIVES
911 */
912
913 /*
914 * Enter a hme on the mapping list for page pp.
915 * When large pages are more prevalent in the system we might want to
916 * keep the mapping list in ascending order by the hment size. For now,
917 * small pages are more frequent, so don't slow it down.
918 */
919 #define HME_ADD(hme, pp) \
920 { \
921 ASSERT(sfmmu_mlist_held(pp)); \
922 \
923 hme->hme_prev = NULL; \
924 hme->hme_next = pp->p_mapping; \
925 hme->hme_page = pp; \
926 if (pp->p_mapping) { \
927 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
928 ASSERT(pp->p_share > 0); \
929 } else { \
930 /* EMPTY */ \
931 ASSERT(pp->p_share == 0); \
932 } \
933 pp->p_mapping = hme; \
934 pp->p_share++; \
935 }
936
937 /*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
941 */
942 #define HME_SUB(hme, pp) \
943 { \
944 ASSERT(sfmmu_mlist_held(pp)); \
945 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
946 \
947 if (pp->p_mapping == NULL) { \
948 panic("hme_remove - no mappings"); \
949 } \
950 \
951 membar_stst(); /* ensure previous stores finish */ \
952 \
953 ASSERT(pp->p_share > 0); \
954 pp->p_share--; \
955 \
956 if (hme->hme_prev) { \
957 ASSERT(pp->p_mapping != hme); \
958 ASSERT(hme->hme_prev->hme_page == pp || \
959 IS_PAHME(hme->hme_prev)); \
960 hme->hme_prev->hme_next = hme->hme_next; \
961 } else { \
962 ASSERT(pp->p_mapping == hme); \
963 pp->p_mapping = hme->hme_next; \
964 ASSERT((pp->p_mapping == NULL) ? \
965 (pp->p_share == 0) : 1); \
966 } \
967 \
968 if (hme->hme_next) { \
969 ASSERT(hme->hme_next->hme_page == pp || \
970 IS_PAHME(hme->hme_next)); \
971 hme->hme_next->hme_prev = hme->hme_prev; \
972 } \
973 \
974 /* zero out the entry */ \
975 hme->hme_next = NULL; \
976 hme->hme_prev = NULL; \
977 hme->hme_page = NULL; \
978 \
979 if (hme_size(hme) > TTE8K) { \
980 /* remove mappings for remainder of large pg */ \
981 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
982 } \
983 }
984
985 /*
 * This macro returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to the hme_blk's
 * range.
989 */
990 #define HBLKTOHME(hment, hmeblkp, addr) \
991 { \
992 int index; \
993 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
994 }
995
996 /*
997 * Version of HBLKTOHME that also returns the index in hmeblkp
998 * of the hment.
999 */
1000 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1001 { \
1002 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1003 \
1004 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1005 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1006 } else \
1007 idx = 0; \
1008 \
1009 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1010 }
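/*
 * For example (illustrative): an 8K-mapping hmeblk holds NHMENTS (8)
 * hments, so an address whose 8K page number is 3 modulo 8 yields idx 3;
 * hmeblks mapping larger page sizes hold a single hment, so idx is
 * always 0.
 */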
1011
1012 /*
1013 * Disable any page sizes not supported by the CPU
1014 */
1015 void
1016 hat_init_pagesizes()
1017 {
1018 int i;
1019
1020 mmu_exported_page_sizes = 0;
1021 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1022
1023 szc_2_userszc[i] = (uint_t)-1;
1024 userszc_2_szc[i] = (uint_t)-1;
1025
1026 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1027 disable_large_pages |= (1 << i);
1028 } else {
1029 szc_2_userszc[i] = mmu_exported_page_sizes;
1030 userszc_2_szc[mmu_exported_page_sizes] = i;
1031 mmu_exported_page_sizes++;
1032 }
1033 }
1034
1035 disable_ism_large_pages |= disable_large_pages;
1036 disable_auto_data_large_pages = disable_large_pages;
1037 disable_auto_text_large_pages = disable_large_pages;
1038
1039 /*
1040 * Initialize mmu-specific large page sizes.
1041 */
1042 if (&mmu_large_pages_disabled) {
1043 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1044 disable_ism_large_pages |=
1045 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1046 disable_auto_data_large_pages |=
1047 mmu_large_pages_disabled(HAT_AUTO_DATA);
1048 disable_auto_text_large_pages |=
1049 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1050 }
1051 }
1052
1053 /*
1054 * Initialize the hardware address translation structures.
1055 */
1056 void
1057 hat_init(void)
1058 {
1059 int i;
1060 uint_t sz;
1061 size_t size;
1062
1063 hat_lock_init();
1064 hat_kstat_init();
1065
1066 /*
1067 * Hardware-only bits in a TTE
1068 */
1069 MAKE_TTE_MASK(&hw_tte);
1070
1071 hat_init_pagesizes();
1072
1073 /* Initialize the hash locks */
1074 for (i = 0; i < khmehash_num; i++) {
1075 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1076 MUTEX_DEFAULT, NULL);
1077 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1078 }
1079 for (i = 0; i < uhmehash_num; i++) {
1080 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1081 MUTEX_DEFAULT, NULL);
1082 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1083 }
1084 khmehash_num--; /* make sure counter starts from 0 */
1085 uhmehash_num--; /* make sure counter starts from 0 */
1086
1087 /*
1088 * Allocate context domain structures.
1089 *
1090 * A platform may choose to modify max_mmu_ctxdoms in
1091 * set_platform_defaults(). If a platform does not define
1092 * a set_platform_defaults() or does not choose to modify
1093 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1094 *
1095 * For all platforms that have CPUs sharing MMUs, this
1096 * value must be defined.
1097 */
1098 if (max_mmu_ctxdoms == 0)
1099 max_mmu_ctxdoms = max_ncpus;
1100
1101 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1102 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1103
1104 /* mmu_ctx_t is 64 bytes aligned */
1105 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1106 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1107 /*
1108 * MMU context domain initialization for the Boot CPU.
1109 * This needs the context domains array allocated above.
1110 */
1111 mutex_enter(&cpu_lock);
1112 sfmmu_cpu_init(CPU);
1113 mutex_exit(&cpu_lock);
1114
1115 /*
 * Initialize the ism mapping list lock.
1117 */
1118
1119 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1120
1121 /*
1122 * Each sfmmu structure carries an array of MMU context info
1123 * structures, one per context domain. The size of this array depends
1124 * on the maximum number of context domains. So, the size of the
1125 * sfmmu structure varies per platform.
1126 *
1127 * sfmmu is allocated from static arena, because trap
1128 * handler at TL > 0 is not allowed to touch kernel relocatable
1129 * memory. sfmmu's alignment is changed to 64 bytes from
1130 * default 8 bytes, as the lower 6 bits will be used to pass
1131 * pgcnt to vtag_flush_pgcnt_tl1.
1132 */
1133 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1134
1135 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1136 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1137 NULL, NULL, static_arena, 0);
1138
1139 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1140 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1141
1142 /*
1143 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1144 * from the heap when low on memory or when TSB_FORCEALLOC is
1145 * specified, don't use magazines to cache them--we want to return
1146 * them to the system as quickly as possible.
1147 */
1148 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1149 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1150 static_arena, KMC_NOMAGAZINE);
1151
1152 /*
1153 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1154 * memory, which corresponds to the old static reserve for TSBs.
1155 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1156 * memory we'll allocate for TSB slabs; beyond this point TSB
1157 * allocations will be taken from the kernel heap (via
1158 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1159 * consumer.
1160 */
1161 if (tsb_alloc_hiwater_factor == 0) {
1162 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1163 }
1164 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1165
1166 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1167 if (!(disable_large_pages & (1 << sz)))
1168 break;
1169 }
1170
1171 if (sz < tsb_slab_ttesz) {
1172 tsb_slab_ttesz = sz;
1173 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1174 tsb_slab_size = 1 << tsb_slab_shift;
1175 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1176 use_bigtsb_arena = 0;
1177 } else if (use_bigtsb_arena &&
1178 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1179 use_bigtsb_arena = 0;
1180 }
1181
1182 if (!use_bigtsb_arena) {
1183 bigtsb_slab_shift = tsb_slab_shift;
1184 }
1185 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1186
1187 /*
1188 * On smaller memory systems, allocate TSB memory in smaller chunks
1189 * than the default 4M slab size. We also honor disable_large_pages
1190 * here.
1191 *
1192 * The trap handlers need to be patched with the final slab shift,
1193 * since they need to be able to construct the TSB pointer at runtime.
1194 */
1195 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1196 !(disable_large_pages & (1 << TTE512K))) {
1197 tsb_slab_ttesz = TTE512K;
1198 tsb_slab_shift = MMU_PAGESHIFT512K;
1199 tsb_slab_size = MMU_PAGESIZE512K;
1200 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1201 use_bigtsb_arena = 0;
1202 }
1203
1204 if (!use_bigtsb_arena) {
1205 bigtsb_slab_ttesz = tsb_slab_ttesz;
1206 bigtsb_slab_shift = tsb_slab_shift;
1207 bigtsb_slab_size = tsb_slab_size;
1208 bigtsb_slab_mask = tsb_slab_mask;
1209 }
1210
1211
1212 /*
1213 * Set up memory callback to update tsb_alloc_hiwater and
1214 * tsb_max_growsize.
1215 */
1216 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1217 ASSERT(i == 0);
1218
1219 /*
1220 * kmem_tsb_arena is the source from which large TSB slabs are
1221 * drawn. The quantum of this arena corresponds to the largest
1222 * TSB size we can dynamically allocate for user processes.
1223 * Currently it must also be a supported page size since we
1224 * use exactly one translation entry to map each slab page.
1225 *
1226 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1227 * which most TSBs are allocated. Since most TSB allocations are
1228 * typically 8K we have a kmem cache we stack on top of each
1229 * kmem_tsb_default_arena to speed up those allocations.
1230 *
1231 * Note the two-level scheme of arenas is required only
1232 * because vmem_create doesn't allow us to specify alignment
1233 * requirements. If this ever changes the code could be
1234 * simplified to use only one level of arenas.
1235 *
1236 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1237 * will be provided in addition to the 4M kmem_tsb_arena.
1238 */
1239 if (use_bigtsb_arena) {
1240 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1241 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1242 vmem_xfree, heap_arena, 0, VM_SLEEP);
1243 }
1244
1245 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1246 sfmmu_vmem_xalloc_aligned_wrapper,
1247 vmem_xfree, heap_arena, 0, VM_SLEEP);
1248
1249 if (tsb_lgrp_affinity) {
1250 char s[50];
1251 for (i = 0; i < NLGRPS_MAX; i++) {
1252 if (use_bigtsb_arena) {
1253 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1254 kmem_bigtsb_default_arena[i] = vmem_create(s,
1255 NULL, 0, 2 * tsb_slab_size,
1256 sfmmu_tsb_segkmem_alloc,
1257 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1258 0, VM_SLEEP | VM_BESTFIT);
1259 }
1260
1261 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1262 kmem_tsb_default_arena[i] = vmem_create(s,
1263 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1264 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1265 VM_SLEEP | VM_BESTFIT);
1266
1267 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1268 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1269 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1270 kmem_tsb_default_arena[i], 0);
1271 }
1272 } else {
1273 if (use_bigtsb_arena) {
1274 kmem_bigtsb_default_arena[0] =
1275 vmem_create("kmem_bigtsb_default", NULL, 0,
1276 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1277 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1278 VM_SLEEP | VM_BESTFIT);
1279 }
1280
1281 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1282 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1283 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1284 VM_SLEEP | VM_BESTFIT);
1285 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1286 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1287 kmem_tsb_default_arena[0], 0);
1288 }
1289
1290 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1291 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1292 sfmmu_hblkcache_destructor,
1293 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1294 hat_memload_arena, KMC_NOHASH);
1295
1296 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1297 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1298 VMC_DUMPSAFE | VM_SLEEP);
1299
1300 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1301 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1302 sfmmu_hblkcache_destructor,
1303 NULL, (void *)HME1BLK_SZ,
1304 hat_memload1_arena, KMC_NOHASH);
1305
1306 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1307 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1308
1309 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1310 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1311 NULL, NULL, static_arena, KMC_NOHASH);
1312
1313 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1314 sizeof (ism_ment_t), 0, NULL, NULL,
1315 NULL, NULL, NULL, 0);
1316
1317 /*
 * We grab the first hat for the kernel.
1319 */
1320 AS_LOCK_ENTER(&kas, RW_WRITER);
1321 kas.a_hat = hat_alloc(&kas);
1322 AS_LOCK_EXIT(&kas);
1323
1324 /*
1325 * Initialize hblk_reserve.
1326 */
1327 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1328 va_to_pa((caddr_t)hblk_reserve);
1329
1330 #ifndef UTSB_PHYS
1331 /*
1332 * Reserve some kernel virtual address space for the locked TTEs
1333 * that allow us to probe the TSB from TL>0.
1334 */
1335 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1336 0, 0, NULL, NULL, VM_SLEEP);
1337 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1338 0, 0, NULL, NULL, VM_SLEEP);
1339 #endif
1340
1341 #ifdef VAC
1342 /*
 * The big page VAC handling code assumes the VAC
 * will not be bigger than the smallest big
 * page, which is 64K.
1346 */
1347 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1348 cmn_err(CE_PANIC, "VAC too big!");
1349 }
1350 #endif
1351
1352 uhme_hash_pa = va_to_pa(uhme_hash);
1353 khme_hash_pa = va_to_pa(khme_hash);
1354
1355 /*
1356 * Initialize relocation locks. kpr_suspendlock is held
1357 * at PIL_MAX to prevent interrupts from pinning the holder
1358 * of a suspended TTE which may access it leading to a
1359 * deadlock condition.
1360 */
1361 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1362 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1363
1364 /*
 * If shared context support is disabled via /etc/system,
 * set shctx_on to 0 here if it was set to 1 earlier in the boot
 * sequence by CPU module initialization code.
1368 */
1369 if (shctx_on && disable_shctx) {
1370 shctx_on = 0;
1371 }
1372
1373 if (shctx_on) {
1374 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1375 sizeof (srd_buckets[0]), KM_SLEEP);
1376 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1377 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1378 MUTEX_DEFAULT, NULL);
1379 }
1380
1381 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1382 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1383 NULL, NULL, NULL, 0);
1384 region_cache = kmem_cache_create("region_cache",
1385 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1386 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1387 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1388 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1389 NULL, NULL, NULL, 0);
1390 }
1391
1392 /*
1393 * Pre-allocate hrm_hashtab before enabling the collection of
 * refmod statistics. Allocating on the fly would mean running
 * the risk of suffering recursive mutex enters or deadlocks.
1397 */
1398 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1399 KM_SLEEP);
1400
1401 /* Allocate per-cpu pending freelist of hmeblks */
1402 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1403 KM_SLEEP);
1404 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1405 (uintptr_t)cpu_hme_pend, 64);
1406
1407 for (i = 0; i < NCPU; i++) {
1408 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1409 NULL);
1410 }
1411
1412 if (cpu_hme_pend_thresh == 0) {
1413 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1414 }
1415 }
1416
1417 /*
1418 * Initialize locking for the hat layer, called early during boot.
1419 */
1420 static void
1421 hat_lock_init()
1422 {
1423 int i;
1424
1425 /*
1426 * initialize the array of mutexes protecting a page's mapping
1427 * list and p_nrm field.
1428 */
1429 for (i = 0; i < MML_TABLE_SIZE; i++)
1430 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1431
1432 if (kpm_enable) {
1433 for (i = 0; i < kpmp_table_sz; i++) {
1434 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1435 MUTEX_DEFAULT, NULL);
1436 }
1437 }
1438
1439 /*
1440 * Initialize array of mutex locks that protects sfmmu fields and
1441 * TSB lists.
1442 */
1443 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1445 NULL);
1446 }
1447
1448 #define SFMMU_KERNEL_MAXVA \
1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1450
1451 /*
1452 * Allocate a hat structure.
1453 * Called when an address space first uses a hat.
1454 */
1455 struct hat *
1456 hat_alloc(struct as *as)
1457 {
1458 sfmmu_t *sfmmup;
1459 int i;
1460 uint64_t cnum;
1461 extern uint_t get_color_start(struct as *);
1462
1463 ASSERT(AS_WRITE_HELD(as));
1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1465 sfmmup->sfmmu_as = as;
1466 sfmmup->sfmmu_flags = 0;
1467 sfmmup->sfmmu_tteflags = 0;
1468 sfmmup->sfmmu_rtteflags = 0;
1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1470
1471 if (as == &kas) {
1472 ksfmmup = sfmmup;
1473 sfmmup->sfmmu_cext = 0;
1474 cnum = KCONTEXT;
1475
1476 sfmmup->sfmmu_clrstart = 0;
1477 sfmmup->sfmmu_tsb = NULL;
1478 /*
1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1480 * to setup tsb_info for ksfmmup.
1481 */
1482 } else {
1483
1484 /*
1485 * Just set to invalid ctx. When it faults, it will
1486 * get a valid ctx. This would avoid the situation
1487 * where we get a ctx, but it gets stolen and then
1488 * we fault when we try to run and so have to get
1489 * another ctx.
1490 */
1491 sfmmup->sfmmu_cext = 0;
1492 cnum = INVALID_CONTEXT;
1493
1494 /* initialize original physical page coloring bin */
1495 sfmmup->sfmmu_clrstart = get_color_start(as);
1496 #ifdef DEBUG
1497 if (tsb_random_size) {
1498 uint32_t randval = (uint32_t)gettick() >> 4;
1499 int size = randval % (tsb_max_growsize + 1);
1500
1501 /* chose a random tsb size for stress testing */
1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1503 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1504 } else
1505 #endif /* DEBUG */
1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1507 default_tsb_size,
1508 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1510 ASSERT(sfmmup->sfmmu_tsb != NULL);
1511 }
1512
1513 ASSERT(max_mmu_ctxdoms > 0);
1514 for (i = 0; i < max_mmu_ctxdoms; i++) {
1515 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1516 sfmmup->sfmmu_ctxs[i].gnum = 0;
1517 }
1518
1519 for (i = 0; i < max_mmu_page_sizes; i++) {
1520 sfmmup->sfmmu_ttecnt[i] = 0;
1521 sfmmup->sfmmu_scdrttecnt[i] = 0;
1522 sfmmup->sfmmu_ismttecnt[i] = 0;
1523 sfmmup->sfmmu_scdismttecnt[i] = 0;
1524 sfmmup->sfmmu_pgsz[i] = TTE8K;
1525 }
1526 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1527 sfmmup->sfmmu_iblk = NULL;
1528 sfmmup->sfmmu_ismhat = 0;
1529 sfmmup->sfmmu_scdhat = 0;
1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1531 if (sfmmup == ksfmmup) {
1532 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1533 } else {
1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1535 }
1536 sfmmup->sfmmu_free = 0;
1537 sfmmup->sfmmu_rmstat = 0;
1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1539 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1540 sfmmup->sfmmu_srdp = NULL;
1541 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1542 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1543 sfmmup->sfmmu_scdp = NULL;
1544 sfmmup->sfmmu_scd_link.next = NULL;
1545 sfmmup->sfmmu_scd_link.prev = NULL;
1546 return (sfmmup);
1547 }
1548
1549 /*
1550 * Create per-MMU context domain kstats for a given MMU ctx.
1551 */
1552 static void
1553 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1554 {
1555 mmu_ctx_stat_t stat;
1556 kstat_t *mmu_kstat;
1557
1558 ASSERT(MUTEX_HELD(&cpu_lock));
1559 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1560
1561 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1562 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1563
1564 if (mmu_kstat == NULL) {
1565 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1566 mmu_ctxp->mmu_idx);
1567 } else {
1568 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1569 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1570 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1571 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1572 mmu_ctxp->mmu_kstat = mmu_kstat;
1573 kstat_install(mmu_kstat);
1574 }
1575 }
1576
1577 /*
1578 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1579 * context domain information for a given CPU. If a platform does not
1580 * specify that interface, then the function below is used instead to return
1581 * default information. The defaults are as follows:
1582 *
1583 * - The number of MMU context IDs supported on any CPU in the
1584 * system is 8K.
1585 * - There is one MMU context domain per CPU.
1586 */
1587 /*ARGSUSED*/
1588 static void
1589 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1590 {
1591 infop->mmu_nctxs = nctxs;
1592 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1593 }
1594
1595 /*
1596 * Called during CPU initialization to set the MMU context-related information
1597 * for a CPU.
1598 *
1599 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1600 */
1601 void
1602 sfmmu_cpu_init(cpu_t *cp)
1603 {
1604 mmu_ctx_info_t info;
1605 mmu_ctx_t *mmu_ctxp;
1606
1607 ASSERT(MUTEX_HELD(&cpu_lock));
1608
1609 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1610 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1611 else
1612 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1613
1614 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1615
1616 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1617 /* Each mmu_ctx is cacheline aligned. */
1618 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1619 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1620
1621 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1622 (void *)ipltospl(DISP_LEVEL));
1623 mmu_ctxp->mmu_idx = info.mmu_idx;
1624 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1625 /*
1626 * Globally, for the lifetime of a system,
1627 * gnum must always increase.
1628 * mmu_saved_gnum is protected by the cpu_lock.
1629 */
1630 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1631 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1632
1633 sfmmu_mmu_kstat_create(mmu_ctxp);
1634
1635 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1636 } else {
1637 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1638 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1639 }
1640
1641 /*
1642 * The mmu_lock is acquired here to prevent races with
1643 * the wrap-around code.
1644 */
1645 mutex_enter(&mmu_ctxp->mmu_lock);
1646
1648 mmu_ctxp->mmu_ncpus++;
1649 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1650 CPU_MMU_IDX(cp) = info.mmu_idx;
1651 CPU_MMU_CTXP(cp) = mmu_ctxp;
1652
1653 mutex_exit(&mmu_ctxp->mmu_lock);
1654 }
1655
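/*
 * Tear down an MMU context domain that no longer has any CPUs associated
 * with it: destroy its lock, delete its kstats, fold its gnum into
 * mmu_saved_gnum so generation numbers keep increasing across domain
 * lifetimes, and free the structure back to mmuctxdom_cache.
 */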
1656 static void
1657 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1658 {
1659 ASSERT(MUTEX_HELD(&cpu_lock));
1660 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1661
1662 mutex_destroy(&mmu_ctxp->mmu_lock);
1663
1664 if (mmu_ctxp->mmu_kstat)
1665 kstat_delete(mmu_ctxp->mmu_kstat);
1666
1667 /* mmu_saved_gnum is protected by the cpu_lock. */
1668 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1669 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1670
1671 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1672 }
1673
1674 /*
1675 * Called to perform MMU context-related cleanup for a CPU.
1676 */
1677 void
1678 sfmmu_cpu_cleanup(cpu_t *cp)
1679 {
1680 mmu_ctx_t *mmu_ctxp;
1681
1682 ASSERT(MUTEX_HELD(&cpu_lock));
1683
1684 mmu_ctxp = CPU_MMU_CTXP(cp);
1685 ASSERT(mmu_ctxp != NULL);
1686
1687 /*
1688 * The mmu_lock is acquired here to prevent races with
1689 * the wrap-around code.
1690 */
1691 mutex_enter(&mmu_ctxp->mmu_lock);
1692
1693 CPU_MMU_CTXP(cp) = NULL;
1694
1695 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1696 if (--mmu_ctxp->mmu_ncpus == 0) {
1697 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1698 mutex_exit(&mmu_ctxp->mmu_lock);
1699 sfmmu_ctxdom_free(mmu_ctxp);
1700 return;
1701 }
1702
1703 mutex_exit(&mmu_ctxp->mmu_lock);
1704 }
1705
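/*
 * Return the number of context IDs supported by MMU context domain idx.
 */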
1706 uint_t
1707 sfmmu_ctxdom_nctxs(int idx)
1708 {
1709 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1710 }
1711
1712 #ifdef sun4v
1713 /*
1714 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1715 * consistent across suspend/resume on systems that can resume on different
1716 * hardware than that on which they were suspended.
1717 *
1718 * sfmmu_ctxdoms_lock() locks all context domains and prevents new contexts
1719 * from being allocated. It acquires all hat_locks, which blocks most access to
1720 * context data, except for a few cases that are handled separately or are
1721 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1722 * contexts, and forces cnum to its max. As a result of this call, all user
1723 * threads that are running on CPUs trap and try to perform wrap-around but
1724 * can't because hat_locks are taken. Threads that were not on CPUs but were
1725 * started by the scheduler go to sfmmu_alloc_ctx() to acquire a context
1726 * without checking hat_lock, but fail because cnum == nctxs, and therefore
1727 * also trap and block on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must
1728 * be called before CPUs are paused, else it could deadlock acquiring locks held by paused CPUs.
1729 *
1730 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1731 * the CPUs that had them. It must be called after CPUs have been paused. This
1732 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1733 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1734 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1735 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1736 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1737 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1738 * accessing the old context domains.
1739 *
1740 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1741 * allocates new context domains based on hardware layout. It initializes
1742 * every CPU that had a context domain before migration to have one again.
1743 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1744 * could deadlock acquiring locks held by paused CPUs.
1745 *
1746 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1747 * acquire new context ids and continue execution.
1748 *
1749 * Therefore the functions should be called in the following order:
1750 *	suspend_routine()
1751 *		sfmmu_ctxdoms_lock()
1752 *		pause_cpus()
1753 *		suspend()
1754 *			if (suspend failed)
1755 *				sfmmu_ctxdoms_unlock()
1756 *		...
1757 *		sfmmu_ctxdoms_remove()
1758 *		resume_cpus()
1759 *		sfmmu_ctxdoms_update()
1760 *		sfmmu_ctxdoms_unlock()
1761 */
1762 static cpuset_t sfmmu_ctxdoms_pset;
1763
1764 void
1765 sfmmu_ctxdoms_remove()
1766 {
1767 processorid_t id;
1768 cpu_t *cp;
1769
1770 /*
1771 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1772 * be restored post-migration. A CPU may be powered off and not have a
1773 * domain, for example.
1774 */
1775 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1776
1777 for (id = 0; id < NCPU; id++) {
1778 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1779 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1780 CPU_MMU_CTXP(cp) = NULL;
1781 }
1782 }
1783 }
1784
1785 void
1786 sfmmu_ctxdoms_lock(void)
1787 {
1788 int idx;
1789 mmu_ctx_t *mmu_ctxp;
1790
1791 sfmmu_hat_lock_all();
1792
1793 /*
1794 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1795 * hat_lock is always taken before calling it.
1796 *
1797 * For each domain, set mmu_cnum to max so no more contexts can be
1798 * allocated, and wrap to flush on-CPU contexts and force threads to
1799 * acquire a new context when we later drop hat_lock after migration.
1800 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1801 * but the latter uses CAS and will miscompare and not overwrite it.
1802 */
1803 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1804 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1805 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1806 mutex_enter(&mmu_ctxp->mmu_lock);
1807 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1808 /* make sure updated cnum visible */
1809 membar_enter();
1810 mutex_exit(&mmu_ctxp->mmu_lock);
1811 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1812 }
1813 }
1814 kpreempt_enable();
1815 }
1816
1817 void
1818 sfmmu_ctxdoms_unlock(void)
1819 {
1820 sfmmu_hat_unlock_all();
1821 }
1822
1823 void
1824 sfmmu_ctxdoms_update(void)
1825 {
1826 processorid_t id;
1827 cpu_t *cp;
1828 uint_t idx;
1829 mmu_ctx_t *mmu_ctxp;
1830
1831 /*
1832 * Free all context domains. As a side effect, this increases
1833 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1834 * init gnum in the new domains, which therefore will be larger than the
1835 * sfmmu gnum for any process, guaranteeing that every process will see
1836 * a new generation and allocate a new context regardless of what new
1837 * domain it runs in.
1838 */
1839 mutex_enter(&cpu_lock);
1840
1841 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1842 if (mmu_ctxs_tbl[idx] != NULL) {
1843 mmu_ctxp = mmu_ctxs_tbl[idx];
1844 mmu_ctxs_tbl[idx] = NULL;
1845 sfmmu_ctxdom_free(mmu_ctxp);
1846 }
1847 }
1848
1849 for (id = 0; id < NCPU; id++) {
1850 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1851 (cp = cpu[id]) != NULL)
1852 sfmmu_cpu_init(cp);
1853 }
1854 mutex_exit(&cpu_lock);
1855 }
1856 #endif
1857
1858 /*
1859 * hat_setup() makes an address space context the currently active one.
1860 * In sfmmu this translates to programming the secondary context register
1861 * with the corresponding context number.
1862 */
1863 void
1864 hat_setup(struct hat *sfmmup, int allocflag)
1865 {
1866 hatlock_t *hatlockp;
1867
1868 /* Init needs some special treatment. */
1869 if (allocflag == HAT_INIT) {
1870 /*
1871 * Make sure that we have
1872 * 1. a TSB
1873 * 2. a valid ctx that doesn't get stolen after this point.
1874 */
1875 hatlockp = sfmmu_hat_enter(sfmmup);
1876
1877 /*
1878 * Swap in the TSB. hat_init() allocates tsbinfos without
1879 * TSBs, but we need one for init, since the kernel does some
1880 * special things to set up its stack and needs the TSB to
1881 * resolve page faults.
1882 */
1883 sfmmu_tsb_swapin(sfmmup, hatlockp);
1884
1885 sfmmu_get_ctx(sfmmup);
1886
1887 sfmmu_hat_exit(hatlockp);
1888 } else {
1889 ASSERT(allocflag == HAT_ALLOC);
1890
1891 hatlockp = sfmmu_hat_enter(sfmmup);
1892 kpreempt_disable();
1893
1894 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1895 /*
1896 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter; the pagesize
1897 * bits don't matter in this case since we are passing
1898 * INVALID_CONTEXT to it.
1899 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1900 */
1901 sfmmu_setctx_sec(INVALID_CONTEXT);
1902 sfmmu_clear_utsbinfo();
1903
1904 kpreempt_enable();
1905 sfmmu_hat_exit(hatlockp);
1906 }
1907 }
1908
1909 /*
1910 * Free all the translation resources for the specified address space.
1911 * Called from as_free when an address space is being destroyed.
1912 */
1913 void
1914 hat_free_start(struct hat *sfmmup)
1915 {
1916 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1917 ASSERT(sfmmup != ksfmmup);
1918
1919 sfmmup->sfmmu_free = 1;
1920 if (sfmmup->sfmmu_scdp != NULL) {
1921 sfmmu_leave_scd(sfmmup, 0);
1922 }
1923
1924 ASSERT(sfmmup->sfmmu_scdp == NULL);
1925 }
1926
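/*
 * Final phase of address space teardown. All translations must already have
 * been unloaded (asserted by the zero ttecnt's below); free the remaining
 * per-hat resources (rm stats, tsbinfos, SRD membership and region link
 * arrays) and return the sfmmu structure to sfmmuid_cache.
 */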
1927 void
1928 hat_free_end(struct hat *sfmmup)
1929 {
1930 int i;
1931
1932 ASSERT(sfmmup->sfmmu_free == 1);
1933 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1939
1940 if (sfmmup->sfmmu_rmstat) {
1941 hat_freestat(sfmmup->sfmmu_as, NULL);
1942 }
1943
1944 while (sfmmup->sfmmu_tsb != NULL) {
1945 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1946 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1947 sfmmup->sfmmu_tsb = next;
1948 }
1949
1950 if (sfmmup->sfmmu_srdp != NULL) {
1951 sfmmu_leave_srd(sfmmup);
1952 ASSERT(sfmmup->sfmmu_srdp == NULL);
1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1956 SFMMU_L2_HMERLINKS_SIZE);
1957 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1958 }
1959 }
1960 }
1961 sfmmu_free_sfmmu(sfmmup);
1962
1963 #ifdef DEBUG
1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1966 }
1967 #endif
1968
1969 kmem_cache_free(sfmmuid_cache, sfmmup);
1970 }
1971
1972 /*
1973 * Duplicate the translations of an as into another newas
1974 */
1975 /* ARGSUSED */
1976 int
1977 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
1978 uint_t flag)
1979 {
1980 sf_srd_t *srdp;
1981 sf_scd_t *scdp;
1982 int i;
1983 extern uint_t get_color_start(struct as *);
1984
1985 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
1986 (flag == HAT_DUP_SRD));
1987 ASSERT(hat != ksfmmup);
1988 ASSERT(newhat != ksfmmup);
1989 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
1990
1991 if (flag == HAT_DUP_COW) {
1992 panic("hat_dup: HAT_DUP_COW not supported");
1993 }
1994
1995 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
1996 ASSERT(srdp->srd_evp != NULL);
1997 VN_HOLD(srdp->srd_evp);
1998 ASSERT(srdp->srd_refcnt > 0);
1999 newhat->sfmmu_srdp = srdp;
2000 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2001 }
2002
2003 /*
2004 * HAT_DUP_ALL flag is used after as duplication is done.
2005 */
2006 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2007 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2008 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2009 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2010 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2011 }
2012
2013 /* check if need to join scd */
2014 if ((scdp = hat->sfmmu_scdp) != NULL &&
2015 newhat->sfmmu_scdp != scdp) {
2016 int ret;
2017 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2018 &scdp->scd_region_map, ret);
2019 ASSERT(ret);
2020 sfmmu_join_scd(scdp, newhat);
2021 ASSERT(newhat->sfmmu_scdp == scdp &&
2022 scdp->scd_refcnt >= 2);
2023 for (i = 0; i < max_mmu_page_sizes; i++) {
2024 newhat->sfmmu_ismttecnt[i] =
2025 hat->sfmmu_ismttecnt[i];
2026 newhat->sfmmu_scdismttecnt[i] =
2027 hat->sfmmu_scdismttecnt[i];
2028 }
2029 }
2030
2031 sfmmu_check_page_sizes(newhat, 1);
2032 }
2033
2034 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2035 update_proc_pgcolorbase_after_fork != 0) {
2036 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2037 }
2038 return (0);
2039 }
2040
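/*
 * hat_memload() loads a single 8K translation for page pp at addr. It is a
 * thin wrapper around the common worker hat_do_memload(), called with no
 * shared-region id.
 */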
2041 void
2042 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2043 uint_t attr, uint_t flags)
2044 {
2045 hat_do_memload(hat, addr, pp, attr, flags,
2046 SFMMU_INVALID_SHMERID);
2047 }
2048
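/*
 * Shared-region variant of hat_memload(): the region cookie identifies the
 * HME region the mapping belongs to. An invalid cookie degenerates to the
 * private (non-region) case.
 */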
2049 void
2050 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2051 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2052 {
2053 uint_t rid;
2054 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2055 hat_do_memload(hat, addr, pp, attr, flags,
2056 SFMMU_INVALID_SHMERID);
2057 return;
2058 }
2059 rid = (uint_t)((uint64_t)rcookie);
2060 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2061 hat_do_memload(hat, addr, pp, attr, flags, rid);
2062 }
2063
2064 /*
2065 * Set up addr to map to page pp with the protection given by attr.
2066 * As an optimization we also load the TSB with the
2067 * corresponding tte, but it is no big deal if the tte gets kicked out.
2068 */
2069 static void
2070 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2071 uint_t attr, uint_t flags, uint_t rid)
2072 {
2073 tte_t tte;
2074
2076 ASSERT(hat != NULL);
2077 ASSERT(PAGE_LOCKED(pp));
2078 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2079 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2080 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2081 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2082
2083 if (PP_ISFREE(pp)) {
2084 panic("hat_memload: loading a mapping to free page %p",
2085 (void *)pp);
2086 }
2087
2088 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2089
2090 if (flags & ~SFMMU_LOAD_ALLFLAG)
2091 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2092 flags & ~SFMMU_LOAD_ALLFLAG);
2093
2094 if (hat->sfmmu_rmstat)
2095 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2096
2097 #if defined(SF_ERRATA_57)
2098 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2099 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2100 !(flags & HAT_LOAD_SHARE)) {
2101 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2102 " page executable");
2103 attr &= ~PROT_EXEC;
2104 }
2105 #endif
2106
2107 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2108 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2109
2110 /*
2111 * Check TSB and TLB page sizes.
2112 */
2113 if ((flags & HAT_LOAD_SHARE) == 0) {
2114 sfmmu_check_page_sizes(hat, 1);
2115 }
2116 }
2117
2118 /*
2119 * hat_devload can be called to map real memory (e.g.
2120 * /dev/kmem) and even though hat_devload will determine pf is
2121 * for memory, it will be unable to get a shared lock on the
2122 * page (because someone else has it exclusively) and will
2123 * pass pp = NULL. If tteload doesn't get a non-NULL
2124 * page pointer it can't cache memory.
2125 */
2126 void
2127 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2128 uint_t attr, int flags)
2129 {
2130 tte_t tte;
2131 struct page *pp = NULL;
2132 int use_lgpg = 0;
2133
2134 ASSERT(hat != NULL);
2135
2136 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2137 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2138 ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2139 if (len == 0)
2140 panic("hat_devload: zero len");
2141 if (flags & ~SFMMU_LOAD_ALLFLAG)
2142 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2143 flags & ~SFMMU_LOAD_ALLFLAG);
2144
2145 #if defined(SF_ERRATA_57)
2146 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2147 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2148 !(flags & HAT_LOAD_SHARE)) {
2149 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2150 " page executable");
2151 attr &= ~PROT_EXEC;
2152 }
2153 #endif
2154
2155 /*
2156 * If it's a memory page find its pp
2157 */
2158 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2159 pp = page_numtopp_nolock(pfn);
2160 if (pp == NULL) {
2161 flags |= HAT_LOAD_NOCONSIST;
2162 } else {
2163 if (PP_ISFREE(pp)) {
2164 panic("hat_devload: loading "
2165 "a mapping to free page %p",
2166 (void *)pp);
2167 }
2168 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2169 panic("hat_devload: loading a mapping "
2170 "to unlocked relocatable page %p",
2171 (void *)pp);
2172 }
2173 ASSERT(len == MMU_PAGESIZE);
2174 }
2175 }
2176
2177 if (hat->sfmmu_rmstat)
2178 hat_resvstat(len, hat->sfmmu_as, addr);
2179
2180 if (flags & HAT_LOAD_NOCONSIST) {
2181 attr |= SFMMU_UNCACHEVTTE;
2182 use_lgpg = 1;
2183 }
2184 if (!pf_is_memory(pfn)) {
2185 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2186 use_lgpg = 1;
2187 switch (attr & HAT_ORDER_MASK) {
2188 case HAT_STRICTORDER:
2189 case HAT_UNORDERED_OK:
2190 /*
2191 * we set the side effect bit for all non
2192 * memory mappings unless merging is ok
2193 */
2194 attr |= SFMMU_SIDEFFECT;
2195 break;
2196 case HAT_MERGING_OK:
2197 case HAT_LOADCACHING_OK:
2198 case HAT_STORECACHING_OK:
2199 break;
2200 default:
2201 panic("hat_devload: bad attr");
2202 break;
2203 }
2204 }
2205 while (len) {
2206 if (!use_lgpg) {
2207 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2208 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2209 flags, SFMMU_INVALID_SHMERID);
2210 len -= MMU_PAGESIZE;
2211 addr += MMU_PAGESIZE;
2212 pfn++;
2213 continue;
2214 }
2215 /*
2216 * Try to use large pages; check va/pa alignments.
2217 * Note that 32M/256M page sizes are not (yet) supported.
2218 */
2219 if ((len >= MMU_PAGESIZE4M) &&
2220 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2221 !(disable_large_pages & (1 << TTE4M)) &&
2222 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2223 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2224 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2225 flags, SFMMU_INVALID_SHMERID);
2226 len -= MMU_PAGESIZE4M;
2227 addr += MMU_PAGESIZE4M;
2228 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2229 } else if ((len >= MMU_PAGESIZE512K) &&
2230 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2231 !(disable_large_pages & (1 << TTE512K)) &&
2232 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2233 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2234 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2235 flags, SFMMU_INVALID_SHMERID);
2236 len -= MMU_PAGESIZE512K;
2237 addr += MMU_PAGESIZE512K;
2238 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2239 } else if ((len >= MMU_PAGESIZE64K) &&
2240 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2241 !(disable_large_pages & (1 << TTE64K)) &&
2242 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2243 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2244 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2245 flags, SFMMU_INVALID_SHMERID);
2246 len -= MMU_PAGESIZE64K;
2247 addr += MMU_PAGESIZE64K;
2248 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2249 } else {
2250 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2251 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2252 flags, SFMMU_INVALID_SHMERID);
2253 len -= MMU_PAGESIZE;
2254 addr += MMU_PAGESIZE;
2255 pfn++;
2256 }
2257 }
2258
2259 /*
2260 * Check TSB and TLB page sizes.
2261 */
2262 if ((flags & HAT_LOAD_SHARE) == 0) {
2263 sfmmu_check_page_sizes(hat, 1);
2264 }
2265 }
2266
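/*
 * hat_memload_array() loads translations for an array of pages, using large
 * pages where sizes and alignment allow. Like hat_memload(), it simply calls
 * the common worker with no shared-region id.
 */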
2267 void
2268 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2269 struct page **pps, uint_t attr, uint_t flags)
2270 {
2271 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2272 SFMMU_INVALID_SHMERID);
2273 }
2274
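/*
 * Shared-region variant of hat_memload_array(); see hat_memload_region()
 * above for the cookie handling.
 */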
2275 void
2276 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2277 struct page **pps, uint_t attr, uint_t flags,
2278 hat_region_cookie_t rcookie)
2279 {
2280 uint_t rid;
2281 if (rcookie == HAT_INVALID_REGION_COOKIE) {
2282 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2283 SFMMU_INVALID_SHMERID);
2284 return;
2285 }
2286 rid = (uint_t)((uint64_t)rcookie);
2287 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2288 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2289 }
2290
2291 /*
2292 * Map the largest extent possible out of the page array. The array may NOT
2293 * be in order. The largest possible mapping a page can have
2294 * is specified in the p_szc field. The p_szc field
2295 * cannot change as long as there are any mappings (large or small)
2296 * to any of the pages that make up the large page. (i.e. any
2297 * promotion/demotion of page size is not up to the hat but up to
2298 * the page free list manager). The array
2299 * should consist of properly aligned contiguous pages that are
2300 * part of a big page for a large mapping to be created.
2301 */
2302 static void
2303 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2304 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2305 {
2306 int ttesz;
2307 size_t mapsz;
2308 pgcnt_t numpg, npgs;
2309 tte_t tte;
2310 page_t *pp;
2311 uint_t large_pages_disable;
2312
2313 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2314 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2315
2316 if (hat->sfmmu_rmstat)
2317 hat_resvstat(len, hat->sfmmu_as, addr);
2318
2319 #if defined(SF_ERRATA_57)
2320 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2321 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2322 !(flags & HAT_LOAD_SHARE)) {
2323 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2324 "user page executable");
2325 attr &= ~PROT_EXEC;
2326 }
2327 #endif
2328
2329 /* Get number of pages */
2330 npgs = len >> MMU_PAGESHIFT;
2331
2332 if (flags & HAT_LOAD_SHARE) {
2333 large_pages_disable = disable_ism_large_pages;
2334 } else {
2335 large_pages_disable = disable_large_pages;
2336 }
2337
2338 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2339 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2340 rid);
2341 return;
2342 }
2343
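	/*
	 * Walk the page array a chunk at a time. For each chunk, try the
	 * largest enabled page size whose va/pfn alignment works out;
	 * otherwise fall back to 8K ttes, batched so that each batch stays
	 * within a single hmeblk.
	 */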
2344 while (npgs >= NHMENTS) {
2345 pp = *pps;
2346 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2347 /*
2348 * Check if this page size is disabled.
2349 */
2350 if (large_pages_disable & (1 << ttesz))
2351 continue;
2352
2353 numpg = TTEPAGES(ttesz);
2354 mapsz = numpg << MMU_PAGESHIFT;
2355 if ((npgs >= numpg) &&
2356 IS_P2ALIGNED(addr, mapsz) &&
2357 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2358 /*
2359 * At this point we have enough pages and
2360 * we know the virtual address and the pfn
2361 * are properly aligned. We still need
2362 * to check for physical contiguity but since
2363 * it is very likely that this is the case
2364 * we will assume they are so and undo
2365 * the request if necessary. It would
2366 * be great if we could get a hint flag
2367 * like HAT_CONTIG which would tell us
2368 * the pages are contiguous for sure.
2369 */
2370 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2371 attr, ttesz);
2372 if (!sfmmu_tteload_array(hat, &tte, addr,
2373 pps, flags, rid)) {
2374 break;
2375 }
2376 }
2377 }
2378 if (ttesz == TTE8K) {
2379 /*
2380 * We were not able to map the array using a large page;
2381 * batch a hmeblk, or a fraction of one, at a time.
2382 */
2383 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2384 & (NHMENTS-1);
2385 numpg = NHMENTS - numpg;
2386 ASSERT(numpg <= npgs);
2387 mapsz = numpg * MMU_PAGESIZE;
2388 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2389 numpg, rid);
2390 }
2391 addr += mapsz;
2392 npgs -= numpg;
2393 pps += numpg;
2394 }
2395
2396 if (npgs) {
2397 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2398 rid);
2399 }
2400
2401 /*
2402 * Check TSB and TLB page sizes.
2403 */
2404 if ((flags & HAT_LOAD_SHARE) == 0) {
2405 sfmmu_check_page_sizes(hat, 1);
2406 }
2407 }
2408
2409 /*
2410 * Function tries to batch 8K pages into the same hme blk.
2411 */
2412 static void
2413 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2414 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2415 {
2416 tte_t tte;
2417 page_t *pp;
2418 struct hmehash_bucket *hmebp;
2419 struct hme_blk *hmeblkp;
2420 int index;
2421
2422 while (npgs) {
2423 /*
2424 * Acquire the hash bucket.
2425 */
2426 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2427 rid);
2428 ASSERT(hmebp);
2429
2430 /*
2431 * Find the hment block.
2432 */
2433 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2434 TTE8K, flags, rid);
2435 ASSERT(hmeblkp);
2436
2437 do {
2438 /*
2439 * Make the tte.
2440 */
2441 pp = *pps;
2442 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2443
2444 /*
2445 * Add the translation.
2446 */
2447 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2448 vaddr, pps, flags, rid);
2449
2450 /*
2451 * Goto next page.
2452 */
2453 pps++;
2454 npgs--;
2455
2456 /*
2457 * Goto next address.
2458 */
2459 vaddr += MMU_PAGESIZE;
2460
2461 /*
2462 * Don't cross over into a different hmeblk.
2463 */
2464 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2465 (NHMENTS-1));
2466
2467 } while (index != 0 && npgs != 0);
2468
2469 /*
2470 * Release the hash bucket.
2471 */
2472
2473 sfmmu_tteload_release_hashbucket(hmebp);
2474 }
2475 }
2476
2477 /*
2478 * Construct a tte for a page:
2479 *
2480 * tte_valid = 1
2481 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2482 * tte_size = size
2483 * tte_nfo = attr & HAT_NOFAULT
2484 * tte_ie = attr & HAT_STRUCTURE_LE
2485 * tte_hmenum = hmenum
2486 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2487 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2488 * tte_ref = 1 (optimization)
2489 * tte_wr_perm = attr & PROT_WRITE;
2490 * tte_no_sync = attr & HAT_NOSYNC
2491 * tte_lock = attr & SFMMU_LOCKTTE
2492 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2493 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2494 * tte_e = attr & SFMMU_SIDEFFECT
2495 * tte_priv = !(attr & PROT_USER)
2496 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2497 * tte_glb = 0
2498 */
2499 void
2500 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2501 {
2502 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2503
2504 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2505 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2506
2507 if (TTE_IS_NOSYNC(ttep)) {
2508 TTE_SET_REF(ttep);
2509 if (TTE_IS_WRITABLE(ttep)) {
2510 TTE_SET_MOD(ttep);
2511 }
2512 }
2513 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2514 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2515 }
2516 }
2517
2518 /*
2519 * This function will add a translation to the hme_blk and allocate the
2520 * hme_blk if one does not exist.
2521 * If a page structure is specified then it will add the
2522 * corresponding hment to the mapping list.
2523 * It will also update the hmenum field for the tte.
2524 *
2525 * Currently this function is only used for kernel mappings.
2526 * So pass invalid region to sfmmu_tteload_array().
2527 */
2528 void
2529 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2530 uint_t flags)
2531 {
2532 ASSERT(sfmmup == ksfmmup);
2533 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2534 SFMMU_INVALID_SHMERID);
2535 }
2536
2537 /*
2538 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2539 * Assumes that a particular page size may only be resident in one TSB.
2540 */
2541 static void
2542 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2543 {
2544 struct tsb_info *tsbinfop = NULL;
2545 uint64_t tag;
2546 struct tsbe *tsbe_addr;
2547 uint64_t tsb_base;
2548 uint_t tsb_size;
2549 int vpshift = MMU_PAGESHIFT;
2550 int phys = 0;
2551
2552 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2553 phys = ktsb_phys;
2554 if (ttesz >= TTE4M) {
2555 #ifndef sun4v
2556 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2557 #endif
2558 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2559 tsb_size = ktsb4m_szcode;
2560 } else {
2561 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2562 tsb_size = ktsb_szcode;
2563 }
2564 } else {
2565 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2566
2567 /*
2568 * If there isn't a TSB for this page size, or the TSB is
2569 * swapped out, there is nothing to do. Note that the latter
2570 * case seems impossible but can occur if hat_pageunload()
2571 * is called on an ISM mapping while the process is swapped
2572 * out.
2573 */
2574 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2575 return;
2576
2577 /*
2578 * If another thread is in the middle of relocating a TSB
2579 * we can't unload the entry so set a flag so that the
2580 * TSB will be flushed before it can be accessed by the
2581 * process.
2582 */
2583 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2584 if (ttep == NULL)
2585 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2586 return;
2587 }
2588 #if defined(UTSB_PHYS)
2589 phys = 1;
2590 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2591 #else
2592 tsb_base = (uint64_t)tsbinfop->tsb_va;
2593 #endif
2594 tsb_size = tsbinfop->tsb_szc;
2595 }
2596 if (ttesz >= TTE4M)
2597 vpshift = MMU_PAGESHIFT4M;
2598
2599 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2600 tag = sfmmu_make_tsbtag(vaddr);
2601
2602 if (ttep == NULL) {
2603 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2604 } else {
2605 if (ttesz >= TTE4M) {
2606 SFMMU_STAT(sf_tsb_load4m);
2607 } else {
2608 SFMMU_STAT(sf_tsb_load8k);
2609 }
2610
2611 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2612 }
2613 }
2614
2615 /*
2616 * Unmap all entries from [start, end) matching the given page size.
2617 *
2618 * This function is used primarily to unmap replicated 64K or 512K entries
2619 * from the TSB that are inserted using the base page size TSB pointer, but
2620 * it may also be called to unmap a range of addresses from the TSB.
2621 */
2622 void
2623 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2624 {
2625 struct tsb_info *tsbinfop;
2626 uint64_t tag;
2627 struct tsbe *tsbe_addr;
2628 caddr_t vaddr;
2629 uint64_t tsb_base;
2630 int vpshift, vpgsz;
2631 uint_t tsb_size;
2632 int phys = 0;
2633
2634 /*
2635 * Assumptions:
2636 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2637 * at a time shooting down any valid entries we encounter.
2638 *
2639 * If ttesz >= 4M we walk the range 4M at a time shooting
2640 * down any valid mappings we find.
2641 */
2642 if (sfmmup == ksfmmup) {
2643 phys = ktsb_phys;
2644 if (ttesz >= TTE4M) {
2645 #ifndef sun4v
2646 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2647 #endif
2648 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2649 tsb_size = ktsb4m_szcode;
2650 } else {
2651 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2652 tsb_size = ktsb_szcode;
2653 }
2654 } else {
2655 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2656
2657 /*
2658 * If there isn't a TSB for this page size, or the TSB is
2659 * swapped out, there is nothing to do. Note that the latter
2660 * case seems impossible but can occur if hat_pageunload()
2661 * is called on an ISM mapping while the process is swapped
2662 * out.
2663 */
2664 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2665 return;
2666
2667 /*
2668 * If another thread is in the middle of relocating a TSB
2669 * we can't unload the entry so set a flag so that the
2670 * TSB will be flushed before it can be accessed by the
2671 * process.
2672 */
2673 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2674 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2675 return;
2676 }
2677 #if defined(UTSB_PHYS)
2678 phys = 1;
2679 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2680 #else
2681 tsb_base = (uint64_t)tsbinfop->tsb_va;
2682 #endif
2683 tsb_size = tsbinfop->tsb_szc;
2684 }
2685 if (ttesz >= TTE4M) {
2686 vpshift = MMU_PAGESHIFT4M;
2687 vpgsz = MMU_PAGESIZE4M;
2688 } else {
2689 vpshift = MMU_PAGESHIFT;
2690 vpgsz = MMU_PAGESIZE;
2691 }
2692
2693 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2694 tag = sfmmu_make_tsbtag(vaddr);
2695 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2696 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2697 }
2698 }
2699
2700 /*
2701 * Select the optimum TSB size given the number of mappings
2702 * that need to be cached.
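 *
 * The result is the smallest size code for which pgcnt fits within
 * SFMMU_RSS_TSBSIZE(szc), clamped to tsb_max_growsize. Under DEBUG, if
 * tsb_grow_stress is set, a pseudo-random size code is returned instead.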
2703 */
2704 static int
2705 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2706 {
2707 int szc = 0;
2708
2709 #ifdef DEBUG
2710 if (tsb_grow_stress) {
2711 uint32_t randval = (uint32_t)gettick() >> 4;
2712 return (randval % (tsb_max_growsize + 1));
2713 }
2714 #endif /* DEBUG */
2715
2716 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2717 szc++;
2718 return (szc);
2719 }
2720
2721 /*
2722 * This function will add a translation to the hme_blk and allocate the
2723 * hme_blk if one does not exist.
2724 * If a page structure is specified then it will add the
2725 * corresponding hment to the mapping list.
2726 * It will also update the hmenum field for the tte.
2727 * Furthermore, it attempts to create a large page translation
2728 * for <addr,hat> at page array pps. It assumes addr and the first
2729 * pp are correctly aligned. It returns 0 if successful and 1 otherwise.
2730 */
2731 static int
2732 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2733 page_t **pps, uint_t flags, uint_t rid)
2734 {
2735 struct hmehash_bucket *hmebp;
2736 struct hme_blk *hmeblkp;
2737 int ret;
2738 uint_t size;
2739
2740 /*
2741 * Get mapping size.
2742 */
2743 size = TTE_CSZ(ttep);
2744 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2745
2746 /*
2747 * Acquire the hash bucket.
2748 */
2749 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2750 ASSERT(hmebp);
2751
2752 /*
2753 * Find the hment block.
2754 */
2755 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2756 rid);
2757 ASSERT(hmeblkp);
2758
2759 /*
2760 * Add the translation.
2761 */
2762 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2763 rid);
2764
2765 /*
2766 * Release the hash bucket.
2767 */
2768 sfmmu_tteload_release_hashbucket(hmebp);
2769
2770 return (ret);
2771 }
2772
2773 /*
2774 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2775 */
2776 static struct hmehash_bucket *
2777 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2778 uint_t rid)
2779 {
2780 struct hmehash_bucket *hmebp;
2781 int hmeshift;
2782 void *htagid = sfmmutohtagid(sfmmup, rid);
2783
2784 ASSERT(htagid != NULL);
2785
2786 hmeshift = HME_HASH_SHIFT(size);
2787
2788 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2789
2790 SFMMU_HASH_LOCK(hmebp);
2791
2792 return (hmebp);
2793 }
2794
2795 /*
2796 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2797 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2798 * allocated.
2799 */
2800 static struct hme_blk *
2801 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2802 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2803 {
2804 hmeblk_tag hblktag;
2805 int hmeshift;
2806 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2807
2808 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2809
2810 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2811 ASSERT(hblktag.htag_id != NULL);
2812 hmeshift = HME_HASH_SHIFT(size);
2813 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2814 hblktag.htag_rehash = HME_HASH_REHASH(size);
2815 hblktag.htag_rid = rid;
2816
2817 ttearray_realloc:
2818
2819 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2820
2821 /*
2822 * We block until hblk_reserve_lock is released; it's held by
2823 * the thread temporarily using hblk_reserve, until hblk_reserve is
2824 * replaced by an hblk from sfmmu8_cache.
2825 */
2826 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
2827 hblk_reserve_thread != curthread) {
2828 SFMMU_HASH_UNLOCK(hmebp);
2829 mutex_enter(&hblk_reserve_lock);
2830 mutex_exit(&hblk_reserve_lock);
2831 SFMMU_STAT(sf_hblk_reserve_hit);
2832 SFMMU_HASH_LOCK(hmebp);
2833 goto ttearray_realloc;
2834 }
2835
2836 if (hmeblkp == NULL) {
2837 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
2838 hblktag, flags, rid);
2839 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2840 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2841 } else {
2842 /*
2843 * It is possible for 8k and 64k hblks to collide since they
2844 * have the same rehash value. This is because we
2845 * lazily free hblks and 8K/64K blks could be lingering.
2846 * If we find a size mismatch we free the block and try again.
2847 */
2848 if (get_hblk_ttesz(hmeblkp) != size) {
2849 ASSERT(!hmeblkp->hblk_vcnt);
2850 ASSERT(!hmeblkp->hblk_hmecnt);
2851 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2852 &list, 0);
2853 goto ttearray_realloc;
2854 }
2855 if (hmeblkp->hblk_shw_bit) {
2856 /*
2857 * if the hblk was previously used as a shadow hblk then
2858 * we will change it to a normal hblk
2859 */
2860 ASSERT(!hmeblkp->hblk_shared);
2861 if (hmeblkp->hblk_shw_mask) {
2862 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
2863 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
2864 goto ttearray_realloc;
2865 } else {
2866 hmeblkp->hblk_shw_bit = 0;
2867 }
2868 }
2869 SFMMU_STAT(sf_hblk_hit);
2870 }
2871
2872 /*
2873 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
2874 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
2875 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
2876 * just add these hmeblks to the per-cpu pending queue.
2877 */
2878 sfmmu_hblks_list_purge(&list, 1);
2879
2880 ASSERT(get_hblk_ttesz(hmeblkp) == size);
2881 ASSERT(!hmeblkp->hblk_shw_bit);
2882 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2883 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2884 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
2885
2886 return (hmeblkp);
2887 }
2888
2889 /*
2890 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
2891 * otherwise.
2892 */
2893 static int
2894 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
2895 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
2896 {
2897 page_t *pp = *pps;
2898 int hmenum, size, remap;
2899 tte_t tteold, flush_tte;
2900 #ifdef DEBUG
2901 tte_t orig_old;
2902 #endif /* DEBUG */
2903 struct sf_hment *sfhme;
2904 kmutex_t *pml, *pmtx;
2905 hatlock_t *hatlockp;
2906 int myflt;
2907
2908 /*
2909 * remove this panic when we decide to let user virtual address
2910 * space be >= USERLIMIT.
2911 */
2912 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
2913 panic("user addr %p in kernel space", (void *)vaddr);
2914 #if defined(TTE_IS_GLOBAL)
2915 if (TTE_IS_GLOBAL(ttep))
2916 panic("sfmmu_tteload: creating global tte");
2917 #endif
2918
2919 #ifdef DEBUG
2920 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
2921 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
2922 panic("sfmmu_tteload: non cacheable memory tte");
2923 #endif /* DEBUG */
2924
2925 /* don't simulate dirty bit for writeable ISM/DISM mappings */
2926 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
2927 TTE_SET_REF(ttep);
2928 TTE_SET_MOD(ttep);
2929 }
2930
2931 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
2932 !TTE_IS_MOD(ttep)) {
2933 /*
2934 * Don't load TSB for dummy as in ISM. Also don't preload
2935 * the TSB if the TTE isn't writable since we're likely to
2936 * fault on it again -- preloading can be fairly expensive.
2937 */
2938 flags |= SFMMU_NO_TSBLOAD;
2939 }
2940
2941 size = TTE_CSZ(ttep);
2942 switch (size) {
2943 case TTE8K:
2944 SFMMU_STAT(sf_tteload8k);
2945 break;
2946 case TTE64K:
2947 SFMMU_STAT(sf_tteload64k);
2948 break;
2949 case TTE512K:
2950 SFMMU_STAT(sf_tteload512k);
2951 break;
2952 case TTE4M:
2953 SFMMU_STAT(sf_tteload4m);
2954 break;
2955 case (TTE32M):
2956 SFMMU_STAT(sf_tteload32m);
2957 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2958 break;
2959 case (TTE256M):
2960 SFMMU_STAT(sf_tteload256m);
2961 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
2962 break;
2963 }
2964
2965 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2966 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2967 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
2968 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
2969
2970 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
2971
2972 /*
2973 * Need to grab mlist lock here so that pageunload
2974 * will not change tte behind us.
2975 */
2976 if (pp) {
2977 pml = sfmmu_mlist_enter(pp);
2978 }
2979
2980 sfmmu_copytte(&sfhme->hme_tte, &tteold);
2981 /*
2982 * Look for corresponding hment and if valid verify
2983 * pfns are equal.
2984 */
2985 remap = TTE_IS_VALID(&tteold);
2986 if (remap) {
2987 pfn_t new_pfn, old_pfn;
2988
2989 old_pfn = TTE_TO_PFN(vaddr, &tteold);
2990 new_pfn = TTE_TO_PFN(vaddr, ttep);
2991
2992 if (flags & HAT_LOAD_REMAP) {
2993 /* make sure we are remapping same type of pages */
2994 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
2995 panic("sfmmu_tteload - tte remap io<->memory");
2996 }
2997 if (old_pfn != new_pfn &&
2998 (pp != NULL || sfhme->hme_page != NULL)) {
2999 panic("sfmmu_tteload - tte remap pp != NULL");
3000 }
3001 } else if (old_pfn != new_pfn) {
3002 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3003 (void *)hmeblkp);
3004 }
3005 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3006 }
3007
3008 if (pp) {
3009 if (size == TTE8K) {
3010 #ifdef VAC
3011 /*
3012 * Handle VAC consistency
3013 */
3014 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3015 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3016 }
3017 #endif
3018
3019 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3020 pmtx = sfmmu_page_enter(pp);
3021 PP_CLRRO(pp);
3022 sfmmu_page_exit(pmtx);
3023 } else if (!PP_ISMAPPED(pp) &&
3024 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3025 pmtx = sfmmu_page_enter(pp);
3026 if (!(PP_ISMOD(pp))) {
3027 PP_SETRO(pp);
3028 }
3029 sfmmu_page_exit(pmtx);
3030 }
3031
3032 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3033 /*
3034 * sfmmu_pagearray_setup failed so return
3035 */
3036 sfmmu_mlist_exit(pml);
3037 return (1);
3038 }
3039 }
3040
3041 /*
3042 * Make sure hment is not on a mapping list.
3043 */
3044 ASSERT(remap || (sfhme->hme_page == NULL));
3045
3046 /* if it is not a remap then hme->next better be NULL */
3047 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3048
3049 if (flags & HAT_LOAD_LOCK) {
3050 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3051 panic("too high lckcnt-hmeblk %p",
3052 (void *)hmeblkp);
3053 }
3054 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3055
3056 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3057 }
3058
3059 #ifdef VAC
3060 if (pp && PP_ISNC(pp)) {
3061 /*
3062 * If the physical page is marked to be uncacheable, like
3063 * by a vac conflict, make sure the new mapping is also
3064 * uncacheable.
3065 */
3066 TTE_CLR_VCACHEABLE(ttep);
3067 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3068 }
3069 #endif
3070 ttep->tte_hmenum = hmenum;
3071
3072 #ifdef DEBUG
3073 orig_old = tteold;
3074 #endif /* DEBUG */
3075
3076 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3077 if ((sfmmup == KHATID) &&
3078 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3079 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3080 }
3081 #ifdef DEBUG
3082 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3083 #endif /* DEBUG */
3084 }
3085 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3086
3087 if (!TTE_IS_VALID(&tteold)) {
3088
3089 atomic_inc_16(&hmeblkp->hblk_vcnt);
3090 if (rid == SFMMU_INVALID_SHMERID) {
3091 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3092 } else {
3093 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3094 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3095 /*
3096 * We already accounted for region ttecnt's in sfmmu
3097 * during hat_join_region() processing. Here we
3098 * only update the ttecnt's in the region structure.
3099 */
3100 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3101 }
3102 }
3103
3104 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3105 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3106 sfmmup != ksfmmup) {
3107 uchar_t tteflag = 1 << size;
3108 if (rid == SFMMU_INVALID_SHMERID) {
3109 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3110 hatlockp = sfmmu_hat_enter(sfmmup);
3111 sfmmup->sfmmu_tteflags |= tteflag;
3112 sfmmu_hat_exit(hatlockp);
3113 }
3114 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3115 hatlockp = sfmmu_hat_enter(sfmmup);
3116 sfmmup->sfmmu_rtteflags |= tteflag;
3117 sfmmu_hat_exit(hatlockp);
3118 }
3119 /*
3120 * Update the current CPU tsbmiss area, so the current thread
3121 * won't need to take the tsbmiss for the new pagesize.
3122 * The other threads in the process will update their tsb
3123 * miss area lazily in sfmmu_tsbmiss_exception() when they
3124 * fail to find the translation for a newly added pagesize.
3125 */
3126 if (size > TTE64K && myflt) {
3127 struct tsbmiss *tsbmp;
3128 kpreempt_disable();
3129 tsbmp = &tsbmiss_area[CPU->cpu_id];
3130 if (rid == SFMMU_INVALID_SHMERID) {
3131 if (!(tsbmp->uhat_tteflags & tteflag)) {
3132 tsbmp->uhat_tteflags |= tteflag;
3133 }
3134 } else {
3135 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3136 tsbmp->uhat_rtteflags |= tteflag;
3137 }
3138 }
3139 kpreempt_enable();
3140 }
3141 }
3142
3143 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3144 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3145 hatlockp = sfmmu_hat_enter(sfmmup);
3146 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3147 sfmmu_hat_exit(hatlockp);
3148 }
3149
3150 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3151 hw_tte.tte_intlo;
3152 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3153 hw_tte.tte_inthi;
3154
3155 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3156 /*
3157 * If this is a remap and the new tte differs from the old tte, we
3158 * need to sync the mod bit and flush the TLB/TSB. We don't
3159 * need to sync the ref bit because we currently always set
3160 * the ref bit in tteload.
3161 */
3162 ASSERT(TTE_IS_REF(ttep));
3163 if (TTE_IS_MOD(&tteold)) {
3164 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3165 }
3166 /*
3167 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3168 * hmes are only used for read only text. Adding this code for
3169 * completeness and future use of shared hmeblks with writable
3170 * mappings of VMODSORT vnodes.
3171 */
3172 if (hmeblkp->hblk_shared) {
3173 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3174 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3175 xt_sync(cpuset);
3176 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3177 } else {
3178 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3179 xt_sync(sfmmup->sfmmu_cpusran);
3180 }
3181 }
3182
3183 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3184 /*
3185 * We only preload 8K and 4M mappings into the TSB, since
3186 * 64K and 512K mappings are replicated and hence don't
3187 * have a single, unique TSB entry. Ditto for 32M/256M.
3188 */
3189 if (size == TTE8K || size == TTE4M) {
3190 sf_scd_t *scdp;
3191 hatlockp = sfmmu_hat_enter(sfmmup);
3192 /*
3193 * Don't preload private TSB if the mapping is used
3194 * by the shctx in the SCD.
3195 */
3196 scdp = sfmmup->sfmmu_scdp;
3197 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3198 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3199 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3200 size);
3201 }
3202 sfmmu_hat_exit(hatlockp);
3203 }
3204 }
3205 if (pp) {
3206 if (!remap) {
3207 HME_ADD(sfhme, pp);
3208 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3209 ASSERT(hmeblkp->hblk_hmecnt > 0);
3210
3211 /*
3212 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3213 * see pageunload() for comment.
3214 */
3215 }
3216 sfmmu_mlist_exit(pml);
3217 }
3218
3219 return (0);
3220 }
3221 /*
3222 * Function unlocks hash bucket.
3223 */
3224 static void
3225 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3226 {
3227 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3228 SFMMU_HASH_UNLOCK(hmebp);
3229 }
3230
3231 /*
3232 * Function which checks and sets up the page array for a large
3233 * translation. Will set the p_vcolor, p_index, p_ro fields.
3234 * Assumes addr and pfnum of the first page are properly aligned.
3235 * Will check for physical contiguity. If the check fails it returns
3236 * nonzero.
3237 */
3238 static int
3239 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3240 {
3241 int i, index, ttesz;
3242 pfn_t pfnum;
3243 pgcnt_t npgs;
3244 page_t *pp, *pp1;
3245 kmutex_t *pmtx;
3246 #ifdef VAC
3247 int osz;
3248 int cflags = 0;
3249 int vac_err = 0;
3250 #endif
3251 int newidx = 0;
3252
3253 ttesz = TTE_CSZ(ttep);
3254
3255 ASSERT(ttesz > TTE8K);
3256
3257 npgs = TTEPAGES(ttesz);
3258 index = PAGESZ_TO_INDEX(ttesz);
3259
3260 pfnum = (*pps)->p_pagenum;
3261 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3262
3263 /*
3264 * Save the first pp so we can do HAT_TMPNC at the end.
3265 */
3266 pp1 = *pps;
3267 #ifdef VAC
3268 osz = fnd_mapping_sz(pp1);
3269 #endif
3270
3271 for (i = 0; i < npgs; i++, pps++) {
3272 pp = *pps;
3273 ASSERT(PAGE_LOCKED(pp));
3274 ASSERT(pp->p_szc >= ttesz);
3275 ASSERT(pp->p_szc == pp1->p_szc);
3276 ASSERT(sfmmu_mlist_held(pp));
3277
3278 /*
3279 * XXX is it possible to maintain P_RO on the root only?
3280 */
3281 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3282 pmtx = sfmmu_page_enter(pp);
3283 PP_CLRRO(pp);
3284 sfmmu_page_exit(pmtx);
3285 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3286 !PP_ISMOD(pp)) {
3287 pmtx = sfmmu_page_enter(pp);
3288 if (!(PP_ISMOD(pp))) {
3289 PP_SETRO(pp);
3290 }
3291 sfmmu_page_exit(pmtx);
3292 }
3293
3294 /*
3295 * If this is a remap we skip vac & contiguity checks.
3296 */
3297 if (remap)
3298 continue;
3299
3300 /*
3301 * set p_vcolor and detect any vac conflicts.
3302 */
3303 #ifdef VAC
3304 if (vac_err == 0) {
3305 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3306
3307 }
3308 #endif
3309
3310 /*
3311 * Save current index in case we need to undo it.
3312 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3313 * "SFMMU_INDEX_SHIFT 6"
3314 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3315 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3316 *
3317 * So: index = PAGESZ_TO_INDEX(ttesz);
3318 * if ttesz == 1 then index = 0x2
3319 * 2 then index = 0x4
3320 * 3 then index = 0x8
3321 * 4 then index = 0x10
3322 * 5 then index = 0x20
3323 * The code below checks if it's a new pagesize (ie, newidx)
3324 * in case we need to take it back out of p_index,
3325 * and then or's the new index into the existing index.
3326 */
3327 if ((PP_MAPINDEX(pp) & index) == 0)
3328 newidx = 1;
3329 pp->p_index = (PP_MAPINDEX(pp) | index);
3330
3331 /*
3332 * contiguity check
3333 */
3334 if (pp->p_pagenum != pfnum) {
3335 /*
3336 * If we fail the contiguity test then
3337 * the only thing we need to fix is the p_index field.
3338 * We might get a few extra flushes but since this
3339 * path is rare that is ok. The p_ro field will
3340 * get automatically fixed on the next tteload to
3341 * the page. NO TNC bit is set yet.
3342 */
3343 while (i >= 0) {
3344 pp = *pps;
3345 if (newidx)
3346 pp->p_index = (PP_MAPINDEX(pp) &
3347 ~index);
3348 pps--;
3349 i--;
3350 }
3351 return (1);
3352 }
3353 pfnum++;
3354 addr += MMU_PAGESIZE;
3355 }
3356
3357 #ifdef VAC
3358 if (vac_err) {
3359 if (ttesz > osz) {
3360 /*
3361 * There are some smaller mappings that cause vac
3362 * conflicts. Convert all existing small mappings to
3363 * TNC.
3364 */
3365 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3366 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3367 npgs);
3368 } else {
3369 /* EMPTY */
3370 /*
3371 * If there exists a big page mapping,
3372 * that means the whole existing big page
3373 * has the TNC setting already. No need to convert to
3374 * TNC again.
3375 */
3376 ASSERT(PP_ISTNC(pp1));
3377 }
3378 }
3379 #endif /* VAC */
3380
3381 return (0);
3382 }
3383
3384 #ifdef VAC
3385 /*
3386 * Routine that checks vac consistency for a large page. It also
3387 * sets the virtual color for all pp's of this big mapping.
3388 */
3389 static int
3390 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3391 {
3392 int vcolor, ocolor;
3393
3394 ASSERT(sfmmu_mlist_held(pp));
3395
3396 if (PP_ISNC(pp)) {
3397 return (HAT_TMPNC);
3398 }
3399
3400 vcolor = addr_to_vcolor(addr);
3401 if (PP_NEWPAGE(pp)) {
3402 PP_SET_VCOLOR(pp, vcolor);
3403 return (0);
3404 }
3405
3406 ocolor = PP_GET_VCOLOR(pp);
3407 if (ocolor == vcolor) {
3408 return (0);
3409 }
3410
3411 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3412 /*
3413 * Previous user of the page had a different color
3414 * but since there are no current users
3415 * we just flush the cache and change the color.
3416 * As an optimization for large pages we flush the
3417 * entire cache of that color and set a flag.
3418 */
3419 SFMMU_STAT(sf_pgcolor_conflict);
3420 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3421 CacheColor_SetFlushed(*cflags, ocolor);
3422 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3423 }
3424 PP_SET_VCOLOR(pp, vcolor);
3425 return (0);
3426 }
3427
3428 /*
3429 * We got a real conflict with a current mapping.
3430 * Set flags to start uncaching all mappings
3431 * and return failure so we restart looping
3432 * through the pp array from the beginning.
3433 */
3434 return (HAT_TMPNC);
3435 }
3436 #endif /* VAC */
3437
3438 /*
3439 * creates a large page shadow hmeblk for a tte.
3440 * The purpose of this routine is to allow us to do quick unloads because
3441 * the vm layer can easily pass a very large but sparsely populated range.
3442 */
3443 static struct hme_blk *
3444 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3445 {
3446 struct hmehash_bucket *hmebp;
3447 hmeblk_tag hblktag;
3448 int hmeshift, size, vshift;
3449 uint_t shw_mask, newshw_mask;
3450 struct hme_blk *hmeblkp;
3451
3452 ASSERT(sfmmup != KHATID);
3453 if (mmu_page_sizes == max_mmu_page_sizes) {
3454 ASSERT(ttesz < TTE256M);
3455 } else {
3456 ASSERT(ttesz < TTE4M);
3457 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3458 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3459 }
3460
3461 if (ttesz == TTE8K) {
3462 size = TTE512K;
3463 } else {
3464 size = ++ttesz;
3465 }
3466
3467 hblktag.htag_id = sfmmup;
3468 hmeshift = HME_HASH_SHIFT(size);
3469 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3470 hblktag.htag_rehash = HME_HASH_REHASH(size);
3471 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3472 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3473
3474 SFMMU_HASH_LOCK(hmebp);
3475
3476 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3477 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3478 if (hmeblkp == NULL) {
3479 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3480 hblktag, flags, SFMMU_INVALID_SHMERID);
3481 }
3482 ASSERT(hmeblkp);
3483 if (!hmeblkp->hblk_shw_mask) {
3484 /*
3485 * If this is an unused hblk it was just allocated or could
3486 * potentially be a previous large page hblk so we need to
3487 * set the shadow bit.
3488 */
3489 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3490 hmeblkp->hblk_shw_bit = 1;
3491 } else if (hmeblkp->hblk_shw_bit == 0) {
3492 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3493 (void *)hmeblkp);
3494 }
3495 ASSERT(hmeblkp->hblk_shw_bit == 1);
3496 ASSERT(!hmeblkp->hblk_shared);
3497 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3498 ASSERT(vshift < 8);
3499 /*
3500 * Atomically set shw mask bit
3501 */
3502 do {
3503 shw_mask = hmeblkp->hblk_shw_mask;
3504 newshw_mask = shw_mask | (1 << vshift);
3505 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3506 newshw_mask);
3507 } while (newshw_mask != shw_mask);
3508
3509 SFMMU_HASH_UNLOCK(hmebp);
3510
3511 return (hmeblkp);
3512 }
3513
3514 /*
3515 * This routine cleans up a previous shadow hmeblk and changes it to
3516 * a regular hblk. This happens rarely but it is possible
3517 * when a process wants to use large pages and there are hblks still
3518 * lying around from the previous address space that used these hmeblks.
3519 * The alternative was to clean up the shadow hblks at unload time
3520 * but since so few user processes actually use large pages, it is
3521 * better to be lazy and clean up at this time.
3522 */
3523 static void
3524 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3525 struct hmehash_bucket *hmebp)
3526 {
3527 caddr_t addr, endaddr;
3528 int hashno, size;
3529
3530 ASSERT(hmeblkp->hblk_shw_bit);
3531 ASSERT(!hmeblkp->hblk_shared);
3532
3533 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3534
3535 if (!hmeblkp->hblk_shw_mask) {
3536 hmeblkp->hblk_shw_bit = 0;
3537 return;
3538 }
3539 addr = (caddr_t)get_hblk_base(hmeblkp);
3540 endaddr = get_hblk_endaddr(hmeblkp);
3541 size = get_hblk_ttesz(hmeblkp);
3542 hashno = size - 1;
3543 ASSERT(hashno > 0);
3544 SFMMU_HASH_UNLOCK(hmebp);
3545
3546 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3547
3548 SFMMU_HASH_LOCK(hmebp);
3549 }
3550
3551 static void
3552 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3553 int hashno)
3554 {
3555 int hmeshift, shadow = 0;
3556 hmeblk_tag hblktag;
3557 struct hmehash_bucket *hmebp;
3558 struct hme_blk *hmeblkp;
3559 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3560
3561 ASSERT(hashno > 0);
3562 hblktag.htag_id = sfmmup;
3563 hblktag.htag_rehash = hashno;
3564 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3565
3566 hmeshift = HME_HASH_SHIFT(hashno);
3567
3568 while (addr < endaddr) {
3569 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3570 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3571 SFMMU_HASH_LOCK(hmebp);
3572 /* inline HME_HASH_SEARCH */
3573 hmeblkp = hmebp->hmeblkp;
3574 pr_hblk = NULL;
3575 while (hmeblkp) {
3576 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3577 /* found hme_blk */
3578 ASSERT(!hmeblkp->hblk_shared);
3579 if (hmeblkp->hblk_shw_bit) {
3580 if (hmeblkp->hblk_shw_mask) {
3581 shadow = 1;
3582 sfmmu_shadow_hcleanup(sfmmup,
3583 hmeblkp, hmebp);
3584 break;
3585 } else {
3586 hmeblkp->hblk_shw_bit = 0;
3587 }
3588 }
3589
3590 /*
			 * Hblk_hmecnt and hblk_vcnt could be non-zero
			 * since hblk_unload() does not guarantee that.
3593 *
3594 * XXX - this could cause tteload() to spin
3595 * where sfmmu_shadow_hcleanup() is called.
3596 */
3597 }
3598
3599 nx_hblk = hmeblkp->hblk_next;
3600 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3601 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3602 &list, 0);
3603 } else {
3604 pr_hblk = hmeblkp;
3605 }
3606 hmeblkp = nx_hblk;
3607 }
3608
3609 SFMMU_HASH_UNLOCK(hmebp);
3610
3611 if (shadow) {
3612 /*
			 * We found another shadow hblk and cleaned up its
			 * children.  We need to go back and clean up
			 * the original hblk, so we don't advance
			 * addr.
3617 */
3618 shadow = 0;
3619 } else {
3620 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3621 (1 << hmeshift));
3622 }
3623 }
3624 sfmmu_hblks_list_purge(&list, 0);
3625 }
3626
3627 /*
 * This routine's job is to delete stale invalid shared hmeregion hmeblks that
 * may still linger on after pageunload.
3630 */
3631 static void
3632 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3633 {
3634 int hmeshift;
3635 hmeblk_tag hblktag;
3636 struct hmehash_bucket *hmebp;
3637 struct hme_blk *hmeblkp;
3638 struct hme_blk *pr_hblk;
3639 struct hme_blk *list = NULL;
3640
3641 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3642 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3643
3644 hmeshift = HME_HASH_SHIFT(ttesz);
3645 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3646 hblktag.htag_rehash = ttesz;
3647 hblktag.htag_rid = rid;
3648 hblktag.htag_id = srdp;
3649 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3650
3651 SFMMU_HASH_LOCK(hmebp);
3652 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3653 if (hmeblkp != NULL) {
3654 ASSERT(hmeblkp->hblk_shared);
3655 ASSERT(!hmeblkp->hblk_shw_bit);
3656 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3657 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3658 }
3659 ASSERT(!hmeblkp->hblk_lckcnt);
3660 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3661 &list, 0);
3662 }
3663 SFMMU_HASH_UNLOCK(hmebp);
3664 sfmmu_hblks_list_purge(&list, 0);
3665 }
3666
3667 /* ARGSUSED */
3668 static void
3669 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3670 size_t r_size, void *r_obj, u_offset_t r_objoff)
3671 {
3672 }
3673
3674 /*
3675 * Searches for an hmeblk which maps addr, then unloads this mapping
3676 * and updates *eaddrp, if the hmeblk is found.
3677 */
3678 static void
3679 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3680 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3681 {
3682 int hmeshift;
3683 hmeblk_tag hblktag;
3684 struct hmehash_bucket *hmebp;
3685 struct hme_blk *hmeblkp;
3686 struct hme_blk *pr_hblk;
3687 struct hme_blk *list = NULL;
3688
3689 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3690 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3691 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3692
3693 hmeshift = HME_HASH_SHIFT(ttesz);
3694 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3695 hblktag.htag_rehash = ttesz;
3696 hblktag.htag_rid = rid;
3697 hblktag.htag_id = srdp;
3698 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3699
3700 SFMMU_HASH_LOCK(hmebp);
3701 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3702 if (hmeblkp != NULL) {
3703 ASSERT(hmeblkp->hblk_shared);
3704 ASSERT(!hmeblkp->hblk_lckcnt);
3705 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3706 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3707 eaddr, NULL, HAT_UNLOAD);
3708 ASSERT(*eaddrp > addr);
3709 }
3710 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3711 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3712 &list, 0);
3713 }
3714 SFMMU_HASH_UNLOCK(hmebp);
3715 sfmmu_hblks_list_purge(&list, 0);
3716 }
3717
3718 static void
3719 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3720 {
3721 int ttesz = rgnp->rgn_pgszc;
3722 size_t rsz = rgnp->rgn_size;
3723 caddr_t rsaddr = rgnp->rgn_saddr;
3724 caddr_t readdr = rsaddr + rsz;
3725 caddr_t rhsaddr;
3726 caddr_t va;
3727 uint_t rid = rgnp->rgn_id;
3728 caddr_t cbsaddr;
3729 caddr_t cbeaddr;
3730 hat_rgn_cb_func_t rcbfunc;
3731 ulong_t cnt;
3732
3733 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3734 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3735
3736 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3737 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3738 if (ttesz < HBLK_MIN_TTESZ) {
3739 ttesz = HBLK_MIN_TTESZ;
3740 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3741 } else {
3742 rhsaddr = rsaddr;
3743 }
3744
3745 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3746 rcbfunc = sfmmu_rgn_cb_noop;
3747 }
3748
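	/*
	 * Walk the region once per page size it actually uses, unloading
	 * hmeblks as we go.  Contiguous unloaded ranges are accumulated in
	 * [cbsaddr, cbeaddr) so that the region callback is invoked once per
	 * contiguous chunk rather than once per hmeblk.
	 */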
3749 while (ttesz >= HBLK_MIN_TTESZ) {
3750 cbsaddr = rsaddr;
3751 cbeaddr = rsaddr;
3752 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3753 ttesz--;
3754 continue;
3755 }
3756 cnt = 0;
3757 va = rsaddr;
3758 while (va < readdr) {
3759 ASSERT(va >= rhsaddr);
3760 if (va != cbeaddr) {
3761 if (cbeaddr != cbsaddr) {
3762 ASSERT(cbeaddr > cbsaddr);
3763 (*rcbfunc)(cbsaddr, cbeaddr,
3764 rsaddr, rsz, rgnp->rgn_obj,
3765 rgnp->rgn_objoff);
3766 }
3767 cbsaddr = va;
3768 cbeaddr = va;
3769 }
3770 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3771 ttesz, &cbeaddr);
3772 cnt++;
3773 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3774 }
3775 if (cbeaddr != cbsaddr) {
3776 ASSERT(cbeaddr > cbsaddr);
3777 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3778 rsz, rgnp->rgn_obj,
3779 rgnp->rgn_objoff);
3780 }
3781 ttesz--;
3782 }
3783 }
3784
3785 /*
3786 * Release one hardware address translation lock on the given address range.
3787 */
3788 void
3789 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3790 {
3791 struct hmehash_bucket *hmebp;
3792 hmeblk_tag hblktag;
3793 int hmeshift, hashno = 1;
3794 struct hme_blk *hmeblkp, *list = NULL;
3795 caddr_t endaddr;
3796
3797 ASSERT(sfmmup != NULL);
3798
3799 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3800 ASSERT((len & MMU_PAGEOFFSET) == 0);
3801 endaddr = addr + len;
3802 hblktag.htag_id = sfmmup;
3803 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3804
3805 /*
3806 * Spitfire supports 4 page sizes.
3807 * Most pages are expected to be of the smallest page size (8K) and
3808 * these will not need to be rehashed. 64K pages also don't need to be
3809 * rehashed because an hmeblk spans 64K of address space. 512K pages
	 * might need 1 rehash and 4M pages might need 2 rehashes.
3811 */
3812 while (addr < endaddr) {
3813 hmeshift = HME_HASH_SHIFT(hashno);
3814 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3815 hblktag.htag_rehash = hashno;
3816 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3817
3818 SFMMU_HASH_LOCK(hmebp);
3819
3820 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3821 if (hmeblkp != NULL) {
3822 ASSERT(!hmeblkp->hblk_shared);
3823 /*
3824 * If we encounter a shadow hmeblk then
3825 * we know there are no valid hmeblks mapping
3826 * this address at this size or larger.
3827 * Just increment address by the smallest
3828 * page size.
3829 */
3830 if (hmeblkp->hblk_shw_bit) {
3831 addr += MMU_PAGESIZE;
3832 } else {
3833 addr = sfmmu_hblk_unlock(hmeblkp, addr,
3834 endaddr);
3835 }
3836 SFMMU_HASH_UNLOCK(hmebp);
3837 hashno = 1;
3838 continue;
3839 }
3840 SFMMU_HASH_UNLOCK(hmebp);
3841
3842 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
3843 /*
3844 * We have traversed the whole list and rehashed
			 * if necessary without finding the address to unlock,
			 * which should never happen.
3847 */
3848 panic("sfmmu_unlock: addr not found. "
3849 "addr %p hat %p", (void *)addr, (void *)sfmmup);
3850 } else {
3851 hashno++;
3852 }
3853 }
3854
3855 sfmmu_hblks_list_purge(&list, 0);
3856 }
3857
3858 void
3859 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
3860 hat_region_cookie_t rcookie)
3861 {
3862 sf_srd_t *srdp;
3863 sf_region_t *rgnp;
3864 int ttesz;
3865 uint_t rid;
3866 caddr_t eaddr;
3867 caddr_t va;
3868 int hmeshift;
3869 hmeblk_tag hblktag;
3870 struct hmehash_bucket *hmebp;
3871 struct hme_blk *hmeblkp;
3872 struct hme_blk *pr_hblk;
3873 struct hme_blk *list;
3874
3875 if (rcookie == HAT_INVALID_REGION_COOKIE) {
3876 hat_unlock(sfmmup, addr, len);
3877 return;
3878 }
3879
3880 ASSERT(sfmmup != NULL);
3881 ASSERT(sfmmup != ksfmmup);
3882
3883 srdp = sfmmup->sfmmu_srdp;
3884 rid = (uint_t)((uint64_t)rcookie);
3885 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
3886 eaddr = addr + len;
3887 va = addr;
3888 list = NULL;
3889 rgnp = srdp->srd_hmergnp[rid];
3890 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
3891
3892 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
3893 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
3894 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
3895 ttesz = HBLK_MIN_TTESZ;
3896 } else {
3897 ttesz = rgnp->rgn_pgszc;
3898 }
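	/*
	 * For each address in the range, probe the hash levels from the
	 * largest page size the region may use down to HBLK_MIN_TTESZ until
	 * the hmeblk holding the mapping is found, then unlock as much of
	 * the range as that hmeblk covers before moving on.
	 */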
3899 while (va < eaddr) {
3900 while (ttesz < rgnp->rgn_pgszc &&
3901 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
3902 ttesz++;
3903 }
3904 while (ttesz >= HBLK_MIN_TTESZ) {
3905 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3906 ttesz--;
3907 continue;
3908 }
3909 hmeshift = HME_HASH_SHIFT(ttesz);
3910 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
3911 hblktag.htag_rehash = ttesz;
3912 hblktag.htag_rid = rid;
3913 hblktag.htag_id = srdp;
3914 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
3915 SFMMU_HASH_LOCK(hmebp);
3916 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
3917 &list);
3918 if (hmeblkp == NULL) {
3919 SFMMU_HASH_UNLOCK(hmebp);
3920 ttesz--;
3921 continue;
3922 }
3923 ASSERT(hmeblkp->hblk_shared);
3924 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
3925 ASSERT(va >= eaddr ||
3926 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
3927 SFMMU_HASH_UNLOCK(hmebp);
3928 break;
3929 }
3930 if (ttesz < HBLK_MIN_TTESZ) {
3931 panic("hat_unlock_region: addr not found "
3932 "addr %p hat %p", (void *)va, (void *)sfmmup);
3933 }
3934 }
3935 sfmmu_hblks_list_purge(&list, 0);
3936 }
3937
3938 /*
3939 * Function to unlock a range of addresses in an hmeblk. It returns the
3940 * next address that needs to be unlocked.
3941 * Should be called with the hash lock held.
3942 */
3943 static caddr_t
3944 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
3945 {
3946 struct sf_hment *sfhme;
3947 tte_t tteold, ttemod;
3948 int ttesz, ret;
3949
3950 ASSERT(in_hblk_range(hmeblkp, addr));
3951 ASSERT(hmeblkp->hblk_shw_bit == 0);
3952
3953 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
3954 ttesz = get_hblk_ttesz(hmeblkp);
3955
3956 HBLKTOHME(sfhme, hmeblkp, addr);
3957 while (addr < endaddr) {
3958 readtte:
3959 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3960 if (TTE_IS_VALID(&tteold)) {
3961
3962 ttemod = tteold;
3963
3964 ret = sfmmu_modifytte_try(&tteold, &ttemod,
3965 &sfhme->hme_tte);
3966
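			/*
			 * The tte changed underneath us; re-read and retry.
			 */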
3967 if (ret < 0)
3968 goto readtte;
3969
3970 if (hmeblkp->hblk_lckcnt == 0)
3971 panic("zero hblk lckcnt");
3972
3973 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
3974 (uintptr_t)endaddr)
3975 panic("can't unlock large tte");
3976
3977 ASSERT(hmeblkp->hblk_lckcnt > 0);
3978 atomic_dec_32(&hmeblkp->hblk_lckcnt);
3979 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
3980 } else {
3981 panic("sfmmu_hblk_unlock: invalid tte");
3982 }
3983 addr += TTEBYTES(ttesz);
3984 sfhme++;
3985 }
3986 return (addr);
3987 }
3988
3989 /*
3990 * Physical Address Mapping Framework
3991 *
3992 * General rules:
3993 *
3994 * (1) Applies only to seg_kmem memory pages. To make things easier,
3995 * seg_kpm addresses are also accepted by the routines, but nothing
3996 * is done with them since by definition their PA mappings are static.
3997 * (2) hat_add_callback() may only be called while holding the page lock
3998 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
3999 * or passing HAC_PAGELOCK flag.
4000 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4001 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4002 * callbacks may not sleep or acquire adaptive mutex locks.
4003 * (4) Either prehandler() or posthandler() (but not both) may be specified
4004 * as being NULL. Specifying an errhandler() is optional.
4005 *
4006 * Details of using the framework:
4007 *
4008 * registering a callback (hat_register_callback())
4009 *
4010 * Pass prehandler, posthandler, errhandler addresses
 *	as described below.  If the capture_cpus argument is nonzero,
 *	the suspend callback to the prehandler will occur with CPUs
 *	captured and executing xc_loop(), and the CPUs will remain
 *	captured until after the posthandler suspend callback
 *	occurs.
4016 *
4017 * adding a callback (hat_add_callback())
4018 *
4019 * as_pagelock();
4020 * hat_add_callback();
4021 * save returned pfn in private data structures or program registers;
4022 * as_pageunlock();
4023 *
4024 * prehandler()
4025 *
4026 * Stop all accesses by physical address to this memory page.
4027 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4028 * adaptive locks. The second, SUSPEND, is called at high PIL with
4029 * CPUs captured so adaptive locks may NOT be acquired (and all spin
4030 * locks must be XCALL_PIL or higher locks).
4031 *
4032 * May return the following errors:
4033 * EIO: A fatal error has occurred. This will result in panic.
4034 * EAGAIN: The page cannot be suspended. This will fail the
4035 * relocation.
4036 * 0: Success.
4037 *
4038 * posthandler()
4039 *
4040 * Save new pfn in private data structures or program registers;
4041 * not allowed to fail (non-zero return values will result in panic).
4042 *
4043 * errhandler()
4044 *
 *	Called when an error occurs related to the callback. Currently
4046 * the only such error is HAT_CB_ERR_LEAKED which indicates that
4047 * a page is being freed, but there are still outstanding callback(s)
4048 * registered on the page.
4049 *
4050 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4051 *
4052 * stop using physical address
4053 * hat_delete_callback();
4054 *
4055 */
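
/*
 * Illustrative sketch (not compiled): a hypothetical driver "xx" registers
 * its callback class once and then brackets DMA to a kernel page with
 * hat_add_callback()/hat_delete_callback().  The xx_* names and XX_KEY are
 * examples only; the hat_*_callback() interfaces themselves are implemented
 * below.
 *
 *	static id_t xx_cb_id;
 *
 *	static int
 *	xx_pre(caddr_t addr, uint_t len, uint_t flags, void *pvt)
 *	{
 *		// PRESUSPEND/SUSPEND: quiesce device access to this page
 *		return (0);
 *	}
 *
 *	static int
 *	xx_post(caddr_t addr, uint_t len, uint_t flags, void *pvt, pfn_t newpfn)
 *	{
 *		// record newpfn and resume device access
 *		return (0);
 *	}
 *
 *	// once, e.g. at attach time
 *	xx_cb_id = hat_register_callback(XX_KEY, xx_pre, xx_post, NULL, 0);
 *
 *	// per buffer
 *	pfn_t pfn;
 *	void *cookie;
 *	(void) hat_add_callback(xx_cb_id, vaddr, MMU_PAGESIZE, HAC_PAGELOCK,
 *	    xx_state, &pfn, &cookie);
 *	// ... program the device with pfn ...
 *	hat_delete_callback(vaddr, MMU_PAGESIZE, xx_state, HAC_PAGELOCK,
 *	    cookie);
 */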
4056
4057 /*
4058 * Register a callback class. Each subsystem should do this once and
4059 * cache the id_t returned for use in setting up and tearing down callbacks.
4060 *
4061 * There is no facility for removing callback IDs once they are created;
4062 * the "key" should be unique for each module, so in case a module is unloaded
4063 * and subsequently re-loaded, we can recycle the module's previous entry.
4064 */
4065 id_t
4066 hat_register_callback(int key,
4067 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4068 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4069 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4070 int capture_cpus)
4071 {
4072 id_t id;
4073
4074 /*
4075 * Search the table for a pre-existing callback associated with
4076 * the identifier "key". If one exists, we re-use that entry in
4077 * the table for this instance, otherwise we assign the next
4078 * available table slot.
4079 */
4080 for (id = 0; id < sfmmu_max_cb_id; id++) {
4081 if (sfmmu_cb_table[id].key == key)
4082 break;
4083 }
4084
4085 if (id == sfmmu_max_cb_id) {
4086 id = sfmmu_cb_nextid++;
4087 if (id >= sfmmu_max_cb_id)
4088 panic("hat_register_callback: out of callback IDs");
4089 }
4090
4091 ASSERT(prehandler != NULL || posthandler != NULL);
4092
4093 sfmmu_cb_table[id].key = key;
4094 sfmmu_cb_table[id].prehandler = prehandler;
4095 sfmmu_cb_table[id].posthandler = posthandler;
4096 sfmmu_cb_table[id].errhandler = errhandler;
4097 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4098
4099 return (id);
4100 }
4101
4102 #define HAC_COOKIE_NONE (void *)-1
4103
4104 /*
4105 * Add relocation callbacks to the specified addr/len which will be called
4106 * when relocating the associated page. See the description of pre and
4107 * posthandler above for more details.
4108 *
4109 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4110 * locked internally so the caller must be able to deal with the callback
4111 * running even before this function has returned. If HAC_PAGELOCK is not
4112 * set, it is assumed that the underlying memory pages are locked.
4113 *
4114 * Since the caller must track the individual page boundaries anyway,
4115 * we only allow a callback to be added to a single page (large
4116 * or small). Thus [addr, addr + len) MUST be contained within a single
4117 * page.
4118 *
4119 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4120 * _provided_that_ a unique parameter is specified for each callback.
4121 * If multiple callbacks are registered on the same range the callback will
4122 * be invoked with each unique parameter. Registering the same callback with
4123 * the same argument more than once will result in corrupted kernel state.
4124 *
4125 * Returns the pfn of the underlying kernel page in *rpfn
4126 * on success, or PFN_INVALID on failure.
4127 *
4128 * cookiep (if passed) provides storage space for an opaque cookie
4129 * to return later to hat_delete_callback(). This cookie makes the callback
4130 * deletion significantly quicker by avoiding a potentially lengthy hash
4131 * search.
4132 *
 * Return values:
4134 * 0: success
4135 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4136 * EINVAL: callback ID is not valid
4137 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4138 * space
4139 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4140 */
4141 int
4142 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4143 void *pvt, pfn_t *rpfn, void **cookiep)
4144 {
4145 struct hmehash_bucket *hmebp;
4146 hmeblk_tag hblktag;
4147 struct hme_blk *hmeblkp;
4148 int hmeshift, hashno;
4149 caddr_t saddr, eaddr, baseaddr;
4150 struct pa_hment *pahmep;
4151 struct sf_hment *sfhmep, *osfhmep;
4152 kmutex_t *pml;
4153 tte_t tte;
4154 page_t *pp;
4155 vnode_t *vp;
4156 u_offset_t off;
4157 pfn_t pfn;
4158 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4159 int locked = 0;
4160
4161 /*
4162 * For KPM mappings, just return the physical address since we
4163 * don't need to register any callbacks.
4164 */
4165 if (IS_KPM_ADDR(vaddr)) {
4166 uint64_t paddr;
4167 SFMMU_KPM_VTOP(vaddr, paddr);
4168 *rpfn = btop(paddr);
4169 if (cookiep != NULL)
4170 *cookiep = HAC_COOKIE_NONE;
4171 return (0);
4172 }
4173
4174 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4175 *rpfn = PFN_INVALID;
4176 return (EINVAL);
4177 }
4178
4179 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4180 *rpfn = PFN_INVALID;
4181 return (ENOMEM);
4182 }
4183
4184 sfhmep = &pahmep->sfment;
4185
4186 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4187 eaddr = saddr + len;
4188
4189 rehash:
4190 /* Find the mapping(s) for this page */
4191 for (hashno = TTE64K, hmeblkp = NULL;
4192 hmeblkp == NULL && hashno <= mmu_hashcnt;
4193 hashno++) {
4194 hmeshift = HME_HASH_SHIFT(hashno);
4195 hblktag.htag_id = ksfmmup;
4196 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4197 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4198 hblktag.htag_rehash = hashno;
4199 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4200
4201 SFMMU_HASH_LOCK(hmebp);
4202
4203 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4204
4205 if (hmeblkp == NULL)
4206 SFMMU_HASH_UNLOCK(hmebp);
4207 }
4208
4209 if (hmeblkp == NULL) {
4210 kmem_cache_free(pa_hment_cache, pahmep);
4211 *rpfn = PFN_INVALID;
4212 return (ENXIO);
4213 }
4214
4215 ASSERT(!hmeblkp->hblk_shared);
4216
4217 HBLKTOHME(osfhmep, hmeblkp, saddr);
4218 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4219
4220 if (!TTE_IS_VALID(&tte)) {
4221 SFMMU_HASH_UNLOCK(hmebp);
4222 kmem_cache_free(pa_hment_cache, pahmep);
4223 *rpfn = PFN_INVALID;
4224 return (ENXIO);
4225 }
4226
4227 /*
4228 * Make sure the boundaries for the callback fall within this
4229 * single mapping.
4230 */
4231 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4232 ASSERT(saddr >= baseaddr);
4233 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4234 SFMMU_HASH_UNLOCK(hmebp);
4235 kmem_cache_free(pa_hment_cache, pahmep);
4236 *rpfn = PFN_INVALID;
4237 return (ERANGE);
4238 }
4239
4240 pfn = sfmmu_ttetopfn(&tte, vaddr);
4241
4242 /*
4243 * The pfn may not have a page_t underneath in which case we
4244 * just return it. This can happen if we are doing I/O to a
4245 * static portion of the kernel's address space, for instance.
4246 */
4247 pp = osfhmep->hme_page;
4248 if (pp == NULL) {
4249 SFMMU_HASH_UNLOCK(hmebp);
4250 kmem_cache_free(pa_hment_cache, pahmep);
4251 *rpfn = pfn;
4252 if (cookiep)
4253 *cookiep = HAC_COOKIE_NONE;
4254 return (0);
4255 }
4256 ASSERT(pp == PP_PAGEROOT(pp));
4257
4258 vp = pp->p_vnode;
4259 off = pp->p_offset;
4260
4261 pml = sfmmu_mlist_enter(pp);
4262
4263 if (flags & HAC_PAGELOCK) {
4264 if (!page_trylock(pp, SE_SHARED)) {
4265 /*
4266 * Somebody is holding SE_EXCL lock. Might
4267 * even be hat_page_relocate(). Drop all
4268 * our locks, lookup the page in &kvp, and
4269 * retry. If it doesn't exist in &kvp and &zvp,
4270 * then we must be dealing with a kernel mapped
4271 * page which doesn't actually belong to
4272 * segkmem so we punt.
4273 */
4274 sfmmu_mlist_exit(pml);
4275 SFMMU_HASH_UNLOCK(hmebp);
4276 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4277
4278 /* check zvp before giving up */
4279 if (pp == NULL)
4280 pp = page_lookup(&zvp, (u_offset_t)saddr,
4281 SE_SHARED);
4282
4283 /* Okay, we didn't find it, give up */
4284 if (pp == NULL) {
4285 kmem_cache_free(pa_hment_cache, pahmep);
4286 *rpfn = pfn;
4287 if (cookiep)
4288 *cookiep = HAC_COOKIE_NONE;
4289 return (0);
4290 }
4291 page_unlock(pp);
4292 goto rehash;
4293 }
4294 locked = 1;
4295 }
4296
4297 if (!PAGE_LOCKED(pp) && !panicstr)
4298 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4299
4300 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4301 pp->p_offset != off) {
4302 /*
4303 * The page moved before we got our hands on it. Drop
4304 * all the locks and try again.
4305 */
4306 ASSERT((flags & HAC_PAGELOCK) != 0);
4307 sfmmu_mlist_exit(pml);
4308 SFMMU_HASH_UNLOCK(hmebp);
4309 page_unlock(pp);
4310 locked = 0;
4311 goto rehash;
4312 }
4313
4314 if (!VN_ISKAS(vp)) {
4315 /*
4316 * This is not a segkmem page but another page which
4317 * has been kernel mapped. It had better have at least
4318 * a share lock on it. Return the pfn.
4319 */
4320 sfmmu_mlist_exit(pml);
4321 SFMMU_HASH_UNLOCK(hmebp);
4322 if (locked)
4323 page_unlock(pp);
4324 kmem_cache_free(pa_hment_cache, pahmep);
4325 ASSERT(PAGE_LOCKED(pp));
4326 *rpfn = pfn;
4327 if (cookiep)
4328 *cookiep = HAC_COOKIE_NONE;
4329 return (0);
4330 }
4331
4332 /*
4333 * Setup this pa_hment and link its embedded dummy sf_hment into
4334 * the mapping list.
4335 */
4336 pp->p_share++;
4337 pahmep->cb_id = callback_id;
4338 pahmep->addr = vaddr;
4339 pahmep->len = len;
4340 pahmep->refcnt = 1;
4341 pahmep->flags = 0;
4342 pahmep->pvt = pvt;
4343
4344 sfhmep->hme_tte.ll = 0;
4345 sfhmep->hme_data = pahmep;
4346 sfhmep->hme_prev = osfhmep;
4347 sfhmep->hme_next = osfhmep->hme_next;
4348
4349 if (osfhmep->hme_next)
4350 osfhmep->hme_next->hme_prev = sfhmep;
4351
4352 osfhmep->hme_next = sfhmep;
4353
4354 sfmmu_mlist_exit(pml);
4355 SFMMU_HASH_UNLOCK(hmebp);
4356
4357 if (locked)
4358 page_unlock(pp);
4359
4360 *rpfn = pfn;
4361 if (cookiep)
4362 *cookiep = (void *)pahmep;
4363
4364 return (0);
4365 }
4366
4367 /*
4368 * Remove the relocation callbacks from the specified addr/len.
4369 */
4370 void
4371 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4372 void *cookie)
4373 {
4374 struct hmehash_bucket *hmebp;
4375 hmeblk_tag hblktag;
4376 struct hme_blk *hmeblkp;
4377 int hmeshift, hashno;
4378 caddr_t saddr;
4379 struct pa_hment *pahmep;
4380 struct sf_hment *sfhmep, *osfhmep;
4381 kmutex_t *pml;
4382 tte_t tte;
4383 page_t *pp;
4384 vnode_t *vp;
4385 u_offset_t off;
4386 int locked = 0;
4387
4388 /*
4389 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4390 * remove so just return.
4391 */
4392 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4393 return;
4394
4395 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4396
4397 rehash:
4398 /* Find the mapping(s) for this page */
4399 for (hashno = TTE64K, hmeblkp = NULL;
4400 hmeblkp == NULL && hashno <= mmu_hashcnt;
4401 hashno++) {
4402 hmeshift = HME_HASH_SHIFT(hashno);
4403 hblktag.htag_id = ksfmmup;
4404 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4405 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4406 hblktag.htag_rehash = hashno;
4407 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4408
4409 SFMMU_HASH_LOCK(hmebp);
4410
4411 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4412
4413 if (hmeblkp == NULL)
4414 SFMMU_HASH_UNLOCK(hmebp);
4415 }
4416
4417 if (hmeblkp == NULL)
4418 return;
4419
4420 ASSERT(!hmeblkp->hblk_shared);
4421
4422 HBLKTOHME(osfhmep, hmeblkp, saddr);
4423
4424 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4425 if (!TTE_IS_VALID(&tte)) {
4426 SFMMU_HASH_UNLOCK(hmebp);
4427 return;
4428 }
4429
4430 pp = osfhmep->hme_page;
4431 if (pp == NULL) {
4432 SFMMU_HASH_UNLOCK(hmebp);
4433 ASSERT(cookie == NULL);
4434 return;
4435 }
4436
4437 vp = pp->p_vnode;
4438 off = pp->p_offset;
4439
4440 pml = sfmmu_mlist_enter(pp);
4441
4442 if (flags & HAC_PAGELOCK) {
4443 if (!page_trylock(pp, SE_SHARED)) {
4444 /*
4445 * Somebody is holding SE_EXCL lock. Might
4446 * even be hat_page_relocate(). Drop all
4447 * our locks, lookup the page in &kvp, and
4448 * retry. If it doesn't exist in &kvp and &zvp,
4449 * then we must be dealing with a kernel mapped
4450 * page which doesn't actually belong to
4451 * segkmem so we punt.
4452 */
4453 sfmmu_mlist_exit(pml);
4454 SFMMU_HASH_UNLOCK(hmebp);
4455 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4456 /* check zvp before giving up */
4457 if (pp == NULL)
4458 pp = page_lookup(&zvp, (u_offset_t)saddr,
4459 SE_SHARED);
4460
4461 if (pp == NULL) {
4462 ASSERT(cookie == NULL);
4463 return;
4464 }
4465 page_unlock(pp);
4466 goto rehash;
4467 }
4468 locked = 1;
4469 }
4470
4471 ASSERT(PAGE_LOCKED(pp));
4472
4473 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4474 pp->p_offset != off) {
4475 /*
4476 * The page moved before we got our hands on it. Drop
4477 * all the locks and try again.
4478 */
4479 ASSERT((flags & HAC_PAGELOCK) != 0);
4480 sfmmu_mlist_exit(pml);
4481 SFMMU_HASH_UNLOCK(hmebp);
4482 page_unlock(pp);
4483 locked = 0;
4484 goto rehash;
4485 }
4486
4487 if (!VN_ISKAS(vp)) {
4488 /*
4489 * This is not a segkmem page but another page which
4490 * has been kernel mapped.
4491 */
4492 sfmmu_mlist_exit(pml);
4493 SFMMU_HASH_UNLOCK(hmebp);
4494 if (locked)
4495 page_unlock(pp);
4496 ASSERT(cookie == NULL);
4497 return;
4498 }
4499
4500 if (cookie != NULL) {
4501 pahmep = (struct pa_hment *)cookie;
4502 sfhmep = &pahmep->sfment;
4503 } else {
4504 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4505 sfhmep = sfhmep->hme_next) {
4506
4507 /*
4508 * skip va<->pa mappings
4509 */
4510 if (!IS_PAHME(sfhmep))
4511 continue;
4512
4513 pahmep = sfhmep->hme_data;
4514 ASSERT(pahmep != NULL);
4515
4516 /*
4517 * if pa_hment matches, remove it
4518 */
4519 if ((pahmep->pvt == pvt) &&
4520 (pahmep->addr == vaddr) &&
4521 (pahmep->len == len)) {
4522 break;
4523 }
4524 }
4525 }
4526
4527 if (sfhmep == NULL) {
4528 if (!panicstr) {
4529 panic("hat_delete_callback: pa_hment not found, pp %p",
4530 (void *)pp);
4531 }
4532 return;
4533 }
4534
4535 /*
4536 * Note: at this point a valid kernel mapping must still be
4537 * present on this page.
4538 */
4539 pp->p_share--;
4540 if (pp->p_share <= 0)
4541 panic("hat_delete_callback: zero p_share");
4542
4543 if (--pahmep->refcnt == 0) {
4544 if (pahmep->flags != 0)
4545 panic("hat_delete_callback: pa_hment is busy");
4546
4547 /*
4548 * Remove sfhmep from the mapping list for the page.
4549 */
4550 if (sfhmep->hme_prev) {
4551 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4552 } else {
4553 pp->p_mapping = sfhmep->hme_next;
4554 }
4555
4556 if (sfhmep->hme_next)
4557 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4558
4559 sfmmu_mlist_exit(pml);
4560 SFMMU_HASH_UNLOCK(hmebp);
4561
4562 if (locked)
4563 page_unlock(pp);
4564
4565 kmem_cache_free(pa_hment_cache, pahmep);
4566 return;
4567 }
4568
4569 sfmmu_mlist_exit(pml);
4570 SFMMU_HASH_UNLOCK(hmebp);
4571 if (locked)
4572 page_unlock(pp);
4573 }
4574
4575 /*
4576 * hat_probe returns 1 if the translation for the address 'addr' is
4577 * loaded, zero otherwise.
4578 *
 * hat_probe should be used only for advisory purposes because it may
4580 * occasionally return the wrong value. The implementation must guarantee that
4581 * returning the wrong value is a very rare event. hat_probe is used
4582 * to implement optimizations in the segment drivers.
4583 *
4584 */
4585 int
4586 hat_probe(struct hat *sfmmup, caddr_t addr)
4587 {
4588 pfn_t pfn;
4589 tte_t tte;
4590
4591 ASSERT(sfmmup != NULL);
4592
4593 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4594
4595 if (sfmmup == ksfmmup) {
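		/*
		 * Kernel mappings may be temporarily suspended while a page
		 * is being relocated; wait for the translation to settle.
		 */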
4596 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4597 == PFN_SUSPENDED) {
4598 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4599 }
4600 } else {
4601 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4602 }
4603
4604 if (pfn != PFN_INVALID)
4605 return (1);
4606 else
4607 return (0);
4608 }
4609
4610 ssize_t
4611 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4612 {
4613 tte_t tte;
4614
4615 if (sfmmup == ksfmmup) {
4616 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4617 return (-1);
4618 }
4619 } else {
4620 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4621 return (-1);
4622 }
4623 }
4624
4625 ASSERT(TTE_IS_VALID(&tte));
4626 return (TTEBYTES(TTE_CSZ(&tte)));
4627 }
4628
4629 uint_t
4630 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4631 {
4632 tte_t tte;
4633
4634 if (sfmmup == ksfmmup) {
4635 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4636 tte.ll = 0;
4637 }
4638 } else {
4639 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4640 tte.ll = 0;
4641 }
4642 }
4643 if (TTE_IS_VALID(&tte)) {
4644 *attr = sfmmu_ptov_attr(&tte);
4645 return (0);
4646 }
4647 *attr = 0;
4648 return ((uint_t)0xffffffff);
4649 }
4650
4651 /*
 * Enables more attributes on the specified address range (i.e. logical OR)
4653 */
4654 void
4655 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4656 {
4657 ASSERT(hat->sfmmu_as != NULL);
4658
4659 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4660 }
4661
4662 /*
4663 * Assigns attributes to the specified address range. All the attributes
4664 * are specified.
4665 */
4666 void
4667 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4668 {
4669 ASSERT(hat->sfmmu_as != NULL);
4670
4671 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4672 }
4673
4674 /*
 * Removes attributes on the specified address range (i.e. logical NAND)
4676 */
4677 void
4678 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4679 {
4680 ASSERT(hat->sfmmu_as != NULL);
4681
4682 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4683 }
4684
4685 /*
4686 * Change attributes on an address range to that specified by attr and mode.
4687 */
4688 static void
4689 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4690 int mode)
4691 {
4692 struct hmehash_bucket *hmebp;
4693 hmeblk_tag hblktag;
4694 int hmeshift, hashno = 1;
4695 struct hme_blk *hmeblkp, *list = NULL;
4696 caddr_t endaddr;
4697 cpuset_t cpuset;
4698 demap_range_t dmr;
4699
4700 CPUSET_ZERO(cpuset);
4701
4702 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4703 ASSERT((len & MMU_PAGEOFFSET) == 0);
4704 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4705
4706 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4707 ((addr + len) > (caddr_t)USERLIMIT)) {
4708 panic("user addr %p in kernel space",
4709 (void *)addr);
4710 }
4711
4712 endaddr = addr + len;
4713 hblktag.htag_id = sfmmup;
4714 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4715 DEMAP_RANGE_INIT(sfmmup, &dmr);
4716
4717 while (addr < endaddr) {
4718 hmeshift = HME_HASH_SHIFT(hashno);
4719 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4720 hblktag.htag_rehash = hashno;
4721 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4722
4723 SFMMU_HASH_LOCK(hmebp);
4724
4725 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4726 if (hmeblkp != NULL) {
4727 ASSERT(!hmeblkp->hblk_shared);
4728 /*
4729 * We've encountered a shadow hmeblk so skip the range
4730 * of the next smaller mapping size.
4731 */
4732 if (hmeblkp->hblk_shw_bit) {
4733 ASSERT(sfmmup != ksfmmup);
4734 ASSERT(hashno > 1);
4735 addr = (caddr_t)P2END((uintptr_t)addr,
4736 TTEBYTES(hashno - 1));
4737 } else {
4738 addr = sfmmu_hblk_chgattr(sfmmup,
4739 hmeblkp, addr, endaddr, &dmr, attr, mode);
4740 }
4741 SFMMU_HASH_UNLOCK(hmebp);
4742 hashno = 1;
4743 continue;
4744 }
4745 SFMMU_HASH_UNLOCK(hmebp);
4746
4747 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4748 /*
4749 * We have traversed the whole list and rehashed
4750 * if necessary without finding the address to chgattr.
4751 * This is ok, so we increment the address by the
4752 * smallest hmeblk range for kernel mappings or for
4753 * user mappings with no large pages, and the largest
4754 * hmeblk range, to account for shadow hmeblks, for
4755 * user mappings with large pages and continue.
4756 */
4757 if (sfmmup == ksfmmup)
4758 addr = (caddr_t)P2END((uintptr_t)addr,
4759 TTEBYTES(1));
4760 else
4761 addr = (caddr_t)P2END((uintptr_t)addr,
4762 TTEBYTES(hashno));
4763 hashno = 1;
4764 } else {
4765 hashno++;
4766 }
4767 }
4768
4769 sfmmu_hblks_list_purge(&list, 0);
4770 DEMAP_RANGE_FLUSH(&dmr);
4771 cpuset = sfmmup->sfmmu_cpusran;
4772 xt_sync(cpuset);
4773 }
4774
4775 /*
 * This function changes attributes on a range of addresses in an hmeblk.
 * It returns the next address whose attributes need to be changed.
4778 * It should be called with the hash lock held.
4779 * XXX It should be possible to optimize chgattr by not flushing every time but
4780 * on the other hand:
4781 * 1. do one flush crosscall.
4782 * 2. only flush if we are increasing permissions (make sure this will work)
4783 */
4784 static caddr_t
4785 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4786 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4787 {
4788 tte_t tte, tteattr, tteflags, ttemod;
4789 struct sf_hment *sfhmep;
4790 int ttesz;
4791 struct page *pp = NULL;
4792 kmutex_t *pml, *pmtx;
4793 int ret;
4794 int use_demap_range;
4795 #if defined(SF_ERRATA_57)
4796 int check_exec;
4797 #endif
4798
4799 ASSERT(in_hblk_range(hmeblkp, addr));
4800 ASSERT(hmeblkp->hblk_shw_bit == 0);
4801 ASSERT(!hmeblkp->hblk_shared);
4802
4803 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4804 ttesz = get_hblk_ttesz(hmeblkp);
4805
4806 /*
4807 * Flush the current demap region if addresses have been
4808 * skipped or the page size doesn't match.
4809 */
4810 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
4811 if (use_demap_range) {
4812 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
4813 } else if (dmrp != NULL) {
4814 DEMAP_RANGE_FLUSH(dmrp);
4815 }
4816
4817 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
4818 #if defined(SF_ERRATA_57)
4819 check_exec = (sfmmup != ksfmmup) &&
4820 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
4821 TTE_IS_EXECUTABLE(&tteattr);
4822 #endif
4823 HBLKTOHME(sfhmep, hmeblkp, addr);
4824 while (addr < endaddr) {
4825 sfmmu_copytte(&sfhmep->hme_tte, &tte);
4826 if (TTE_IS_VALID(&tte)) {
4827 if ((tte.ll & tteflags.ll) == tteattr.ll) {
4828 /*
				 * If the new attr is the same as the old one,
				 * continue.
4831 */
4832 goto next_addr;
4833 }
4834 if (!TTE_IS_WRITABLE(&tteattr)) {
4835 /*
				 * Make sure we clear the hw modify bit if we
				 * are removing write protection.
4838 */
4839 tteflags.tte_intlo |= TTE_HWWR_INT;
4840 }
4841
4842 pml = NULL;
4843 pp = sfhmep->hme_page;
4844 if (pp) {
4845 pml = sfmmu_mlist_enter(pp);
4846 }
4847
4848 if (pp != sfhmep->hme_page) {
4849 /*
4850 * tte must have been unloaded.
4851 */
4852 ASSERT(pml);
4853 sfmmu_mlist_exit(pml);
4854 continue;
4855 }
4856
4857 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
4858
4859 ttemod = tte;
4860 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
4861 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
4862
4863 #if defined(SF_ERRATA_57)
4864 if (check_exec && addr < errata57_limit)
4865 ttemod.tte_exec_perm = 0;
4866 #endif
4867 ret = sfmmu_modifytte_try(&tte, &ttemod,
4868 &sfhmep->hme_tte);
4869
4870 if (ret < 0) {
4871 /* tte changed underneath us */
4872 if (pml) {
4873 sfmmu_mlist_exit(pml);
4874 }
4875 continue;
4876 }
4877
4878 if (tteflags.tte_intlo & TTE_HWWR_INT) {
4879 /*
4880 * need to sync if we are clearing modify bit.
4881 */
4882 sfmmu_ttesync(sfmmup, addr, &tte, pp);
4883 }
4884
4885 if (pp && PP_ISRO(pp)) {
4886 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
4887 pmtx = sfmmu_page_enter(pp);
4888 PP_CLRRO(pp);
4889 sfmmu_page_exit(pmtx);
4890 }
4891 }
4892
4893 if (ret > 0 && use_demap_range) {
4894 DEMAP_RANGE_MARKPG(dmrp, addr);
4895 } else if (ret > 0) {
4896 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
4897 }
4898
4899 if (pml) {
4900 sfmmu_mlist_exit(pml);
4901 }
4902 }
4903 next_addr:
4904 addr += TTEBYTES(ttesz);
4905 sfhmep++;
4906 DEMAP_RANGE_NEXTPG(dmrp);
4907 }
4908 return (addr);
4909 }
4910
4911 /*
4912 * This routine converts virtual attributes to physical ones. It will
4913 * update the tteflags field with the tte mask corresponding to the attributes
4914 * affected and it returns the new attributes. It will also clear the modify
4915 * bit if we are taking away write permission. This is necessary since the
4916 * modify bit is the hardware permission bit and we need to clear it in order
4917 * to detect write faults.
4918 */
4919 static uint64_t
4920 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
4921 {
4922 tte_t ttevalue;
4923
4924 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
4925
4926 switch (mode) {
4927 case SFMMU_CHGATTR:
4928 /* all attributes specified */
4929 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
4930 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
4931 ttemaskp->tte_inthi = TTEINTHI_ATTR;
4932 ttemaskp->tte_intlo = TTEINTLO_ATTR;
4933 break;
4934 case SFMMU_SETATTR:
4935 ASSERT(!(attr & ~HAT_PROT_MASK));
4936 ttemaskp->ll = 0;
4937 ttevalue.ll = 0;
4938 /*
		 * A valid tte implies exec and read for sfmmu,
		 * so there is no need to do anything about them.
		 * Since privileged access implies user access,
		 * PROT_USER doesn't make sense either.
4943 */
4944 if (attr & PROT_WRITE) {
4945 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
4946 ttevalue.tte_intlo |= TTE_WRPRM_INT;
4947 }
4948 break;
4949 case SFMMU_CLRATTR:
4950 /* attributes will be nand with current ones */
4951 if (attr & ~(PROT_WRITE | PROT_USER)) {
4952 panic("sfmmu: attr %x not supported", attr);
4953 }
4954 ttemaskp->ll = 0;
4955 ttevalue.ll = 0;
4956 if (attr & PROT_WRITE) {
4957 /* clear both writable and modify bit */
4958 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
4959 }
4960 if (attr & PROT_USER) {
4961 ttemaskp->tte_intlo |= TTE_PRIV_INT;
4962 ttevalue.tte_intlo |= TTE_PRIV_INT;
4963 }
4964 break;
4965 default:
4966 panic("sfmmu_vtop_attr: bad mode %x", mode);
4967 }
4968 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
4969 return (ttevalue.ll);
4970 }
4971
4972 static uint_t
4973 sfmmu_ptov_attr(tte_t *ttep)
4974 {
4975 uint_t attr;
4976
4977 ASSERT(TTE_IS_VALID(ttep));
4978
4979 attr = PROT_READ;
4980
4981 if (TTE_IS_WRITABLE(ttep)) {
4982 attr |= PROT_WRITE;
4983 }
4984 if (TTE_IS_EXECUTABLE(ttep)) {
4985 attr |= PROT_EXEC;
4986 }
4987 if (!TTE_IS_PRIVILEGED(ttep)) {
4988 attr |= PROT_USER;
4989 }
4990 if (TTE_IS_NFO(ttep)) {
4991 attr |= HAT_NOFAULT;
4992 }
4993 if (TTE_IS_NOSYNC(ttep)) {
4994 attr |= HAT_NOSYNC;
4995 }
4996 if (TTE_IS_SIDEFFECT(ttep)) {
4997 attr |= SFMMU_SIDEFFECT;
4998 }
4999 if (!TTE_IS_VCACHEABLE(ttep)) {
5000 attr |= SFMMU_UNCACHEVTTE;
5001 }
5002 if (!TTE_IS_PCACHEABLE(ttep)) {
5003 attr |= SFMMU_UNCACHEPTTE;
5004 }
5005 return (attr);
5006 }
5007
5008 /*
5009 * hat_chgprot is a deprecated hat call. New segment drivers
5010 * should store all attributes and use hat_*attr calls.
5011 *
5012 * Change the protections in the virtual address range
5013 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5014 * then remove write permission, leaving the other
5015 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5016 *
5017 */
5018 void
5019 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5020 {
5021 struct hmehash_bucket *hmebp;
5022 hmeblk_tag hblktag;
5023 int hmeshift, hashno = 1;
5024 struct hme_blk *hmeblkp, *list = NULL;
5025 caddr_t endaddr;
5026 cpuset_t cpuset;
5027 demap_range_t dmr;
5028
5029 ASSERT((len & MMU_PAGEOFFSET) == 0);
5030 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5031
5032 ASSERT(sfmmup->sfmmu_as != NULL);
5033
5034 CPUSET_ZERO(cpuset);
5035
5036 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5037 ((addr + len) > (caddr_t)USERLIMIT)) {
5038 panic("user addr %p vprot %x in kernel space",
5039 (void *)addr, vprot);
5040 }
5041 endaddr = addr + len;
5042 hblktag.htag_id = sfmmup;
5043 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5044 DEMAP_RANGE_INIT(sfmmup, &dmr);
5045
5046 while (addr < endaddr) {
5047 hmeshift = HME_HASH_SHIFT(hashno);
5048 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5049 hblktag.htag_rehash = hashno;
5050 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5051
5052 SFMMU_HASH_LOCK(hmebp);
5053
5054 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5055 if (hmeblkp != NULL) {
5056 ASSERT(!hmeblkp->hblk_shared);
5057 /*
5058 * We've encountered a shadow hmeblk so skip the range
5059 * of the next smaller mapping size.
5060 */
5061 if (hmeblkp->hblk_shw_bit) {
5062 ASSERT(sfmmup != ksfmmup);
5063 ASSERT(hashno > 1);
5064 addr = (caddr_t)P2END((uintptr_t)addr,
5065 TTEBYTES(hashno - 1));
5066 } else {
5067 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5068 addr, endaddr, &dmr, vprot);
5069 }
5070 SFMMU_HASH_UNLOCK(hmebp);
5071 hashno = 1;
5072 continue;
5073 }
5074 SFMMU_HASH_UNLOCK(hmebp);
5075
5076 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5077 /*
5078 * We have traversed the whole list and rehashed
5079 * if necessary without finding the address to chgprot.
			 * This is ok, so we increment the address by the
5081 * smallest hmeblk range for kernel mappings and the
5082 * largest hmeblk range, to account for shadow hmeblks,
5083 * for user mappings and continue.
5084 */
5085 if (sfmmup == ksfmmup)
5086 addr = (caddr_t)P2END((uintptr_t)addr,
5087 TTEBYTES(1));
5088 else
5089 addr = (caddr_t)P2END((uintptr_t)addr,
5090 TTEBYTES(hashno));
5091 hashno = 1;
5092 } else {
5093 hashno++;
5094 }
5095 }
5096
5097 sfmmu_hblks_list_purge(&list, 0);
5098 DEMAP_RANGE_FLUSH(&dmr);
5099 cpuset = sfmmup->sfmmu_cpusran;
5100 xt_sync(cpuset);
5101 }
5102
5103 /*
 * This function changes protections on a range of addresses in an hmeblk.
 * It returns the next address whose protections need to be changed.
 * It should be called with the hash lock held.
 * XXX It should be possible to optimize chgprot by not flushing every time but
5108 * on the other hand:
5109 * 1. do one flush crosscall.
5110 * 2. only flush if we are increasing permissions (make sure this will work)
5111 */
5112 static caddr_t
5113 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5114 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5115 {
5116 uint_t pprot;
5117 tte_t tte, ttemod;
5118 struct sf_hment *sfhmep;
5119 uint_t tteflags;
5120 int ttesz;
5121 struct page *pp = NULL;
5122 kmutex_t *pml, *pmtx;
5123 int ret;
5124 int use_demap_range;
5125 #if defined(SF_ERRATA_57)
5126 int check_exec;
5127 #endif
5128
5129 ASSERT(in_hblk_range(hmeblkp, addr));
5130 ASSERT(hmeblkp->hblk_shw_bit == 0);
5131 ASSERT(!hmeblkp->hblk_shared);
5132
5133 #ifdef DEBUG
5134 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5135 (endaddr < get_hblk_endaddr(hmeblkp))) {
5136 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5137 }
5138 #endif /* DEBUG */
5139
5140 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5141 ttesz = get_hblk_ttesz(hmeblkp);
5142
5143 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5144 #if defined(SF_ERRATA_57)
5145 check_exec = (sfmmup != ksfmmup) &&
5146 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5147 ((vprot & PROT_EXEC) == PROT_EXEC);
5148 #endif
5149 HBLKTOHME(sfhmep, hmeblkp, addr);
5150
5151 /*
5152 * Flush the current demap region if addresses have been
5153 * skipped or the page size doesn't match.
5154 */
5155 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5156 if (use_demap_range) {
5157 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5158 } else if (dmrp != NULL) {
5159 DEMAP_RANGE_FLUSH(dmrp);
5160 }
5161
5162 while (addr < endaddr) {
5163 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5164 if (TTE_IS_VALID(&tte)) {
5165 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5166 /*
				 * If the new protection is the same as the
				 * old one, continue.
5169 */
5170 goto next_addr;
5171 }
5172 pml = NULL;
5173 pp = sfhmep->hme_page;
5174 if (pp) {
5175 pml = sfmmu_mlist_enter(pp);
5176 }
5177 if (pp != sfhmep->hme_page) {
5178 /*
				 * The tte must have been unloaded
				 * underneath us.  Recheck.
5181 */
5182 ASSERT(pml);
5183 sfmmu_mlist_exit(pml);
5184 continue;
5185 }
5186
5187 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5188
5189 ttemod = tte;
5190 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5191 #if defined(SF_ERRATA_57)
5192 if (check_exec && addr < errata57_limit)
5193 ttemod.tte_exec_perm = 0;
5194 #endif
5195 ret = sfmmu_modifytte_try(&tte, &ttemod,
5196 &sfhmep->hme_tte);
5197
5198 if (ret < 0) {
5199 /* tte changed underneath us */
5200 if (pml) {
5201 sfmmu_mlist_exit(pml);
5202 }
5203 continue;
5204 }
5205
5206 if (tteflags & TTE_HWWR_INT) {
5207 /*
5208 * need to sync if we are clearing modify bit.
5209 */
5210 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5211 }
5212
5213 if (pp && PP_ISRO(pp)) {
5214 if (pprot & TTE_WRPRM_INT) {
5215 pmtx = sfmmu_page_enter(pp);
5216 PP_CLRRO(pp);
5217 sfmmu_page_exit(pmtx);
5218 }
5219 }
5220
5221 if (ret > 0 && use_demap_range) {
5222 DEMAP_RANGE_MARKPG(dmrp, addr);
5223 } else if (ret > 0) {
5224 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5225 }
5226
5227 if (pml) {
5228 sfmmu_mlist_exit(pml);
5229 }
5230 }
5231 next_addr:
5232 addr += TTEBYTES(ttesz);
5233 sfhmep++;
5234 DEMAP_RANGE_NEXTPG(dmrp);
5235 }
5236 return (addr);
5237 }
5238
5239 /*
5240 * This routine is deprecated and should only be used by hat_chgprot.
5241 * The correct routine is sfmmu_vtop_attr.
5242 * This routine converts virtual page protections to physical ones. It will
5243 * update the tteflags field with the tte mask corresponding to the protections
5244 * affected and it returns the new protections. It will also clear the modify
5245 * bit if we are taking away write permission. This is necessary since the
5246 * modify bit is the hardware permission bit and we need to clear it in order
5247 * to detect write faults.
5248 * It accepts the following special protections:
5249 * ~PROT_WRITE = remove write permissions.
5250 * ~PROT_USER = remove user permissions.
5251 */
5252 static uint_t
5253 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5254 {
5255 if (vprot == (uint_t)~PROT_WRITE) {
5256 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5257 return (0); /* will cause wrprm to be cleared */
5258 }
5259 if (vprot == (uint_t)~PROT_USER) {
5260 *tteflagsp = TTE_PRIV_INT;
5261 return (0); /* will cause privprm to be cleared */
5262 }
5263 if ((vprot == 0) || (vprot == PROT_USER) ||
5264 ((vprot & PROT_ALL) != vprot)) {
5265 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5266 }
5267
5268 switch (vprot) {
5269 case (PROT_READ):
5270 case (PROT_EXEC):
5271 case (PROT_EXEC | PROT_READ):
5272 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5273 return (TTE_PRIV_INT); /* set prv and clr wrt */
5274 case (PROT_WRITE):
5275 case (PROT_WRITE | PROT_READ):
5276 case (PROT_EXEC | PROT_WRITE):
5277 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5278 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5279 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5280 case (PROT_USER | PROT_READ):
5281 case (PROT_USER | PROT_EXEC):
5282 case (PROT_USER | PROT_EXEC | PROT_READ):
5283 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5284 return (0); /* clr prv and wrt */
5285 case (PROT_USER | PROT_WRITE):
5286 case (PROT_USER | PROT_WRITE | PROT_READ):
5287 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5288 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5289 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5290 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5291 default:
5292 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5293 }
5294 return (0);
5295 }
5296
5297 /*
5298 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5299 * the normal algorithm would take too long for a very large VA range with
 * few real mappings. This routine just walks through all HMEs in the global
5301 * hash table to find and remove mappings.
5302 */
5303 static void
5304 hat_unload_large_virtual(
5305 struct hat *sfmmup,
5306 caddr_t startaddr,
5307 size_t len,
5308 uint_t flags,
5309 hat_callback_t *callback)
5310 {
5311 struct hmehash_bucket *hmebp;
5312 struct hme_blk *hmeblkp;
5313 struct hme_blk *pr_hblk = NULL;
5314 struct hme_blk *nx_hblk;
5315 struct hme_blk *list = NULL;
5316 int i;
5317 demap_range_t dmr, *dmrp;
5318 cpuset_t cpuset;
5319 caddr_t endaddr = startaddr + len;
5320 caddr_t sa;
5321 caddr_t ea;
5322 caddr_t cb_sa[MAX_CB_ADDR];
5323 caddr_t cb_ea[MAX_CB_ADDR];
5324 int addr_cnt = 0;
5325 int a = 0;
5326
5327 if (sfmmup->sfmmu_free) {
5328 dmrp = NULL;
5329 } else {
5330 dmrp = &dmr;
5331 DEMAP_RANGE_INIT(sfmmup, dmrp);
5332 }
5333
5334 /*
5335 * Loop through all the hash buckets of HME blocks looking for matches.
5336 */
5337 for (i = 0; i <= UHMEHASH_SZ; i++) {
5338 hmebp = &uhme_hash[i];
5339 SFMMU_HASH_LOCK(hmebp);
5340 hmeblkp = hmebp->hmeblkp;
5341 pr_hblk = NULL;
5342 while (hmeblkp) {
5343 nx_hblk = hmeblkp->hblk_next;
5344
5345 /*
			 * Skip if not this context, if a shadow block, or
			 * if the mapping is not in the requested range.
5348 */
5349 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5350 hmeblkp->hblk_shw_bit ||
5351 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5352 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5353 pr_hblk = hmeblkp;
5354 goto next_block;
5355 }
5356
5357 ASSERT(!hmeblkp->hblk_shared);
5358 /*
5359 * unload if there are any current valid mappings
5360 */
5361 if (hmeblkp->hblk_vcnt != 0 ||
5362 hmeblkp->hblk_hmecnt != 0)
5363 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5364 sa, ea, dmrp, flags);
5365
5366 /*
5367 * on unmap we also release the HME block itself, once
5368 * all mappings are gone.
5369 */
5370 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5371 !hmeblkp->hblk_vcnt &&
5372 !hmeblkp->hblk_hmecnt) {
5373 ASSERT(!hmeblkp->hblk_lckcnt);
5374 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5375 &list, 0);
5376 } else {
5377 pr_hblk = hmeblkp;
5378 }
5379
5380 if (callback == NULL)
5381 goto next_block;
5382
5383 /*
5384 * HME blocks may span more than one page, but we may be
5385 * unmapping only one page, so check for a smaller range
5386 * for the callback
5387 */
5388 if (sa < startaddr)
5389 sa = startaddr;
5390 if (--ea > endaddr)
5391 ea = endaddr - 1;
5392
5393 cb_sa[addr_cnt] = sa;
5394 cb_ea[addr_cnt] = ea;
5395 if (++addr_cnt == MAX_CB_ADDR) {
5396 if (dmrp != NULL) {
5397 DEMAP_RANGE_FLUSH(dmrp);
5398 cpuset = sfmmup->sfmmu_cpusran;
5399 xt_sync(cpuset);
5400 }
5401
5402 for (a = 0; a < MAX_CB_ADDR; ++a) {
5403 callback->hcb_start_addr = cb_sa[a];
5404 callback->hcb_end_addr = cb_ea[a];
5405 callback->hcb_function(callback);
5406 }
5407 addr_cnt = 0;
5408 }
5409
5410 next_block:
5411 hmeblkp = nx_hblk;
5412 }
5413 SFMMU_HASH_UNLOCK(hmebp);
5414 }
5415
5416 sfmmu_hblks_list_purge(&list, 0);
5417 if (dmrp != NULL) {
5418 DEMAP_RANGE_FLUSH(dmrp);
5419 cpuset = sfmmup->sfmmu_cpusran;
5420 xt_sync(cpuset);
5421 }
5422
5423 for (a = 0; a < addr_cnt; ++a) {
5424 callback->hcb_start_addr = cb_sa[a];
5425 callback->hcb_end_addr = cb_ea[a];
5426 callback->hcb_function(callback);
5427 }
5428
5429 /*
5430 * Check TSB and TLB page sizes if the process isn't exiting.
5431 */
5432 if (!sfmmup->sfmmu_free)
5433 sfmmu_check_page_sizes(sfmmup, 0);
5434 }
5435
5436 /*
5437 * Unload all the mappings in the range [addr..addr+len). addr and len must
5438 * be MMU_PAGESIZE aligned.
5439 */
5440
5441 extern struct seg *segkmap;
5442 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5443 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
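
/*
 * segkmap mappings come and go constantly, so hblks backing segkmap are
 * kept around on unload (unless HAT_UNLOAD_UNMAP is specified) rather than
 * being freed and reallocated each time.
 */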
5444
5445
5446 void
5447 hat_unload_callback(
5448 struct hat *sfmmup,
5449 caddr_t addr,
5450 size_t len,
5451 uint_t flags,
5452 hat_callback_t *callback)
5453 {
5454 struct hmehash_bucket *hmebp;
5455 hmeblk_tag hblktag;
5456 int hmeshift, hashno, iskernel;
5457 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5458 caddr_t endaddr;
5459 cpuset_t cpuset;
5460 int addr_count = 0;
5461 int a;
5462 caddr_t cb_start_addr[MAX_CB_ADDR];
5463 caddr_t cb_end_addr[MAX_CB_ADDR];
5464 int issegkmap = ISSEGKMAP(sfmmup, addr);
5465 demap_range_t dmr, *dmrp;
5466
5467 ASSERT(sfmmup->sfmmu_as != NULL);
5468
5469 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5470 AS_LOCK_HELD(sfmmup->sfmmu_as));
5471
5472 ASSERT(sfmmup != NULL);
5473 ASSERT((len & MMU_PAGEOFFSET) == 0);
5474 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5475
5476 /*
5477 * Probing through a large VA range (say 63 bits) will be slow, even
5478 * at 4 Meg steps between the probes. So, when the virtual address range
5479 * is very large, search the HME entries for what to unload.
5480 *
5481 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5482 *
	 * UHMEHASH_SZ is the number of hash buckets to examine
5484 *
5485 */
5486 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5487 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5488 return;
5489 }
5490
5491 CPUSET_ZERO(cpuset);
5492
5493 /*
5494 * If the process is exiting, we can save a lot of fuss since
5495 * we'll flush the TLB when we free the ctx anyway.
5496 */
5497 if (sfmmup->sfmmu_free) {
5498 dmrp = NULL;
5499 } else {
5500 dmrp = &dmr;
5501 DEMAP_RANGE_INIT(sfmmup, dmrp);
5502 }
5503
5504 endaddr = addr + len;
5505 hblktag.htag_id = sfmmup;
5506 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5507
5508 /*
5509 * It is likely for the vm to call unload over a wide range of
5510 * addresses that are actually very sparsely populated by
5511 * translations. In order to speed this up the sfmmu hat supports
5512 * the concept of shadow hmeblks. Dummy large page hmeblks that
5513 * correspond to actual small translations are allocated at tteload
5514 * time and are referred to as shadow hmeblks. Now, during unload
5515 * time, we first check if we have a shadow hmeblk for that
5516 * translation. The absence of one means the corresponding address
5517 * range is empty and can be skipped.
5518 *
	 * The kernel is an exception to the above statement, which is why
	 * for it we don't use shadow hmeblks and instead hash starting from
	 * the smallest page size.
5522 */
5523 if (sfmmup == KHATID) {
5524 iskernel = 1;
5525 hashno = TTE64K;
5526 } else {
5527 iskernel = 0;
5528 if (mmu_page_sizes == max_mmu_page_sizes) {
5529 hashno = TTE256M;
5530 } else {
5531 hashno = TTE4M;
5532 }
5533 }
5534 while (addr < endaddr) {
5535 hmeshift = HME_HASH_SHIFT(hashno);
5536 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5537 hblktag.htag_rehash = hashno;
5538 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5539
5540 SFMMU_HASH_LOCK(hmebp);
5541
5542 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5543 if (hmeblkp == NULL) {
5544 /*
5545			 * didn't find an hmeblk. skip the appropriate
5546 * address range.
5547 */
5548 SFMMU_HASH_UNLOCK(hmebp);
5549 if (iskernel) {
5550 if (hashno < mmu_hashcnt) {
5551 hashno++;
5552 continue;
5553 } else {
5554 hashno = TTE64K;
5555 addr = (caddr_t)roundup((uintptr_t)addr
5556 + 1, MMU_PAGESIZE64K);
5557 continue;
5558 }
5559 }
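			/*
			 * User address space: skip to the next boundary of
			 * the current probe size and pick the next hash size
			 * to probe from the alignment of the new address
			 * (e.g. an address that is 512K aligned but not 4M
			 * aligned is probed at TTE512K next).
			 */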
5560 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5561 (1 << hmeshift));
5562 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5563 ASSERT(hashno == TTE64K);
5564 continue;
5565 }
5566 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5567 hashno = TTE512K;
5568 continue;
5569 }
5570 if (mmu_page_sizes == max_mmu_page_sizes) {
5571 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5572 hashno = TTE4M;
5573 continue;
5574 }
5575 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5576 hashno = TTE32M;
5577 continue;
5578 }
5579 hashno = TTE256M;
5580 continue;
5581 } else {
5582 hashno = TTE4M;
5583 continue;
5584 }
5585 }
5586 ASSERT(hmeblkp);
5587 ASSERT(!hmeblkp->hblk_shared);
5588 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5589 /*
5590 * If the valid count is zero we can skip the range
5591 * mapped by this hmeblk.
5592			 * We free hblks in the HAT_UNLOAD_UNMAP case; segment
5593			 * drivers use that flag as a hint that the mapping
5594			 * resource won't be used any longer.
5595 * The best example of this is during exit().
5596 */
5597 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5598 get_hblk_span(hmeblkp));
5599 if ((flags & HAT_UNLOAD_UNMAP) ||
5600 (iskernel && !issegkmap)) {
5601 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5602 &list, 0);
5603 }
5604 SFMMU_HASH_UNLOCK(hmebp);
5605
5606 if (iskernel) {
5607 hashno = TTE64K;
5608 continue;
5609 }
5610 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5611 ASSERT(hashno == TTE64K);
5612 continue;
5613 }
5614 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5615 hashno = TTE512K;
5616 continue;
5617 }
5618 if (mmu_page_sizes == max_mmu_page_sizes) {
5619 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5620 hashno = TTE4M;
5621 continue;
5622 }
5623 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5624 hashno = TTE32M;
5625 continue;
5626 }
5627 hashno = TTE256M;
5628 continue;
5629 } else {
5630 hashno = TTE4M;
5631 continue;
5632 }
5633 }
5634 if (hmeblkp->hblk_shw_bit) {
5635 /*
5636			 * If we encounter a shadow hmeblk we know there are
5637 * smaller sized hmeblks mapping the same address space.
5638 * Decrement the hash size and rehash.
5639 */
5640 ASSERT(sfmmup != KHATID);
5641 hashno--;
5642 SFMMU_HASH_UNLOCK(hmebp);
5643 continue;
5644 }
5645
5646 /*
5647		 * Track callback address ranges.  Start a new range only when
5648		 * it is not contiguous with the previous one (else extend it).
5649 */
5650 if (callback != NULL) {
5651 if (addr_count > 0 &&
5652 addr == cb_end_addr[addr_count - 1])
5653 --addr_count;
5654 else
5655 cb_start_addr[addr_count] = addr;
5656 }
5657
5658 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5659 dmrp, flags);
5660
5661 if (callback != NULL)
5662 cb_end_addr[addr_count++] = addr;
5663
5664 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5665 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5666 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5667 }
5668 SFMMU_HASH_UNLOCK(hmebp);
5669
5670 /*
5671 * Notify our caller as to exactly which pages
5672 * have been unloaded. We do these in clumps,
5673 * to minimize the number of xt_sync()s that need to occur.
5674 */
5675 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5676 if (dmrp != NULL) {
5677 DEMAP_RANGE_FLUSH(dmrp);
5678 cpuset = sfmmup->sfmmu_cpusran;
5679 xt_sync(cpuset);
5680 }
5681
5682 for (a = 0; a < MAX_CB_ADDR; ++a) {
5683 callback->hcb_start_addr = cb_start_addr[a];
5684 callback->hcb_end_addr = cb_end_addr[a];
5685 callback->hcb_function(callback);
5686 }
5687 addr_count = 0;
5688 }
5689 if (iskernel) {
5690 hashno = TTE64K;
5691 continue;
5692 }
5693 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5694 ASSERT(hashno == TTE64K);
5695 continue;
5696 }
5697 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5698 hashno = TTE512K;
5699 continue;
5700 }
5701 if (mmu_page_sizes == max_mmu_page_sizes) {
5702 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5703 hashno = TTE4M;
5704 continue;
5705 }
5706 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5707 hashno = TTE32M;
5708 continue;
5709 }
5710 hashno = TTE256M;
5711 } else {
5712 hashno = TTE4M;
5713 }
5714 }
5715
5716 sfmmu_hblks_list_purge(&list, 0);
5717 if (dmrp != NULL) {
5718 DEMAP_RANGE_FLUSH(dmrp);
5719 cpuset = sfmmup->sfmmu_cpusran;
5720 xt_sync(cpuset);
5721 }
5722 if (callback && addr_count != 0) {
5723 for (a = 0; a < addr_count; ++a) {
5724 callback->hcb_start_addr = cb_start_addr[a];
5725 callback->hcb_end_addr = cb_end_addr[a];
5726 callback->hcb_function(callback);
5727 }
5728 }
5729
5730 /*
5731 * Check TSB and TLB page sizes if the process isn't exiting.
5732 */
5733 if (!sfmmup->sfmmu_free)
5734 sfmmu_check_page_sizes(sfmmup, 0);
5735 }
5736
5737 /*
5738 * Unload all the mappings in the range [addr..addr+len). addr and len must
5739 * be MMU_PAGESIZE aligned.
5740 */
5741 void
5742 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5743 {
5744 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5745 }
5746
5747
5748 /*
5749 * Find the largest mapping size for this page.
5750 */
5751 int
5752 fnd_mapping_sz(page_t *pp)
5753 {
5754 int sz;
5755 int p_index;
5756
5757 p_index = PP_MAPINDEX(pp);
5758
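	/*
	 * p_index has (roughly) one bit per mapping size present on the
	 * page; the loop below returns the position of the highest set
	 * bit, i.e. the largest TTE size code.  Illustrative example: if
	 * only the 4M bit were set, 0x8 >> 1 = 0x4 and three further
	 * shifts reach zero, giving sz = 3 (TTE4M).
	 */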
5759 sz = 0;
5760 p_index >>= 1; /* don't care about 8K bit */
5761 for (; p_index; p_index >>= 1) {
5762 sz++;
5763 }
5764
5765 return (sz);
5766 }
5767
5768 /*
5769 * This function unloads a range of addresses for an hmeblk.
5770 * It returns the next address to be unloaded.
5771 * It should be called with the hash lock held.
5772 */
5773 static caddr_t
5774 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5775 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5776 {
5777 tte_t tte, ttemod;
5778 struct sf_hment *sfhmep;
5779 int ttesz;
5780 long ttecnt;
5781 page_t *pp;
5782 kmutex_t *pml;
5783 int ret;
5784 int use_demap_range;
5785
5786 ASSERT(in_hblk_range(hmeblkp, addr));
5787 ASSERT(!hmeblkp->hblk_shw_bit);
5788 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
5789 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
5790 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
5791
5792 #ifdef DEBUG
5793 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5794 (endaddr < get_hblk_endaddr(hmeblkp))) {
5795 panic("sfmmu_hblk_unload: partial unload of large page");
5796 }
5797 #endif /* DEBUG */
5798
5799 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5800 ttesz = get_hblk_ttesz(hmeblkp);
5801
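	/*
	 * Accumulate demaps in the range only while the hmeblk page size
	 * matches the range page size; on a size change, flush what has
	 * been gathered and fall back to per-page sfmmu_tlb_demap() calls
	 * below.  A NULL dmrp means the address space is being freed, so
	 * the context-wide flush will take care of the TLB.
	 */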
5802 use_demap_range = ((dmrp == NULL) ||
5803 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
5804
5805 if (use_demap_range) {
5806 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5807 } else if (dmrp != NULL) {
5808 DEMAP_RANGE_FLUSH(dmrp);
5809 }
5810 ttecnt = 0;
5811 HBLKTOHME(sfhmep, hmeblkp, addr);
5812
5813 while (addr < endaddr) {
5814 pml = NULL;
5815 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5816 if (TTE_IS_VALID(&tte)) {
5817 pp = sfhmep->hme_page;
5818 if (pp != NULL) {
5819 pml = sfmmu_mlist_enter(pp);
5820 }
5821
5822 /*
5823 * Verify if hme still points to 'pp' now that
5824 * we have p_mapping lock.
5825 */
5826 if (sfhmep->hme_page != pp) {
5827 if (pp != NULL && sfhmep->hme_page != NULL) {
5828 ASSERT(pml != NULL);
5829 sfmmu_mlist_exit(pml);
5830 /* Re-start this iteration. */
5831 continue;
5832 }
5833 ASSERT((pp != NULL) &&
5834 (sfhmep->hme_page == NULL));
5835 goto tte_unloaded;
5836 }
5837
5838 /*
5839 * This point on we have both HASH and p_mapping
5840 * lock.
5841 */
5842 ASSERT(pp == sfhmep->hme_page);
5843 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5844
5845 /*
5846 * We need to loop on modify tte because it is
5847 * possible for pagesync to come along and
5848 * change the software bits beneath us.
5849 *
5850 * Page_unload can also invalidate the tte after
5851 * we read tte outside of p_mapping lock.
5852 */
5853 again:
5854 ttemod = tte;
5855
5856 TTE_SET_INVALID(&ttemod);
5857 ret = sfmmu_modifytte_try(&tte, &ttemod,
5858 &sfhmep->hme_tte);
5859
5860 if (ret <= 0) {
5861 if (TTE_IS_VALID(&tte)) {
5862 ASSERT(ret < 0);
5863 goto again;
5864 }
5865 if (pp != NULL) {
5866 panic("sfmmu_hblk_unload: pp = 0x%p "
5867 "tte became invalid under mlist"
5868 " lock = 0x%p", (void *)pp,
5869 (void *)pml);
5870 }
5871 continue;
5872 }
5873
5874 if (!(flags & HAT_UNLOAD_NOSYNC)) {
5875 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5876 }
5877
5878 /*
5879 * Ok- we invalidated the tte. Do the rest of the job.
5880 */
5881 ttecnt++;
5882
5883 if (flags & HAT_UNLOAD_UNLOCK) {
5884 ASSERT(hmeblkp->hblk_lckcnt > 0);
5885 atomic_dec_32(&hmeblkp->hblk_lckcnt);
5886 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
5887 }
5888
5889 /*
5890 * Normally we would need to flush the page
5891 * from the virtual cache at this point in
5892 * order to prevent a potential cache alias
5893 * inconsistency.
5894 * The particular scenario we need to worry
5895 * about is:
5896			 * Given:   va1 and va2 are two virtual addresses
5897 * that alias and map the same physical
5898 * address.
5899 * 1. mapping exists from va1 to pa and data
5900 * has been read into the cache.
5901 * 2. unload va1.
5902 * 3. load va2 and modify data using va2.
5903			 * 4.	unload va2.
5904 * 5. load va1 and reference data. Unless we
5905 * flush the data cache when we unload we will
5906 * get stale data.
5907 * Fortunately, page coloring eliminates the
5908 * above scenario by remembering the color a
5909 * physical page was last or is currently
5910 * mapped to. Now, we delay the flush until
5911 * the loading of translations. Only when the
5912 * new translation is of a different color
5913 * are we forced to flush.
5914 */
5915 if (use_demap_range) {
5916 /*
5917 * Mark this page as needing a demap.
5918 */
5919 DEMAP_RANGE_MARKPG(dmrp, addr);
5920 } else {
5921 ASSERT(sfmmup != NULL);
5922 ASSERT(!hmeblkp->hblk_shared);
5923 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
5924 sfmmup->sfmmu_free, 0);
5925 }
5926
5927 if (pp) {
5928 /*
5929 * Remove the hment from the mapping list
5930 */
5931 ASSERT(hmeblkp->hblk_hmecnt > 0);
5932
5933 /*
5934 * Again, we cannot
5935 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
5936 */
5937 HME_SUB(sfhmep, pp);
5938 membar_stst();
5939 atomic_dec_16(&hmeblkp->hblk_hmecnt);
5940 }
5941
5942 ASSERT(hmeblkp->hblk_vcnt > 0);
5943 atomic_dec_16(&hmeblkp->hblk_vcnt);
5944
5945 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
5946 !hmeblkp->hblk_lckcnt);
5947
5948 #ifdef VAC
5949 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
5950 if (PP_ISTNC(pp)) {
5951 /*
5952					 * If the page was temporarily
5953					 * uncached, try to recache it.
5954					 * Note that HME_SUB() was
5955					 * called above so p_index and
5956					 * the mlist have been updated.
5957 */
5958 conv_tnc(pp, ttesz);
5959 } else if (pp->p_mapping == NULL) {
5960 ASSERT(kpm_enable);
5961 /*
5962 * Page is marked to be in VAC conflict
5963 * to an existing kpm mapping and/or is
5964 * kpm mapped using only the regular
5965 * pagesize.
5966 */
5967 sfmmu_kpm_hme_unload(pp);
5968 }
5969 }
5970 #endif /* VAC */
5971 } else if ((pp = sfhmep->hme_page) != NULL) {
5972 /*
5973 * TTE is invalid but the hme
5974 * still exists. let pageunload
5975 * complete its job.
5976 */
5977 ASSERT(pml == NULL);
5978 pml = sfmmu_mlist_enter(pp);
5979 if (sfhmep->hme_page != NULL) {
5980 sfmmu_mlist_exit(pml);
5981 continue;
5982 }
5983 ASSERT(sfhmep->hme_page == NULL);
5984 } else if (hmeblkp->hblk_hmecnt != 0) {
5985 /*
5986			 * pageunload may not have finished decrementing
5987 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
5988 * wait for pageunload to finish. Rely on pageunload
5989 * to decrement hblk_hmecnt after hblk_vcnt.
5990 */
5991 pfn_t pfn = TTE_TO_TTEPFN(&tte);
5992 ASSERT(pml == NULL);
5993 if (pf_is_memory(pfn)) {
5994 pp = page_numtopp_nolock(pfn);
5995 if (pp != NULL) {
5996 pml = sfmmu_mlist_enter(pp);
5997 sfmmu_mlist_exit(pml);
5998 pml = NULL;
5999 }
6000 }
6001 }
6002
6003 tte_unloaded:
6004 /*
6005 * At this point, the tte we are looking at
6006		 * should be unloaded, and the hme has been unlinked
6007		 * from the page too. This is important because
6008		 * pageunload does ttesync() and then HME_SUB().
6009		 * Making sure HME_SUB() has completed tells us
6010		 * ttesync() has completed as well. Otherwise, at
6011		 * exit time, after returning from the hat layer, the
6012		 * VM may release the as structure which hat_setstat()
6013		 * (called by ttesync()) needs.
6014 */
6015 #ifdef DEBUG
6016 {
6017 tte_t dtte;
6018
6019 ASSERT(sfhmep->hme_page == NULL);
6020
6021 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6022 ASSERT(!TTE_IS_VALID(&dtte));
6023 }
6024 #endif
6025
6026 if (pml) {
6027 sfmmu_mlist_exit(pml);
6028 }
6029
6030 addr += TTEBYTES(ttesz);
6031 sfhmep++;
6032 DEMAP_RANGE_NEXTPG(dmrp);
6033 }
6034 /*
6035	 * For shared hmeblks this routine is only called when the region is freed
6036 * and no longer referenced. So no need to decrement ttecnt
6037 * in the region structure here.
6038 */
6039 if (ttecnt > 0 && sfmmup != NULL) {
6040 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6041 }
6042 return (addr);
6043 }
6044
6045 /*
6046 * Invalidate a virtual address range for the local CPU.
6047 * For best performance ensure that the va range is completely
6048 * mapped, otherwise the entire TLB will be flushed.
6049 */
6050 void
6051 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6052 {
6053 ssize_t sz;
6054 caddr_t endva = va + size;
6055
6056 while (va < endva) {
6057 sz = hat_getpagesize(sfmmup, va);
6058 if (sz < 0) {
6059 vtag_flushall();
6060 break;
6061 }
6062 vtag_flushpage(va, (uint64_t)sfmmup);
6063 va += sz;
6064 }
6065 }
6066
6067 /*
6068 * Synchronize all the mappings in the range [addr..addr+len).
6069 * Can be called with clearflag having two states:
6070 * HAT_SYNC_DONTZERO means just return the rm stats
6071 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6072 */
6073 void
6074 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6075 {
6076 struct hmehash_bucket *hmebp;
6077 hmeblk_tag hblktag;
6078 int hmeshift, hashno = 1;
6079 struct hme_blk *hmeblkp, *list = NULL;
6080 caddr_t endaddr;
6081 cpuset_t cpuset;
6082
6083 ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6084 ASSERT((len & MMU_PAGEOFFSET) == 0);
6085 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6086 (clearflag == HAT_SYNC_ZERORM));
6087
6088 CPUSET_ZERO(cpuset);
6089
6090 endaddr = addr + len;
6091 hblktag.htag_id = sfmmup;
6092 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6093
6094 /*
6095 * Spitfire supports 4 page sizes.
6096 * Most pages are expected to be of the smallest page
6097 * size (8K) and these will not need to be rehashed. 64K
6098	 * pages also don't need to be rehashed because an hmeblk
6099	 * spans 64K of address space. 512K pages might need 1 rehash
6100	 * and 4M pages 2 rehashes.
6101 */
6102 while (addr < endaddr) {
6103 hmeshift = HME_HASH_SHIFT(hashno);
6104 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6105 hblktag.htag_rehash = hashno;
6106 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6107
6108 SFMMU_HASH_LOCK(hmebp);
6109
6110 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6111 if (hmeblkp != NULL) {
6112 ASSERT(!hmeblkp->hblk_shared);
6113 /*
6114 * We've encountered a shadow hmeblk so skip the range
6115 * of the next smaller mapping size.
6116 */
6117 if (hmeblkp->hblk_shw_bit) {
6118 ASSERT(sfmmup != ksfmmup);
6119 ASSERT(hashno > 1);
6120 addr = (caddr_t)P2END((uintptr_t)addr,
6121 TTEBYTES(hashno - 1));
6122 } else {
6123 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6124 addr, endaddr, clearflag);
6125 }
6126 SFMMU_HASH_UNLOCK(hmebp);
6127 hashno = 1;
6128 continue;
6129 }
6130 SFMMU_HASH_UNLOCK(hmebp);
6131
6132 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6133 /*
6134 * We have traversed the whole list and rehashed
6135 * if necessary without finding the address to sync.
6136			 * This is ok, so we advance the address and continue:
6137			 * by the smallest hmeblk range for kernel mappings,
6138			 * and by the largest hmeblk range (to account for
6139			 * shadow hmeblks) for user mappings.
6140 */
6141 if (sfmmup == ksfmmup)
6142 addr = (caddr_t)P2END((uintptr_t)addr,
6143 TTEBYTES(1));
6144 else
6145 addr = (caddr_t)P2END((uintptr_t)addr,
6146 TTEBYTES(hashno));
6147 hashno = 1;
6148 } else {
6149 hashno++;
6150 }
6151 }
6152 sfmmu_hblks_list_purge(&list, 0);
6153 cpuset = sfmmup->sfmmu_cpusran;
6154 xt_sync(cpuset);
6155 }
6156
6157 static caddr_t
6158 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6159 caddr_t endaddr, int clearflag)
6160 {
6161 tte_t tte, ttemod;
6162 struct sf_hment *sfhmep;
6163 int ttesz;
6164 struct page *pp;
6165 kmutex_t *pml;
6166 int ret;
6167
6168 ASSERT(hmeblkp->hblk_shw_bit == 0);
6169 ASSERT(!hmeblkp->hblk_shared);
6170
6171 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6172
6173 ttesz = get_hblk_ttesz(hmeblkp);
6174 HBLKTOHME(sfhmep, hmeblkp, addr);
6175
6176 while (addr < endaddr) {
6177 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6178 if (TTE_IS_VALID(&tte)) {
6179 pml = NULL;
6180 pp = sfhmep->hme_page;
6181 if (pp) {
6182 pml = sfmmu_mlist_enter(pp);
6183 }
6184 if (pp != sfhmep->hme_page) {
6185 /*
6186				 * tte must have been unloaded
6187				 * underneath us. Recheck.
6188 */
6189 ASSERT(pml);
6190 sfmmu_mlist_exit(pml);
6191 continue;
6192 }
6193
6194 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6195
6196 if (clearflag == HAT_SYNC_ZERORM) {
6197 ttemod = tte;
6198 TTE_CLR_RM(&ttemod);
6199 ret = sfmmu_modifytte_try(&tte, &ttemod,
6200 &sfhmep->hme_tte);
6201 if (ret < 0) {
6202 if (pml) {
6203 sfmmu_mlist_exit(pml);
6204 }
6205 continue;
6206 }
6207
6208 if (ret > 0) {
6209 sfmmu_tlb_demap(addr, sfmmup,
6210 hmeblkp, 0, 0);
6211 }
6212 }
6213 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6214 if (pml) {
6215 sfmmu_mlist_exit(pml);
6216 }
6217 }
6218 addr += TTEBYTES(ttesz);
6219 sfhmep++;
6220 }
6221 return (addr);
6222 }
6223
6224 /*
6225 * This function will sync a tte to the page struct and it will
6226 * update the hat stats. Currently it allows us to pass a NULL pp
6227 * and we will simply update the stats. We may want to change this
6228 * so we only keep stats for pages backed by pp's.
6229 */
6230 static void
6231 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6232 {
6233 uint_t rm = 0;
6234 int sz;
6235 pgcnt_t npgs;
6236
6237 ASSERT(TTE_IS_VALID(ttep));
6238
6239 if (TTE_IS_NOSYNC(ttep)) {
6240 return;
6241 }
6242
6243 if (TTE_IS_REF(ttep)) {
6244 rm = P_REF;
6245 }
6246 if (TTE_IS_MOD(ttep)) {
6247 rm |= P_MOD;
6248 }
6249
6250 if (rm == 0) {
6251 return;
6252 }
6253
6254 sz = TTE_CSZ(ttep);
6255 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6256 int i;
6257 caddr_t vaddr = addr;
6258
6259 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6260 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6261 }
6262
6263 }
6264
6265 /*
6266 * XXX I want to use cas to update nrm bits but they
6267 * currently belong in common/vm and not in hat where
6268 * they should be.
6269 * The nrm bits are protected by the same mutex as
6270 * the one that protects the page's mapping list.
6271 */
6272 if (!pp)
6273 return;
6274 ASSERT(sfmmu_mlist_held(pp));
6275 /*
6276 * If the tte is for a large page, we need to sync all the
6277 * pages covered by the tte.
6278 */
6279 if (sz != TTE8K) {
6280 ASSERT(pp->p_szc != 0);
6281 pp = PP_GROUPLEADER(pp, sz);
6282 ASSERT(sfmmu_mlist_held(pp));
6283 }
6284
6285 /* Get number of pages from tte size. */
6286 npgs = TTEPAGES(sz);
6287
6288 do {
6289 ASSERT(pp);
6290 ASSERT(sfmmu_mlist_held(pp));
6291 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6292 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6293 hat_page_setattr(pp, rm);
6294
6295 /*
6296 * Are we done? If not, we must have a large mapping.
6297 * For large mappings we need to sync the rest of the pages
6298 * covered by this tte; goto the next page.
6299 */
6300 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6301 }
6302
6303 /*
6304 * Execute pre-callback handler of each pa_hment linked to pp
6305 *
6306 * Inputs:
6307 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6308 * capture_cpus: pointer to return value (below)
6309 *
6310 * Returns:
6311 * Propagates the subsystem callback return values back to the caller;
6312 *	returns 0 on success. If capture_cpus is non-NULL, the value returned
6313 *	through it is zero if all of the pa_hments are of a type that does
6314 *	not require capturing CPUs prior to suspending the mapping, else 1.
6315 */
6316 static int
6317 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6318 {
6319 struct sf_hment *sfhmep;
6320 struct pa_hment *pahmep;
6321 int (*f)(caddr_t, uint_t, uint_t, void *);
6322 int ret;
6323 id_t id;
6324 int locked = 0;
6325 kmutex_t *pml;
6326
6327 ASSERT(PAGE_EXCL(pp));
6328 if (!sfmmu_mlist_held(pp)) {
6329 pml = sfmmu_mlist_enter(pp);
6330 locked = 1;
6331 }
6332
6333 if (capture_cpus)
6334 *capture_cpus = 0;
6335
6336 top:
6337 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6338 /*
6339 * skip sf_hments corresponding to VA<->PA mappings;
6340 * for pa_hment's, hme_tte.ll is zero
6341 */
6342 if (!IS_PAHME(sfhmep))
6343 continue;
6344
6345 pahmep = sfhmep->hme_data;
6346 ASSERT(pahmep != NULL);
6347
6348 /*
6349 * skip if pre-handler has been called earlier in this loop
6350 */
6351 if (pahmep->flags & flag)
6352 continue;
6353
6354 id = pahmep->cb_id;
6355 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6356 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6357 *capture_cpus = 1;
6358 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6359 pahmep->flags |= flag;
6360 continue;
6361 }
6362
6363 /*
6364 * Drop the mapping list lock to avoid locking order issues.
6365 */
6366 if (locked)
6367 sfmmu_mlist_exit(pml);
6368
6369 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6370 if (ret != 0)
6371 return (ret); /* caller must do the cleanup */
6372
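	/*
	 * If we took the mapping list lock ourselves, it was dropped
	 * around the pre-handler call and the list may have changed
	 * underneath us: flag this pa_hment as done and rescan from the
	 * top (already-flagged entries are skipped).  If the caller held
	 * the lock it was never dropped, so we can simply continue.
	 */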
6373 if (locked) {
6374 pml = sfmmu_mlist_enter(pp);
6375 pahmep->flags |= flag;
6376 goto top;
6377 }
6378
6379 pahmep->flags |= flag;
6380 }
6381
6382 if (locked)
6383 sfmmu_mlist_exit(pml);
6384
6385 return (0);
6386 }
6387
6388 /*
6389 * Execute post-callback handler of each pa_hment linked to pp
6390 *
6391 * Same overall assumptions and restrictions apply as for
6392 * hat_pageprocess_precallbacks().
6393 */
6394 static void
6395 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6396 {
6397 pfn_t pgpfn = pp->p_pagenum;
6398 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6399 pfn_t newpfn;
6400 struct sf_hment *sfhmep;
6401 struct pa_hment *pahmep;
6402 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6403 id_t id;
6404 int locked = 0;
6405 kmutex_t *pml;
6406
6407 ASSERT(PAGE_EXCL(pp));
6408 if (!sfmmu_mlist_held(pp)) {
6409 pml = sfmmu_mlist_enter(pp);
6410 locked = 1;
6411 }
6412
6413 top:
6414 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6415 /*
6416 * skip sf_hments corresponding to VA<->PA mappings;
6417 * for pa_hment's, hme_tte.ll is zero
6418 */
6419 if (!IS_PAHME(sfhmep))
6420 continue;
6421
6422 pahmep = sfhmep->hme_data;
6423 ASSERT(pahmep != NULL);
6424
6425 if ((pahmep->flags & flag) == 0)
6426 continue;
6427
6428 pahmep->flags &= ~flag;
6429
6430 id = pahmep->cb_id;
6431 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6432 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6433 continue;
6434
6435 /*
6436 * Convert the base page PFN into the constituent PFN
6437 * which is needed by the callback handler.
6438 */
6439 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6440
6441 /*
6442 * Drop the mapping list lock to avoid locking order issues.
6443 */
6444 if (locked)
6445 sfmmu_mlist_exit(pml);
6446
6447 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6448 != 0)
6449 panic("sfmmu: posthandler failed");
6450
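	/*
	 * As in the pre-callback case, the mapping list lock (if we took
	 * it) was dropped around the handler, so rescan the list from the
	 * top; entries whose flag bit has already been cleared are skipped.
	 */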
6451 if (locked) {
6452 pml = sfmmu_mlist_enter(pp);
6453 goto top;
6454 }
6455 }
6456
6457 if (locked)
6458 sfmmu_mlist_exit(pml);
6459 }
6460
6461 /*
6462 * Suspend locked kernel mapping
6463 */
6464 void
6465 hat_pagesuspend(struct page *pp)
6466 {
6467 struct sf_hment *sfhmep;
6468 sfmmu_t *sfmmup;
6469 tte_t tte, ttemod;
6470 struct hme_blk *hmeblkp;
6471 caddr_t addr;
6472 int index, cons;
6473 cpuset_t cpuset;
6474
6475 ASSERT(PAGE_EXCL(pp));
6476 ASSERT(sfmmu_mlist_held(pp));
6477
6478 mutex_enter(&kpr_suspendlock);
6479
6480 /*
6481 * We're about to suspend a kernel mapping so mark this thread as
6482 * non-traceable by DTrace. This prevents us from running into issues
6483 * with probe context trying to touch a suspended page
6484 * in the relocation codepath itself.
6485 */
6486 curthread->t_flag |= T_DONTDTRACE;
6487
6488 index = PP_MAPINDEX(pp);
6489 cons = TTE8K;
6490
6491 retry:
6492 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6493
6494 if (IS_PAHME(sfhmep))
6495 continue;
6496
6497 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6498 continue;
6499
6500 /*
6501 * Loop until we successfully set the suspend bit in
6502 * the TTE.
6503 */
6504 again:
6505 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6506 ASSERT(TTE_IS_VALID(&tte));
6507
6508 ttemod = tte;
6509 TTE_SET_SUSPEND(&ttemod);
6510 if (sfmmu_modifytte_try(&tte, &ttemod,
6511 &sfhmep->hme_tte) < 0)
6512 goto again;
6513
6514 /*
6515 * Invalidate TSB entry
6516 */
6517 hmeblkp = sfmmu_hmetohblk(sfhmep);
6518
6519 sfmmup = hblktosfmmu(hmeblkp);
6520 ASSERT(sfmmup == ksfmmup);
6521 ASSERT(!hmeblkp->hblk_shared);
6522
6523 addr = tte_to_vaddr(hmeblkp, tte);
6524
6525 /*
6526 * No need to make sure that the TSB for this sfmmu is
6527 * not being relocated since it is ksfmmup and thus it
6528 * will never be relocated.
6529 */
6530 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6531
6532 /*
6533 * Update xcall stats
6534 */
6535 cpuset = cpu_ready_set;
6536 CPUSET_DEL(cpuset, CPU->cpu_id);
6537
6538 /* LINTED: constant in conditional context */
6539 SFMMU_XCALL_STATS(ksfmmup);
6540
6541 /*
6542 * Flush TLB entry on remote CPU's
6543 */
6544 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6545 (uint64_t)ksfmmup);
6546 xt_sync(cpuset);
6547
6548 /*
6549 * Flush TLB entry on local CPU
6550 */
6551 vtag_flushpage(addr, (uint64_t)ksfmmup);
6552 }
6553
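	/*
	 * If the page participates in larger mappings, walk up to each
	 * group-leader page and suspend the constituent mappings of that
	 * size as well; index carries one bit per larger mapping size.
	 */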
6554 while (index != 0) {
6555 index = index >> 1;
6556 if (index != 0)
6557 cons++;
6558 if (index & 0x1) {
6559 pp = PP_GROUPLEADER(pp, cons);
6560 goto retry;
6561 }
6562 }
6563 }
6564
6565 #ifdef DEBUG
6566
6567 #define N_PRLE 1024
6568 struct prle {
6569 page_t *targ;
6570 page_t *repl;
6571 int status;
6572 int pausecpus;
6573 hrtime_t whence;
6574 };
6575
6576 static struct prle page_relocate_log[N_PRLE];
6577 static int prl_entry;
6578 static kmutex_t prl_mutex;
6579
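/*
 * PAGE_RELOCATE_LOG() records each relocation attempt in a small circular
 * buffer of N_PRLE entries so that recent relocation history (target,
 * replacement, status, whether CPUs were paused, timestamp) can be
 * examined on DEBUG kernels, e.g. from a crash dump.
 */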
6580 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6581 mutex_enter(&prl_mutex); \
6582 page_relocate_log[prl_entry].targ = *(t); \
6583 page_relocate_log[prl_entry].repl = *(r); \
6584 page_relocate_log[prl_entry].status = (s); \
6585 page_relocate_log[prl_entry].pausecpus = (p); \
6586 page_relocate_log[prl_entry].whence = gethrtime(); \
6587 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6588 mutex_exit(&prl_mutex);
6589
6590 #else /* !DEBUG */
6591 #define PAGE_RELOCATE_LOG(t, r, s, p)
6592 #endif
6593
6594 /*
6595 * Core Kernel Page Relocation Algorithm
6596 *
6597 * Input:
6598 *
6599 * target : constituent pages are SE_EXCL locked.
6600 * replacement: constituent pages are SE_EXCL locked.
6601 *
6602 * Output:
6603 *
6604 * nrelocp: number of pages relocated
6605 */
6606 int
6607 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6608 {
6609 page_t *targ, *repl;
6610 page_t *tpp, *rpp;
6611 kmutex_t *low, *high;
6612 spgcnt_t npages, i;
6613 page_t *pl = NULL;
6614 int old_pil;
6615 cpuset_t cpuset;
6616 int cap_cpus;
6617 int ret;
6618 #ifdef VAC
6619 int cflags = 0;
6620 #endif
6621
6622 if (!kcage_on || PP_ISNORELOC(*target)) {
6623 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6624 return (EAGAIN);
6625 }
6626
6627 mutex_enter(&kpr_mutex);
6628 kreloc_thread = curthread;
6629
6630 targ = *target;
6631 repl = *replacement;
6632 ASSERT(repl != NULL);
6633 ASSERT(targ->p_szc == repl->p_szc);
6634
6635 npages = page_get_pagecnt(targ->p_szc);
6636
6637 /*
6638 * unload VA<->PA mappings that are not locked
6639 */
6640 tpp = targ;
6641 for (i = 0; i < npages; i++) {
6642 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6643 tpp++;
6644 }
6645
6646 /*
6647 * Do "presuspend" callbacks, in a context from which we can still
6648 * block as needed. Note that we don't hold the mapping list lock
6649 * of "targ" at this point due to potential locking order issues;
6650	 * we assume that, between the hat_pageunload() above and our holding
6651	 * of the SE_EXCL lock, the mapping list *cannot* change at this
6652	 * point.
6653 */
6654 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6655 if (ret != 0) {
6656 /*
6657 * EIO translates to fatal error, for all others cleanup
6658 * and return EAGAIN.
6659 */
6660 ASSERT(ret != EIO);
6661 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6662 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6663 kreloc_thread = NULL;
6664 mutex_exit(&kpr_mutex);
6665 return (EAGAIN);
6666 }
6667
6668 /*
6669 * acquire p_mapping list lock for both the target and replacement
6670 * root pages.
6671 *
6672 * low and high refer to the need to grab the mlist locks in a
6673 * specific order in order to prevent race conditions. Thus the
6674 * lower lock must be grabbed before the higher lock.
6675 *
6676	 * This will block hat_unload from accessing the p_mapping list.  Since
6677 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6678 * blocked. Thus, no one else will be accessing the p_mapping list
6679 * while we suspend and reload the locked mapping below.
6680 */
6681 tpp = targ;
6682 rpp = repl;
6683 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6684
6685 kpreempt_disable();
6686
6687 /*
6688 * We raise our PIL to 13 so that we don't get captured by
6689 * another CPU or pinned by an interrupt thread. We can't go to
6690 * PIL 14 since the nexus driver(s) may need to interrupt at
6691 * that level in the case of IOMMU pseudo mappings.
6692 */
6693 cpuset = cpu_ready_set;
6694 CPUSET_DEL(cpuset, CPU->cpu_id);
6695 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6696 old_pil = splr(XCALL_PIL);
6697 } else {
6698 old_pil = -1;
6699 xc_attention(cpuset);
6700 }
6701 ASSERT(getpil() == XCALL_PIL);
6702
6703 /*
6704 * Now do suspend callbacks. In the case of an IOMMU mapping
6705 * this will suspend all DMA activity to the page while it is
6706 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6707 * may be captured at this point we should have acquired any needed
6708 * locks in the presuspend callback.
6709 */
6710 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6711 if (ret != 0) {
6712 repl = targ;
6713 goto suspend_fail;
6714 }
6715
6716 /*
6717 * Raise the PIL yet again, this time to block all high-level
6718 * interrupts on this CPU. This is necessary to prevent an
6719 * interrupt routine from pinning the thread which holds the
6720 * mapping suspended and then touching the suspended page.
6721 *
6722 * Once the page is suspended we also need to be careful to
6723 * avoid calling any functions which touch any seg_kmem memory
6724 * since that memory may be backed by the very page we are
6725 * relocating in here!
6726 */
6727 hat_pagesuspend(targ);
6728
6729 /*
6730 * Now that we are confident everybody has stopped using this page,
6731 * copy the page contents. Note we use a physical copy to prevent
6732 * locking issues and to avoid fpRAS because we can't handle it in
6733 * this context.
6734 */
6735 for (i = 0; i < npages; i++, tpp++, rpp++) {
6736 #ifdef VAC
6737 /*
6738 * If the replacement has a different vcolor than
6739		 * the one being replaced, we need to handle VAC
6740		 * consistency for it just as if we were setting up
6741 * a new mapping to it.
6742 */
6743 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6744 (tpp->p_vcolor != rpp->p_vcolor) &&
6745 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6746 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
6747 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
6748 rpp->p_pagenum);
6749 }
6750 #endif
6751 /*
6752 * Copy the contents of the page.
6753 */
6754 ppcopy_kernel(tpp, rpp);
6755 }
6756
6757 tpp = targ;
6758 rpp = repl;
6759 for (i = 0; i < npages; i++, tpp++, rpp++) {
6760 /*
6761 * Copy attributes. VAC consistency was handled above,
6762 * if required.
6763 */
6764 rpp->p_nrm = tpp->p_nrm;
6765 tpp->p_nrm = 0;
6766 rpp->p_index = tpp->p_index;
6767 tpp->p_index = 0;
6768 #ifdef VAC
6769 rpp->p_vcolor = tpp->p_vcolor;
6770 #endif
6771 }
6772
6773 /*
6774 * First, unsuspend the page, if we set the suspend bit, and transfer
6775 * the mapping list from the target page to the replacement page.
6776 * Next process postcallbacks; since pa_hment's are linked only to the
6777 * p_mapping list of root page, we don't iterate over the constituent
6778 * pages.
6779 */
6780 hat_pagereload(targ, repl);
6781
6782 suspend_fail:
6783 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
6784
6785 /*
6786 * Now lower our PIL and release any captured CPUs since we
6787 * are out of the "danger zone". After this it will again be
6788 * safe to acquire adaptive mutex locks, or to drop them...
6789 */
6790 if (old_pil != -1) {
6791 splx(old_pil);
6792 } else {
6793 xc_dismissed(cpuset);
6794 }
6795
6796 kpreempt_enable();
6797
6798 sfmmu_mlist_reloc_exit(low, high);
6799
6800 /*
6801 * Postsuspend callbacks should drop any locks held across
6802 * the suspend callbacks. As before, we don't hold the mapping
6803	 * list lock at this point; our assumption is that the mapping
6804	 * list still can't change due to our holding the SE_EXCL lock and
6805	 * there being no unlocked mappings left. Hence the restriction
6806	 * on the calling context of hat_delete_callback().
6807 */
6808 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
6809 if (ret != 0) {
6810 /*
6811 * The second presuspend call failed: we got here through
6812 * the suspend_fail label above.
6813 */
6814 ASSERT(ret != EIO);
6815 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
6816 kreloc_thread = NULL;
6817 mutex_exit(&kpr_mutex);
6818 return (EAGAIN);
6819 }
6820
6821 /*
6822 * Now that we're out of the performance critical section we can
6823 * take care of updating the hash table, since we still
6824 * hold all the pages locked SE_EXCL at this point we
6825 * needn't worry about things changing out from under us.
6826 */
6827 tpp = targ;
6828 rpp = repl;
6829 for (i = 0; i < npages; i++, tpp++, rpp++) {
6830
6831 /*
6832 * replace targ with replacement in page_hash table
6833 */
6834 targ = tpp;
6835 page_relocate_hash(rpp, targ);
6836
6837 /*
6838 * concatenate target; caller of platform_page_relocate()
6839 * expects target to be concatenated after returning.
6840 */
6841 ASSERT(targ->p_next == targ);
6842 ASSERT(targ->p_prev == targ);
6843 page_list_concat(&pl, &targ);
6844 }
6845
6846 ASSERT(*target == pl);
6847 *nrelocp = npages;
6848 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
6849 kreloc_thread = NULL;
6850 mutex_exit(&kpr_mutex);
6851 return (0);
6852 }
6853
6854 /*
6855 * Called when stray pa_hments are found attached to a page which is
6856 * being freed. Notify the subsystem which attached the pa_hment of
6857 * the error if it registered a suitable handler, else panic.
6858 */
6859 static void
6860 sfmmu_pahment_leaked(struct pa_hment *pahmep)
6861 {
6862 id_t cb_id = pahmep->cb_id;
6863
6864 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
6865 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
6866 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
6867 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
6868 return; /* non-fatal */
6869 }
6870 panic("pa_hment leaked: 0x%p", (void *)pahmep);
6871 }
6872
6873 /*
6874 * Remove all mappings to page 'pp'.
6875 */
6876 int
6877 hat_pageunload(struct page *pp, uint_t forceflag)
6878 {
6879 struct page *origpp = pp;
6880 struct sf_hment *sfhme, *tmphme;
6881 struct hme_blk *hmeblkp;
6882 kmutex_t *pml;
6883 #ifdef VAC
6884 kmutex_t *pmtx;
6885 #endif
6886 cpuset_t cpuset, tset;
6887 int index, cons;
6888 int pa_hments;
6889
6890 ASSERT(PAGE_EXCL(pp));
6891
6892 tmphme = NULL;
6893 pa_hments = 0;
6894 CPUSET_ZERO(cpuset);
6895
6896 pml = sfmmu_mlist_enter(pp);
6897
6898 #ifdef VAC
6899 if (pp->p_kpmref)
6900 sfmmu_kpm_pageunload(pp);
6901 ASSERT(!PP_ISMAPPED_KPM(pp));
6902 #endif
6903 /*
6904 * Clear vpm reference. Since the page is exclusively locked
6905 * vpm cannot be referencing it.
6906 */
6907 if (vpm_enable) {
6908 pp->p_vpmref = 0;
6909 }
6910
6911 index = PP_MAPINDEX(pp);
6912 cons = TTE8K;
6913 retry:
6914 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6915 tmphme = sfhme->hme_next;
6916
6917 if (IS_PAHME(sfhme)) {
6918 ASSERT(sfhme->hme_data != NULL);
6919 pa_hments++;
6920 continue;
6921 }
6922
6923 hmeblkp = sfmmu_hmetohblk(sfhme);
6924
6925 /*
6926 * If there are kernel mappings don't unload them, they will
6927 * be suspended.
6928 */
6929 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
6930 hmeblkp->hblk_tag.htag_id == ksfmmup)
6931 continue;
6932
6933 tset = sfmmu_pageunload(pp, sfhme, cons);
6934 CPUSET_OR(cpuset, tset);
6935 }
6936
6937 while (index != 0) {
6938 index = index >> 1;
6939 if (index != 0)
6940 cons++;
6941 if (index & 0x1) {
6942 /* Go to leading page */
6943 pp = PP_GROUPLEADER(pp, cons);
6944 ASSERT(sfmmu_mlist_held(pp));
6945 goto retry;
6946 }
6947 }
6948
6949 /*
6950 * cpuset may be empty if the page was only mapped by segkpm,
6951 * in which case we won't actually cross-trap.
6952 */
6953 xt_sync(cpuset);
6954
6955 /*
6956 * The page should have no mappings at this point, unless
6957 * we were called from hat_page_relocate() in which case we
6958 * leave the locked mappings which will be suspended later.
6959 */
6960 ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
6961 (forceflag == SFMMU_KERNEL_RELOC));
6962
6963 #ifdef VAC
6964 if (PP_ISTNC(pp)) {
6965 if (cons == TTE8K) {
6966 pmtx = sfmmu_page_enter(pp);
6967 PP_CLRTNC(pp);
6968 sfmmu_page_exit(pmtx);
6969 } else {
6970 conv_tnc(pp, cons);
6971 }
6972 }
6973 #endif /* VAC */
6974
6975 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
6976 /*
6977 * Unlink any pa_hments and free them, calling back
6978 * the responsible subsystem to notify it of the error.
6979 * This can occur in situations such as drivers leaking
6980 * DMA handles: naughty, but common enough that we'd like
6981 * to keep the system running rather than bringing it
6982 * down with an obscure error like "pa_hment leaked"
6983 * which doesn't aid the user in debugging their driver.
6984 */
6985 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
6986 tmphme = sfhme->hme_next;
6987 if (IS_PAHME(sfhme)) {
6988 struct pa_hment *pahmep = sfhme->hme_data;
6989 sfmmu_pahment_leaked(pahmep);
6990 HME_SUB(sfhme, pp);
6991 kmem_cache_free(pa_hment_cache, pahmep);
6992 }
6993 }
6994
6995 ASSERT(!PP_ISMAPPED(origpp));
6996 }
6997
6998 sfmmu_mlist_exit(pml);
6999
7000 return (0);
7001 }
7002
7003 cpuset_t
7004 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7005 {
7006 struct hme_blk *hmeblkp;
7007 sfmmu_t *sfmmup;
7008 tte_t tte, ttemod;
7009 #ifdef DEBUG
7010 tte_t orig_old;
7011 #endif /* DEBUG */
7012 caddr_t addr;
7013 int ttesz;
7014 int ret;
7015 cpuset_t cpuset;
7016
7017 ASSERT(pp != NULL);
7018 ASSERT(sfmmu_mlist_held(pp));
7019 ASSERT(!PP_ISKAS(pp));
7020
7021 CPUSET_ZERO(cpuset);
7022
7023 hmeblkp = sfmmu_hmetohblk(sfhme);
7024
7025 readtte:
7026 sfmmu_copytte(&sfhme->hme_tte, &tte);
7027 if (TTE_IS_VALID(&tte)) {
7028 sfmmup = hblktosfmmu(hmeblkp);
7029 ttesz = get_hblk_ttesz(hmeblkp);
7030 /*
7031 * Only unload mappings of 'cons' size.
7032 */
7033 if (ttesz != cons)
7034 return (cpuset);
7035
7036 /*
7037 * Note that we have p_mapping lock, but no hash lock here.
7038 * hblk_unload() has to have both hash lock AND p_mapping
7039 * lock before it tries to modify tte. So, the tte could
7040 * not become invalid in the sfmmu_modifytte_try() below.
7041 */
7042 ttemod = tte;
7043 #ifdef DEBUG
7044 orig_old = tte;
7045 #endif /* DEBUG */
7046
7047 TTE_SET_INVALID(&ttemod);
7048 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7049 if (ret < 0) {
7050 #ifdef DEBUG
7051 /* only R/M bits can change. */
7052 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7053 #endif /* DEBUG */
7054 goto readtte;
7055 }
7056
7057 if (ret == 0) {
7058 panic("pageunload: cas failed?");
7059 }
7060
7061 addr = tte_to_vaddr(hmeblkp, tte);
7062
7063 if (hmeblkp->hblk_shared) {
7064 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7065 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7066 sf_region_t *rgnp;
7067 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7068 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7069 ASSERT(srdp != NULL);
7070 rgnp = srdp->srd_hmergnp[rid];
7071 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7072 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7073 sfmmu_ttesync(NULL, addr, &tte, pp);
7074 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7075 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7076 } else {
7077 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7078 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7079
7080 /*
7081 * We need to flush the page from the virtual cache
7082 * in order to prevent a virtual cache alias
7083 * inconsistency. The particular scenario we need
7084 * to worry about is:
7085			 *	Given:	va1 and va2 are two virtual addresses that
7086 * alias and will map the same physical address.
7087 * 1. mapping exists from va1 to pa and data has
7088 * been read into the cache.
7089 * 2. unload va1.
7090 * 3. load va2 and modify data using va2.
7091			 * 4. unload va2.
7092 * 5. load va1 and reference data. Unless we flush
7093 * the data cache when we unload we will get
7094 * stale data.
7095 * This scenario is taken care of by using virtual
7096 * page coloring.
7097 */
7098 if (sfmmup->sfmmu_ismhat) {
7099 /*
7100 * Flush TSBs, TLBs and caches
7101 * of every process
7102 * sharing this ism segment.
7103 */
7104 sfmmu_hat_lock_all();
7105 mutex_enter(&ism_mlist_lock);
7106 kpreempt_disable();
7107 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7108 pp->p_pagenum, CACHE_NO_FLUSH);
7109 kpreempt_enable();
7110 mutex_exit(&ism_mlist_lock);
7111 sfmmu_hat_unlock_all();
7112 cpuset = cpu_ready_set;
7113 } else {
7114 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7115 cpuset = sfmmup->sfmmu_cpusran;
7116 }
7117 }
7118
7119 /*
7120 * Hme_sub has to run after ttesync() and a_rss update.
7121 * See hblk_unload().
7122 */
7123 HME_SUB(sfhme, pp);
7124 membar_stst();
7125
7126 /*
7127 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7128 * since pteload may have done a HME_ADD() right after
7129		 * we did the HME_SUB() above. Hmecnt is now maintained
7130		 * by cas only; no lock guarantees its value. The only
7131		 * guarantee we have is that hmecnt never drops below what
7132		 * it should be, so the hblk will not be taken away.
7133		 * It's also important that we decrement the hmecnt only
7134		 * after we are done with hmeblkp so that this hmeblk
7135		 * won't be stolen.
7136 */
7137 ASSERT(hmeblkp->hblk_hmecnt > 0);
7138 ASSERT(hmeblkp->hblk_vcnt > 0);
7139 atomic_dec_16(&hmeblkp->hblk_vcnt);
7140 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7141 /*
7142 * This is bug 4063182.
7143 * XXX: fixme
7144 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7145 * !hmeblkp->hblk_lckcnt);
7146 */
7147 } else {
7148 panic("invalid tte? pp %p &tte %p",
7149 (void *)pp, (void *)&tte);
7150 }
7151
7152 return (cpuset);
7153 }
7154
7155 /*
7156 * While relocating a kernel page, this function will move the mappings
7157 * from tpp to dpp and modify any associated data with these mappings.
7158 * It also unsuspends the suspended kernel mapping.
7159 */
7160 static void
7161 hat_pagereload(struct page *tpp, struct page *dpp)
7162 {
7163 struct sf_hment *sfhme;
7164 tte_t tte, ttemod;
7165 int index, cons;
7166
7167 ASSERT(getpil() == PIL_MAX);
7168 ASSERT(sfmmu_mlist_held(tpp));
7169 ASSERT(sfmmu_mlist_held(dpp));
7170
7171 index = PP_MAPINDEX(tpp);
7172 cons = TTE8K;
7173
7174 /* Update real mappings to the page */
7175 retry:
7176 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7177 if (IS_PAHME(sfhme))
7178 continue;
7179 sfmmu_copytte(&sfhme->hme_tte, &tte);
7180 ttemod = tte;
7181
7182 /*
7183 * replace old pfn with new pfn in TTE
7184 */
7185 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7186
7187 /*
7188 * clear suspend bit
7189 */
7190 ASSERT(TTE_IS_SUSPEND(&ttemod));
7191 TTE_CLR_SUSPEND(&ttemod);
7192
7193 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7194 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7195
7196 /*
7197 * set hme_page point to new page
7198 */
7199 sfhme->hme_page = dpp;
7200 }
7201
7202 /*
7203 * move p_mapping list from old page to new page
7204 */
7205 dpp->p_mapping = tpp->p_mapping;
7206 tpp->p_mapping = NULL;
7207 dpp->p_share = tpp->p_share;
7208 tpp->p_share = 0;
7209
7210 while (index != 0) {
7211 index = index >> 1;
7212 if (index != 0)
7213 cons++;
7214 if (index & 0x1) {
7215 tpp = PP_GROUPLEADER(tpp, cons);
7216 dpp = PP_GROUPLEADER(dpp, cons);
7217 goto retry;
7218 }
7219 }
7220
7221 curthread->t_flag &= ~T_DONTDTRACE;
7222 mutex_exit(&kpr_suspendlock);
7223 }
7224
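/*
 * Sync the ref/mod bits from every mapping of pp back into the page
 * struct, honoring the HAT_SYNC_STOPON_* flags, which let the caller
 * stop early once the bit (or share count) of interest has been seen.
 * Returns the page's generic attributes.
 */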
7225 uint_t
7226 hat_pagesync(struct page *pp, uint_t clearflag)
7227 {
7228 struct sf_hment *sfhme, *tmphme = NULL;
7229 struct hme_blk *hmeblkp;
7230 kmutex_t *pml;
7231 cpuset_t cpuset, tset;
7232 int index, cons;
7233 extern ulong_t po_share;
7234 page_t *save_pp = pp;
7235 int stop_on_sh = 0;
7236 uint_t shcnt;
7237
7238 CPUSET_ZERO(cpuset);
7239
7240 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7241 return (PP_GENERIC_ATTR(pp));
7242 }
7243
7244 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7245 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7246 return (PP_GENERIC_ATTR(pp));
7247 }
7248 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7249 return (PP_GENERIC_ATTR(pp));
7250 }
7251 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7252 if (pp->p_share > po_share) {
7253 hat_page_setattr(pp, P_REF);
7254 return (PP_GENERIC_ATTR(pp));
7255 }
7256 stop_on_sh = 1;
7257 shcnt = 0;
7258 }
7259 }
7260
7261 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7262 pml = sfmmu_mlist_enter(pp);
7263 index = PP_MAPINDEX(pp);
7264 cons = TTE8K;
7265 retry:
7266 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7267 /*
7268 * We need to save the next hment on the list since
7269 * it is possible for pagesync to remove an invalid hment
7270 * from the list.
7271 */
7272 tmphme = sfhme->hme_next;
7273 if (IS_PAHME(sfhme))
7274 continue;
7275 /*
7276 * If we are looking for large mappings and this hme doesn't
7277 * reach the range we are seeking, just ignore it.
7278 */
7279 hmeblkp = sfmmu_hmetohblk(sfhme);
7280
7281 if (hme_size(sfhme) < cons)
7282 continue;
7283
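		/*
		 * When stopping on "shared", estimate how many address
		 * spaces map the page: a shared hmeblk contributes its
		 * region's reference count, a private one contributes one.
		 * Once the count exceeds po_share, just mark the page
		 * referenced and stop.
		 */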
7284 if (stop_on_sh) {
7285 if (hmeblkp->hblk_shared) {
7286 sf_srd_t *srdp = hblktosrd(hmeblkp);
7287 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7288 sf_region_t *rgnp;
7289 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7290 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7291 ASSERT(srdp != NULL);
7292 rgnp = srdp->srd_hmergnp[rid];
7293 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7294 rgnp, rid);
7295 shcnt += rgnp->rgn_refcnt;
7296 } else {
7297 shcnt++;
7298 }
7299 if (shcnt > po_share) {
7300 /*
7301 * tell the pager to spare the page this time
7302 * around.
7303 */
7304 hat_page_setattr(save_pp, P_REF);
7305 index = 0;
7306 break;
7307 }
7308 }
7309 tset = sfmmu_pagesync(pp, sfhme,
7310 clearflag & ~HAT_SYNC_STOPON_RM);
7311 CPUSET_OR(cpuset, tset);
7312
7313 /*
7314 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7315 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7316 */
7317 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7318 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7319 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7320 index = 0;
7321 break;
7322 }
7323 }
7324
7325 while (index) {
7326 index = index >> 1;
7327 cons++;
7328 if (index & 0x1) {
7329 /* Go to leading page */
7330 pp = PP_GROUPLEADER(pp, cons);
7331 goto retry;
7332 }
7333 }
7334
7335 xt_sync(cpuset);
7336 sfmmu_mlist_exit(pml);
7337 return (PP_GENERIC_ATTR(save_pp));
7338 }
7339
7340 /*
7341 * Get all the hardware dependent attributes for a page struct
7342 */
7343 static cpuset_t
7344 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7345 uint_t clearflag)
7346 {
7347 caddr_t addr;
7348 tte_t tte, ttemod;
7349 struct hme_blk *hmeblkp;
7350 int ret;
7351 sfmmu_t *sfmmup;
7352 cpuset_t cpuset;
7353
7354 ASSERT(pp != NULL);
7355 ASSERT(sfmmu_mlist_held(pp));
7356 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7357 (clearflag == HAT_SYNC_ZERORM));
7358
7359 SFMMU_STAT(sf_pagesync);
7360
7361 CPUSET_ZERO(cpuset);
7362
7363 sfmmu_pagesync_retry:
7364
7365 sfmmu_copytte(&sfhme->hme_tte, &tte);
7366 if (TTE_IS_VALID(&tte)) {
7367 hmeblkp = sfmmu_hmetohblk(sfhme);
7368 sfmmup = hblktosfmmu(hmeblkp);
7369 addr = tte_to_vaddr(hmeblkp, tte);
7370 if (clearflag == HAT_SYNC_ZERORM) {
7371 ttemod = tte;
7372 TTE_CLR_RM(&ttemod);
7373 ret = sfmmu_modifytte_try(&tte, &ttemod,
7374 &sfhme->hme_tte);
7375 if (ret < 0) {
7376 /*
7377 * cas failed and the new value is not what
7378 * we want.
7379 */
7380 goto sfmmu_pagesync_retry;
7381 }
7382
7383 if (ret > 0) {
7384 /* we win the cas */
7385 if (hmeblkp->hblk_shared) {
7386 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7387 uint_t rid =
7388 hmeblkp->hblk_tag.htag_rid;
7389 sf_region_t *rgnp;
7390 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7391 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7392 ASSERT(srdp != NULL);
7393 rgnp = srdp->srd_hmergnp[rid];
7394 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7395 srdp, rgnp, rid);
7396 cpuset = sfmmu_rgntlb_demap(addr,
7397 rgnp, hmeblkp, 1);
7398 } else {
7399 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7400 0, 0);
7401 cpuset = sfmmup->sfmmu_cpusran;
7402 }
7403 }
7404 }
7405 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7406 &tte, pp);
7407 }
7408 return (cpuset);
7409 }
7410
7411 /*
7412 * Remove write permission from a mapping to a page, so that
7413 * we can detect the next modification of it. This requires modifying
7414 * the TTE and then invalidating (demapping) any TLB entry using that TTE.
7415 * This code is similar to sfmmu_pagesync().
7416 */
7417 static cpuset_t
7418 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7419 {
7420 caddr_t addr;
7421 tte_t tte;
7422 tte_t ttemod;
7423 struct hme_blk *hmeblkp;
7424 int ret;
7425 sfmmu_t *sfmmup;
7426 cpuset_t cpuset;
7427
7428 ASSERT(pp != NULL);
7429 ASSERT(sfmmu_mlist_held(pp));
7430
7431 CPUSET_ZERO(cpuset);
7432 SFMMU_STAT(sf_clrwrt);
7433
7434 retry:
7435
7436 sfmmu_copytte(&sfhme->hme_tte, &tte);
7437 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7438 hmeblkp = sfmmu_hmetohblk(sfhme);
7439 sfmmup = hblktosfmmu(hmeblkp);
7440 addr = tte_to_vaddr(hmeblkp, tte);
7441
7442 ttemod = tte;
7443 TTE_CLR_WRT(&ttemod);
7444 TTE_CLR_MOD(&ttemod);
7445 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7446
7447 /*
7448 * if cas failed and the new value is not what
7449 * we want retry
7450 */
7451 if (ret < 0)
7452 goto retry;
7453
7454 /* we win the cas */
7455 if (ret > 0) {
7456 if (hmeblkp->hblk_shared) {
7457 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7458 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7459 sf_region_t *rgnp;
7460 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7461 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7462 ASSERT(srdp != NULL);
7463 rgnp = srdp->srd_hmergnp[rid];
7464 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7465 srdp, rgnp, rid);
7466 cpuset = sfmmu_rgntlb_demap(addr,
7467 rgnp, hmeblkp, 1);
7468 } else {
7469 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7470 cpuset = sfmmup->sfmmu_cpusran;
7471 }
7472 }
7473 }
7474
7475 return (cpuset);
7476 }
7477
7478 /*
7479 * Walk all mappings of a page, removing write permission and clearing the
7480 * ref/mod bits. This code is similar to hat_pagesync()
7481 */
7482 static void
7483 hat_page_clrwrt(page_t *pp)
7484 {
7485 struct sf_hment *sfhme;
7486 struct sf_hment *tmphme = NULL;
7487 kmutex_t *pml;
7488 cpuset_t cpuset;
7489 cpuset_t tset;
7490 int index;
7491 int cons;
7492
7493 CPUSET_ZERO(cpuset);
7494
7495 pml = sfmmu_mlist_enter(pp);
7496 index = PP_MAPINDEX(pp);
7497 cons = TTE8K;
7498 retry:
7499 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7500 tmphme = sfhme->hme_next;
7501
7502 /*
7503 * If we are looking for large mappings and this hme doesn't
7504		 * reach the range we are seeking, just ignore it.
7505 */
7506
7507 if (hme_size(sfhme) < cons)
7508 continue;
7509
7510 tset = sfmmu_pageclrwrt(pp, sfhme);
7511 CPUSET_OR(cpuset, tset);
7512 }
7513
7514 while (index) {
7515 index = index >> 1;
7516 cons++;
7517 if (index & 0x1) {
7518 /* Go to leading page */
7519 pp = PP_GROUPLEADER(pp, cons);
7520 goto retry;
7521 }
7522 }
7523
7524 xt_sync(cpuset);
7525 sfmmu_mlist_exit(pml);
7526 }
7527
7528 /*
7529 * Set the given REF/MOD/RO bits for the given page.
7530 * For a vnode with a sorted v_pages list, we need to change
7531 * the attributes and the v_pages list together under page_vnode_mutex.
7532 */
7533 void
7534 hat_page_setattr(page_t *pp, uint_t flag)
7535 {
7536 vnode_t *vp = pp->p_vnode;
7537 page_t **listp;
7538 kmutex_t *pmtx;
7539 kmutex_t *vphm = NULL;
7540 int noshuffle;
7541
7542 noshuffle = flag & P_NSH;
7543 flag &= ~P_NSH;
7544
7545 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7546
7547 /*
7548 * nothing to do if attribute already set
7549 */
7550 if ((pp->p_nrm & flag) == flag)
7551 return;
7552
7553 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7554 !noshuffle) {
7555 vphm = page_vnode_mutex(vp);
7556 mutex_enter(vphm);
7557 }
7558
7559 pmtx = sfmmu_page_enter(pp);
7560 pp->p_nrm |= flag;
7561 sfmmu_page_exit(pmtx);
7562
7563 if (vphm != NULL) {
7564 /*
7565		 * Some file systems examine v_pages for NULL without
7566		 * grabbing the vphm mutex, so we must not let it become NULL
7567		 * when pp is the only page on the list.
7568 */
7569 if (pp->p_vpnext != pp) {
7570 page_vpsub(&vp->v_pages, pp);
7571 if (vp->v_pages != NULL)
7572 listp = &vp->v_pages->p_vpprev->p_vpnext;
7573 else
7574 listp = &vp->v_pages;
7575 page_vpadd(listp, pp);
7576 }
7577 mutex_exit(vphm);
7578 }
7579 }
7580
7581 void
7582 hat_page_clrattr(page_t *pp, uint_t flag)
7583 {
7584 vnode_t *vp = pp->p_vnode;
7585 kmutex_t *pmtx;
7586
7587 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7588
7589 pmtx = sfmmu_page_enter(pp);
7590
7591 /*
7592 * Caller is expected to hold page's io lock for VMODSORT to work
7593 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7594 * bit is cleared.
7595	 * We don't assert this, to avoid tripping some existing third-party
7596	 * code. The dirty page is moved back to the top of the v_pages list
7597 * after IO is done in pvn_write_done().
7598 */
7599 pp->p_nrm &= ~flag;
7600 sfmmu_page_exit(pmtx);
7601
7602 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7603
7604 /*
7605 * VMODSORT works by removing write permissions and getting
7606 * a fault when a page is made dirty. At this point
7607 * we need to remove write permission from all mappings
7608 * to this page.
7609 */
7610 hat_page_clrwrt(pp);
7611 }
7612 }
7613
7614 uint_t
7615 hat_page_getattr(page_t *pp, uint_t flag)
7616 {
7617 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7618 return ((uint_t)(pp->p_nrm & flag));
7619 }
7620
7621 /*
7622 * DEBUG kernels: verify that a kernel va<->pa translation
7623 * is safe by checking the underlying page_t is in a page
7624 * relocation-safe state.
7625 */
7626 #ifdef DEBUG
7627 void
7628 sfmmu_check_kpfn(pfn_t pfn)
7629 {
7630 page_t *pp;
7631 int index, cons;
7632
7633 if (hat_check_vtop == 0)
7634 return;
7635
7636 if (kvseg.s_base == NULL || panicstr)
7637 return;
7638
7639 pp = page_numtopp_nolock(pfn);
7640 if (!pp)
7641 return;
7642
7643 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7644 return;
7645
7646 /*
7647 * Handed a large kernel page, we dig up the root page since we
7648 * know the root page might have the lock also.
7649 */
7650 if (pp->p_szc != 0) {
7651 index = PP_MAPINDEX(pp);
7652 cons = TTE8K;
7653 again:
7654 while (index != 0) {
7655 index >>= 1;
7656 if (index != 0)
7657 cons++;
7658 if (index & 0x1) {
7659 pp = PP_GROUPLEADER(pp, cons);
7660 goto again;
7661 }
7662 }
7663 }
7664
7665 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7666 return;
7667
7668 /*
7669 * Pages need to be locked or allocated "permanent" (either from
7670 * static_arena arena or explicitly setting PG_NORELOC when calling
7671 * page_create_va()) for VA->PA translations to be valid.
7672 */
7673 if (!PP_ISNORELOC(pp))
7674 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7675 (void *)pp);
7676 else
7677 panic("Illegal VA->PA translation, pp 0x%p not locked",
7678 (void *)pp);
7679 }
7680 #endif /* DEBUG */
7681
7682 /*
7683 * Returns a page frame number for a given virtual address.
7684 * Returns PFN_INVALID to indicate an invalid mapping.
7685 */
7686 pfn_t
7687 hat_getpfnum(struct hat *hat, caddr_t addr)
7688 {
7689 pfn_t pfn;
7690 tte_t tte;
7691
7692 /*
7693 * We would like to
7694 * ASSERT(AS_LOCK_HELD(as));
7695 * but we can't because the iommu driver will call this
7696 * routine at interrupt time and it can't grab the as lock
7697 * or it will deadlock: A thread could have the as lock
7698 * and be waiting for io. The io can't complete
7699 * because the interrupt thread is blocked trying to grab
7700 * the as lock.
7701 */
7702
7703 if (hat == ksfmmup) {
7704 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7705 ASSERT(segkmem_lpszc > 0);
7706 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7707 if (pfn != PFN_INVALID) {
7708 sfmmu_check_kpfn(pfn);
7709 return (pfn);
7710 }
7711 } else if (segkpm && IS_KPM_ADDR(addr)) {
7712 return (sfmmu_kpm_vatopfn(addr));
7713 }
7714 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
7715 == PFN_SUSPENDED) {
7716 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
7717 }
7718 sfmmu_check_kpfn(pfn);
7719 return (pfn);
7720 } else {
7721 return (sfmmu_uvatopfn(addr, hat, NULL));
7722 }
7723 }
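
/*
 * Minimal usage sketch (illustrative only; "kva" and "pa" are assumed
 * locals, not from this file): translating a kernel virtual address
 * with the routine above.
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, kva);
 *	if (pfn != PFN_INVALID)
 *		pa = ptob(pfn) + ((uintptr_t)kva & PAGEOFFSET);
 */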
7724
7725 /*
7726 * This routine will return both pfn and tte for the vaddr.
7727 */
7728 static pfn_t
7729 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
7730 {
7731 struct hmehash_bucket *hmebp;
7732 hmeblk_tag hblktag;
7733 int hmeshift, hashno = 1;
7734 struct hme_blk *hmeblkp = NULL;
7735 tte_t tte;
7736
7737 struct sf_hment *sfhmep;
7738 pfn_t pfn;
7739
7740 /* support for ISM */
7741 ism_map_t *ism_map;
7742 ism_blk_t *ism_blkp;
7743 int i;
7744 sfmmu_t *ism_hatid = NULL;
7745 sfmmu_t *locked_hatid = NULL;
7746 sfmmu_t *sv_sfmmup = sfmmup;
7747 caddr_t sv_vaddr = vaddr;
7748 sf_srd_t *srdp;
7749
7750 if (ttep == NULL) {
7751 ttep = &tte;
7752 } else {
7753 ttep->ll = 0;
7754 }
7755
7756 ASSERT(sfmmup != ksfmmup);
7757 SFMMU_STAT(sf_user_vtop);
7758 /*
7759 * Set ism_hatid if vaddr falls in an ISM segment.
7760 */
7761 ism_blkp = sfmmup->sfmmu_iblk;
7762 if (ism_blkp != NULL) {
7763 sfmmu_ismhat_enter(sfmmup, 0);
7764 locked_hatid = sfmmup;
7765 }
7766 while (ism_blkp != NULL && ism_hatid == NULL) {
7767 ism_map = ism_blkp->iblk_maps;
7768 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
7769 if (vaddr >= ism_start(ism_map[i]) &&
7770 vaddr < ism_end(ism_map[i])) {
7771 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
7772 vaddr = (caddr_t)(vaddr -
7773 ism_start(ism_map[i]));
7774 break;
7775 }
7776 }
7777 ism_blkp = ism_blkp->iblk_next;
7778 }
7779 if (locked_hatid) {
7780 sfmmu_ismhat_exit(locked_hatid, 0);
7781 }
7782
7783 hblktag.htag_id = sfmmup;
7784 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
7785 do {
7786 hmeshift = HME_HASH_SHIFT(hashno);
7787 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
7788 hblktag.htag_rehash = hashno;
7789 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
7790
7791 SFMMU_HASH_LOCK(hmebp);
7792
7793 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
7794 if (hmeblkp != NULL) {
7795 ASSERT(!hmeblkp->hblk_shared);
7796 HBLKTOHME(sfhmep, hmeblkp, vaddr);
7797 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7798 SFMMU_HASH_UNLOCK(hmebp);
7799 if (TTE_IS_VALID(ttep)) {
7800 pfn = TTE_TO_PFN(vaddr, ttep);
7801 return (pfn);
7802 }
7803 break;
7804 }
7805 SFMMU_HASH_UNLOCK(hmebp);
7806 hashno++;
7807 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
7808
7809 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
7810 return (PFN_INVALID);
7811 }
7812 srdp = sv_sfmmup->sfmmu_srdp;
7813 ASSERT(srdp != NULL);
7814 ASSERT(srdp->srd_refcnt != 0);
7815 hblktag.htag_id = srdp;
7816 hashno = 1;
7817 do {
7818 hmeshift = HME_HASH_SHIFT(hashno);
7819 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
7820 hblktag.htag_rehash = hashno;
7821 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
7822
7823 SFMMU_HASH_LOCK(hmebp);
7824 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
7825 hmeblkp = hmeblkp->hblk_next) {
7826 uint_t rid;
7827 sf_region_t *rgnp;
7828 caddr_t rsaddr;
7829 caddr_t readdr;
7830
7831 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
7832 sv_sfmmup->sfmmu_hmeregion_map)) {
7833 continue;
7834 }
7835 ASSERT(hmeblkp->hblk_shared);
7836 rid = hmeblkp->hblk_tag.htag_rid;
7837 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7838 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7839 rgnp = srdp->srd_hmergnp[rid];
7840 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7841 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
7842 sfmmu_copytte(&sfhmep->hme_tte, ttep);
7843 rsaddr = rgnp->rgn_saddr;
7844 readdr = rsaddr + rgnp->rgn_size;
7845 #ifdef DEBUG
7846 if (TTE_IS_VALID(ttep) ||
7847 get_hblk_ttesz(hmeblkp) > TTE8K) {
7848 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
7849 ASSERT(eva > sv_vaddr);
7850 ASSERT(sv_vaddr >= rsaddr);
7851 ASSERT(sv_vaddr < readdr);
7852 ASSERT(eva <= readdr);
7853 }
7854 #endif /* DEBUG */
7855 /*
7856 * Continue the search if we
7857 * found an invalid 8K tte outside of the area
7858 * covered by this hmeblk's region.
7859 */
7860 if (TTE_IS_VALID(ttep)) {
7861 SFMMU_HASH_UNLOCK(hmebp);
7862 pfn = TTE_TO_PFN(sv_vaddr, ttep);
7863 return (pfn);
7864 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
7865 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
7866 SFMMU_HASH_UNLOCK(hmebp);
7867 pfn = PFN_INVALID;
7868 return (pfn);
7869 }
7870 }
7871 SFMMU_HASH_UNLOCK(hmebp);
7872 hashno++;
7873 } while (hashno <= mmu_hashcnt);
7874 return (PFN_INVALID);
7875 }
7876
7877
7878 /*
7879 * For compatibility with AT&T and later optimizations
7880 */
7881 /* ARGSUSED */
7882 void
7883 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
7884 {
7885 ASSERT(hat != NULL);
7886 }
7887
7888 /*
7889 * Return the number of mappings to a particular page. This number is an
7890 * approximation of the number of people sharing the page.
7891 *
7892 * Shared hmeblks or ISM hmeblks are counted as one mapping here.
7893 * hat_page_checkshare() can be used to compare a threshold against a share
7894 * count that reflects the number of region sharers, albeit at higher cost.
7895 */
7896 ulong_t
7897 hat_page_getshare(page_t *pp)
7898 {
7899 page_t *spp = pp; /* start page */
7900 kmutex_t *pml;
7901 ulong_t cnt;
7902 int index, sz = TTE64K;
7903
7904 /*
7905 * We need to grab the mlist lock to make sure any outstanding
7906 * loads/unloads complete. Otherwise we could return zero
7907 * even though the unload(s) haven't finished yet.
7908 */
7909 pml = sfmmu_mlist_enter(spp);
7910 cnt = spp->p_share;
7911
7912 #ifdef VAC
7913 if (kpm_enable)
7914 cnt += spp->p_kpmref;
7915 #endif
7916 if (vpm_enable && pp->p_vpmref) {
7917 cnt += 1;
7918 }
7919
7920 /*
7921 * If we have any large mappings, we count the number of
7922 * mappings that this large page is part of.
7923 */
7924 index = PP_MAPINDEX(spp);
7925 index >>= 1;
7926 while (index) {
7927 pp = PP_GROUPLEADER(spp, sz);
7928 if ((index & 0x1) && pp != spp) {
7929 cnt += pp->p_share;
7930 spp = pp;
7931 }
7932 index >>= 1;
7933 sz++;
7934 }
7935 sfmmu_mlist_exit(pml);
7936 return (cnt);
7937 }
7938
7939 /*
7940 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
7941 * otherwise. Count shared hmeblks by region's refcnt.
7942 */
7943 int
7944 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
7945 {
7946 kmutex_t *pml;
7947 ulong_t cnt = 0;
7948 int index, sz = TTE8K;
7949 struct sf_hment *sfhme, *tmphme = NULL;
7950 struct hme_blk *hmeblkp;
7951
7952 pml = sfmmu_mlist_enter(pp);
7953
7954 #ifdef VAC
7955 if (kpm_enable)
7956 cnt = pp->p_kpmref;
7957 #endif
7958
7959 if (vpm_enable && pp->p_vpmref) {
7960 cnt += 1;
7961 }
7962
7963 if (pp->p_share + cnt > sh_thresh) {
7964 sfmmu_mlist_exit(pml);
7965 return (1);
7966 }
7967
7968 index = PP_MAPINDEX(pp);
7969
7970 again:
7971 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7972 tmphme = sfhme->hme_next;
7973 if (IS_PAHME(sfhme)) {
7974 continue;
7975 }
7976
7977 hmeblkp = sfmmu_hmetohblk(sfhme);
7978 if (hme_size(sfhme) != sz) {
7979 continue;
7980 }
7981
7982 if (hmeblkp->hblk_shared) {
7983 sf_srd_t *srdp = hblktosrd(hmeblkp);
7984 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7985 sf_region_t *rgnp;
7986 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7987 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7988 ASSERT(srdp != NULL);
7989 rgnp = srdp->srd_hmergnp[rid];
7990 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7991 rgnp, rid);
7992 cnt += rgnp->rgn_refcnt;
7993 } else {
7994 cnt++;
7995 }
7996 if (cnt > sh_thresh) {
7997 sfmmu_mlist_exit(pml);
7998 return (1);
7999 }
8000 }
8001
8002 index >>= 1;
8003 sz++;
8004 while (index) {
8005 pp = PP_GROUPLEADER(pp, sz);
8006 ASSERT(sfmmu_mlist_held(pp));
8007 if (index & 0x1) {
8008 goto again;
8009 }
8010 index >>= 1;
8011 sz++;
8012 }
8013 sfmmu_mlist_exit(pml);
8014 return (0);
8015 }
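
/*
 * Illustrative sketch (the policy code and the "po_share" threshold are
 * assumptions, not from this file): callers that only need a yes/no
 * answer prefer hat_page_checkshare(), which can stop early once the
 * threshold is crossed, over hat_page_getshare(), which always walks
 * every mapping size.
 *
 *	if (hat_page_checkshare(pp, po_share))
 *		... treat the page as heavily shared ...
 */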
8016
8017 /*
8018 * Unload all large mappings to the pp and reset the p_szc field of every
8019 * constituent page according to the remaining mappings.
8020 *
8021 * pp must be locked SE_EXCL. Even though no other constituent pages are
8022 * locked it's legal to unload the large mappings to the pp because all
8023 * constituent pages of large locked mappings have to be locked SE_SHARED.
8024 * This means if we have SE_EXCL lock on one of constituent pages none of the
8025 * large mappings to pp are locked.
8026 *
8027 * Decrease the p_szc field starting from the last constituent page and ending
8028 * with the root page. This method is used because other threads rely on the
8029 * root's p_szc to find the lock to synchronize on. Only after a root page_t's
8030 * p_szc is demoted will other threads succeed in sfmmu_mlspl_enter(). This
8031 * ensures that p_szc changes of the constituent pages appear atomic for all
8032 * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
8033 *
8034 * This mechanism is only used for file system pages where it's not always
8035 * possible to get SE_EXCL locks on all constituent pages to demote the size
8036 * code (as is done for anonymous or kernel large pages).
8037 *
8038 * See more comments in front of sfmmu_mlspl_enter().
8039 */
8040 void
8041 hat_page_demote(page_t *pp)
8042 {
8043 int index;
8044 int sz;
8045 cpuset_t cpuset;
8046 int sync = 0;
8047 page_t *rootpp;
8048 struct sf_hment *sfhme;
8049 struct sf_hment *tmphme = NULL;
8050 struct hme_blk *hmeblkp;
8051 uint_t pszc;
8052 page_t *lastpp;
8053 cpuset_t tset;
8054 pgcnt_t npgs;
8055 kmutex_t *pml;
8056 kmutex_t *pmtx = NULL;
8057
8058 ASSERT(PAGE_EXCL(pp));
8059 ASSERT(!PP_ISFREE(pp));
8060 ASSERT(!PP_ISKAS(pp));
8061 ASSERT(page_szc_lock_assert(pp));
8062 pml = sfmmu_mlist_enter(pp);
8063
8064 pszc = pp->p_szc;
8065 if (pszc == 0) {
8066 goto out;
8067 }
8068
8069 index = PP_MAPINDEX(pp) >> 1;
8070
8071 if (index) {
8072 CPUSET_ZERO(cpuset);
8073 sz = TTE64K;
8074 sync = 1;
8075 }
8076
8077 while (index) {
8078 if (!(index & 0x1)) {
8079 index >>= 1;
8080 sz++;
8081 continue;
8082 }
8083 ASSERT(sz <= pszc);
8084 rootpp = PP_GROUPLEADER(pp, sz);
8085 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8086 tmphme = sfhme->hme_next;
8087 ASSERT(!IS_PAHME(sfhme));
8088 hmeblkp = sfmmu_hmetohblk(sfhme);
8089 if (hme_size(sfhme) != sz) {
8090 continue;
8091 }
8092 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8093 CPUSET_OR(cpuset, tset);
8094 }
8095 if (index >>= 1) {
8096 sz++;
8097 }
8098 }
8099
8100 ASSERT(!PP_ISMAPPED_LARGE(pp));
8101
8102 if (sync) {
8103 xt_sync(cpuset);
8104 #ifdef VAC
8105 if (PP_ISTNC(pp)) {
8106 conv_tnc(rootpp, sz);
8107 }
8108 #endif /* VAC */
8109 }
8110
8111 pmtx = sfmmu_page_enter(pp);
8112
8113 ASSERT(pp->p_szc == pszc);
8114 rootpp = PP_PAGEROOT(pp);
8115 ASSERT(rootpp->p_szc == pszc);
8116 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8117
8118 while (lastpp != rootpp) {
8119 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8120 ASSERT(sz < pszc);
8121 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8122 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8123 while (--npgs > 0) {
8124 lastpp->p_szc = (uchar_t)sz;
8125 lastpp = PP_PAGEPREV(lastpp);
8126 }
8127 if (sz) {
8128 /*
8129 * Make sure all updates to the constituent pages'
8130 * p_szc fields are globally visible before the
8131 * current root's p_szc is updated.
8132 */
8133 membar_producer();
8134 }
8135 lastpp->p_szc = sz;
8136 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8137 if (lastpp != rootpp) {
8138 lastpp = PP_PAGEPREV(lastpp);
8139 }
8140 }
8141 if (sz == 0) {
8142 /* the loop above doesn't cover this case */
8143 rootpp->p_szc = 0;
8144 }
8145 out:
8146 ASSERT(pp->p_szc == 0);
8147 if (pmtx != NULL) {
8148 sfmmu_page_exit(pmtx);
8149 }
8150 sfmmu_mlist_exit(pml);
8151 }
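
/*
 * Worked example of the demotion order described above (sizes assumed
 * for illustration): demoting a 64K page made of eight 8K constituents,
 * with no remaining large mappings, sets p_szc to 0 on constituents
 * 7, 6, ..., 1 first and only then on the root (constituent 0). A
 * concurrent sfmmu_mlspl_enter() that reads a stale nonzero root p_szc
 * therefore still finds a consistent lock; when sz is nonzero the
 * membar_producer() call above orders these stores.
 */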
8152
8153 /*
8154 * Refresh the HAT ismttecnt[] element for size szc.
8155 * Caller must have set ISM busy flag to prevent mapping
8156 * lists from changing while we're traversing them.
8157 */
8158 pgcnt_t
8159 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8160 {
8161 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8162 ism_map_t *ism_map;
8163 pgcnt_t npgs = 0;
8164 pgcnt_t npgs_scd = 0;
8165 int j;
8166 sf_scd_t *scdp;
8167 uchar_t rid;
8168
8169 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8170 scdp = sfmmup->sfmmu_scdp;
8171
8172 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8173 ism_map = ism_blkp->iblk_maps;
8174 for (j = 0; j < ISM_MAP_SLOTS && ism_map[j].imap_ismhat; j++) {
8175 rid = ism_map[j].imap_rid;
8176 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8177 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8178
8179 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8180 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8181 /* ISM is in sfmmup's SCD */
8182 npgs_scd +=
8183 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8184 } else {
8185 /* ISM is not in the SCD */
8186 npgs +=
8187 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8188 }
8189 }
8190 }
8191 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8192 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8193 return (npgs);
8194 }
8195
8196 /*
8197 * Yield the memory claim requirement for an address space.
8198 *
8199 * This is currently implemented as the number of bytes that have active
8200 * hardware translations that have page structures. Therefore, it can
8201 * underestimate the traditional resident set size, e.g., if the
8202 * physical page is present and the hardware translation is missing;
8203 * and it can overestimate the rss, e.g., if there are active
8204 * translations to a frame buffer with page structs.
8205 * Also, it does not take sharing into account.
8206 *
8207 * Note that we don't acquire locks here since this function is most often
8208 * called from the clock thread.
8209 */
8210 size_t
8211 hat_get_mapped_size(struct hat *hat)
8212 {
8213 size_t assize = 0;
8214 int i;
8215
8216 if (hat == NULL)
8217 return (0);
8218
8219 for (i = 0; i < mmu_page_sizes; i++)
8220 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8221 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8222
8223 if (hat->sfmmu_iblk == NULL)
8224 return (assize);
8225
8226 for (i = 0; i < mmu_page_sizes; i++)
8227 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8228 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8229
8230 return (assize);
8231 }
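
/*
 * Illustrative arithmetic (the translation counts are assumed, not
 * measured): with ten 8K translations and one 4M translation, the
 * routine above reports
 *
 *	10 * TTEBYTES(TTE8K) + 1 * TTEBYTES(TTE4M)
 *	    = 10 * 8192 + 4194304 = 4276224 bytes
 *
 * i.e. the claim is expressed in bytes, not pages.
 */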
8232
8233 int
8234 hat_stats_enable(struct hat *hat)
8235 {
8236 hatlock_t *hatlockp;
8237
8238 hatlockp = sfmmu_hat_enter(hat);
8239 hat->sfmmu_rmstat++;
8240 sfmmu_hat_exit(hatlockp);
8241 return (1);
8242 }
8243
8244 void
8245 hat_stats_disable(struct hat *hat)
8246 {
8247 hatlock_t *hatlockp;
8248
8249 hatlockp = sfmmu_hat_enter(hat);
8250 hat->sfmmu_rmstat--;
8251 sfmmu_hat_exit(hatlockp);
8252 }
8253
8254 /*
8255 * Routines for entering or removing ourselves from the
8256 * ism_hat's mapping list. This is used for both private and
8257 * SCD hats.
8258 */
8259 static void
8260 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8261 {
8262 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8263
8264 iment->iment_prev = NULL;
8265 iment->iment_next = ism_hat->sfmmu_iment;
8266 if (ism_hat->sfmmu_iment) {
8267 ism_hat->sfmmu_iment->iment_prev = iment;
8268 }
8269 ism_hat->sfmmu_iment = iment;
8270 }
8271
8272 static void
8273 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8274 {
8275 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8276
8277 if (ism_hat->sfmmu_iment == NULL) {
8278 panic("ism map entry remove - no entries");
8279 }
8280
8281 if (iment->iment_prev) {
8282 ASSERT(ism_hat->sfmmu_iment != iment);
8283 iment->iment_prev->iment_next = iment->iment_next;
8284 } else {
8285 ASSERT(ism_hat->sfmmu_iment == iment);
8286 ism_hat->sfmmu_iment = iment->iment_next;
8287 }
8288
8289 if (iment->iment_next) {
8290 iment->iment_next->iment_prev = iment->iment_prev;
8291 }
8292
8293 /*
8294 * zero out the entry
8295 */
8296 iment->iment_next = NULL;
8297 iment->iment_prev = NULL;
8298 iment->iment_hat = NULL;
8299 iment->iment_base_va = 0;
8300 }
8301
8302 /*
8303 * Hat_share()/unshare() return a (non-zero) error
8304 * when saddr and daddr are not properly aligned.
8305 *
8306 * The top level mapping element determines the alignment
8307 * requirement for saddr and daddr, depending on different
8308 * architectures.
8309 *
8310 * When hat_share()/unshare() are not supported,
8311 * HATOP_SHARE()/UNSHARE() return 0
8312 */
8313 int
8314 hat_share(struct hat *sfmmup, caddr_t addr,
8315 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8316 {
8317 ism_blk_t *ism_blkp;
8318 ism_blk_t *new_iblk;
8319 ism_map_t *ism_map;
8320 ism_ment_t *ism_ment;
8321 int i, added;
8322 hatlock_t *hatlockp;
8323 int reload_mmu = 0;
8324 uint_t ismshift = page_get_shift(ismszc);
8325 size_t ismpgsz = page_get_pagesize(ismszc);
8326 uint_t ismmask = (uint_t)ismpgsz - 1;
8327 size_t sh_size = ISM_SHIFT(ismshift, len);
8328 ushort_t ismhatflag;
8329 hat_region_cookie_t rcookie;
8330 sf_scd_t *old_scdp;
8331
8332 #ifdef DEBUG
8333 caddr_t eaddr = addr + len;
8334 #endif /* DEBUG */
8335
8336 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8337 ASSERT(sptaddr == ISMID_STARTADDR);
8338 /*
8339 * Check the alignment.
8340 */
8341 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8342 return (EINVAL);
8343
8344 /*
8345 * Check size alignment.
8346 */
8347 if (!ISM_ALIGNED(ismshift, len))
8348 return (EINVAL);
8349
8350 /*
8351 * Allocate ism_ment for the ism_hat's mapping list, and an
8352 * ism map blk in case we need one. We must do our
8353 * allocations before acquiring locks to prevent a deadlock
8354 * in the kmem allocator on the mapping list lock.
8355 */
8356 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8357 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8358
8359 /*
8360 * Serialize ISM mappings with the ISM busy flag, and also the
8361 * trap handlers.
8362 */
8363 sfmmu_ismhat_enter(sfmmup, 0);
8364
8365 /*
8366 * Allocate an ism map blk if necessary.
8367 */
8368 if (sfmmup->sfmmu_iblk == NULL) {
8369 sfmmup->sfmmu_iblk = new_iblk;
8370 bzero(new_iblk, sizeof (*new_iblk));
8371 new_iblk->iblk_nextpa = (uint64_t)-1;
8372 membar_stst(); /* make sure next ptr visible to all CPUs */
8373 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8374 reload_mmu = 1;
8375 new_iblk = NULL;
8376 }
8377
8378 #ifdef DEBUG
8379 /*
8380 * Make sure mapping does not already exist.
8381 */
8382 ism_blkp = sfmmup->sfmmu_iblk;
8383 while (ism_blkp != NULL) {
8384 ism_map = ism_blkp->iblk_maps;
8385 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8386 if ((addr >= ism_start(ism_map[i]) &&
8387 addr < ism_end(ism_map[i])) ||
8388 (eaddr > ism_start(ism_map[i]) &&
8389 eaddr <= ism_end(ism_map[i]))) {
8390 panic("sfmmu_share: Already mapped!");
8391 }
8392 }
8393 ism_blkp = ism_blkp->iblk_next;
8394 }
8395 #endif /* DEBUG */
8396
8397 ASSERT(ismszc >= TTE4M);
8398 if (ismszc == TTE4M) {
8399 ismhatflag = HAT_4M_FLAG;
8400 } else if (ismszc == TTE32M) {
8401 ismhatflag = HAT_32M_FLAG;
8402 } else if (ismszc == TTE256M) {
8403 ismhatflag = HAT_256M_FLAG;
8404 }
8405 /*
8406 * Add mapping to first available mapping slot.
8407 */
8408 ism_blkp = sfmmup->sfmmu_iblk;
8409 added = 0;
8410 while (!added) {
8411 ism_map = ism_blkp->iblk_maps;
8412 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8413 if (ism_map[i].imap_ismhat == NULL) {
8414
8415 ism_map[i].imap_ismhat = ism_hatid;
8416 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8417 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8418 ism_map[i].imap_hatflags = ismhatflag;
8419 ism_map[i].imap_sz_mask = ismmask;
8420 /*
8421 * imap_seg is checked in ISM_CHECK to see if
8422 * non-NULL, then other info assumed valid.
8423 */
8424 membar_stst();
8425 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8426 ism_map[i].imap_ment = ism_ment;
8427
8428 /*
8429 * Now add ourselves to the ism_hat's
8430 * mapping list.
8431 */
8432 ism_ment->iment_hat = sfmmup;
8433 ism_ment->iment_base_va = addr;
8434 ism_hatid->sfmmu_ismhat = 1;
8435 mutex_enter(&ism_mlist_lock);
8436 iment_add(ism_ment, ism_hatid);
8437 mutex_exit(&ism_mlist_lock);
8438 added = 1;
8439 break;
8440 }
8441 }
8442 if (!added && ism_blkp->iblk_next == NULL) {
8443 ism_blkp->iblk_next = new_iblk;
8444 new_iblk = NULL;
8445 bzero(ism_blkp->iblk_next,
8446 sizeof (*ism_blkp->iblk_next));
8447 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8448 membar_stst();
8449 ism_blkp->iblk_nextpa =
8450 va_to_pa((caddr_t)ism_blkp->iblk_next);
8451 }
8452 ism_blkp = ism_blkp->iblk_next;
8453 }
8454
8455 /*
8456 * After calling hat_join_region, sfmmup may join a new SCD or
8457 * move from the old scd to a new scd, in which case, we want to
8458 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8459 * sfmmu_check_page_sizes at the end of this routine.
8460 */
8461 old_scdp = sfmmup->sfmmu_scdp;
8462
8463 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8464 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8465 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8466 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8467 }
8468 /*
8469 * Update our counters for this sfmmup's ism mappings.
8470 */
8471 for (i = 0; i <= ismszc; i++) {
8472 if (!(disable_ism_large_pages & (1 << i)))
8473 (void) ism_tsb_entries(sfmmup, i);
8474 }
8475
8476 /*
8477 * For ISM and DISM we do not support 512K pages, so we only
8478 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8479 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8480 *
8481 * Need to set 32M/256M ISM flags to make sure
8482 * sfmmu_check_page_sizes() enables them on Panther.
8483 */
8484 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8485
8486 switch (ismszc) {
8487 case TTE256M:
8488 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8489 hatlockp = sfmmu_hat_enter(sfmmup);
8490 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8491 sfmmu_hat_exit(hatlockp);
8492 }
8493 break;
8494 case TTE32M:
8495 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8496 hatlockp = sfmmu_hat_enter(sfmmup);
8497 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8498 sfmmu_hat_exit(hatlockp);
8499 }
8500 break;
8501 default:
8502 break;
8503 }
8504
8505 /*
8506 * If we updated the ismblkpa for this HAT we must make
8507 * sure all CPUs running this process reload their tsbmiss area.
8508 * Otherwise they will fail to load the mappings in the tsbmiss
8509 * handler and will loop calling pagefault().
8510 */
8511 if (reload_mmu) {
8512 hatlockp = sfmmu_hat_enter(sfmmup);
8513 sfmmu_sync_mmustate(sfmmup);
8514 sfmmu_hat_exit(hatlockp);
8515 }
8516
8517 sfmmu_ismhat_exit(sfmmup, 0);
8518
8519 /*
8520 * Free up ismblk if we didn't use it.
8521 */
8522 if (new_iblk != NULL)
8523 kmem_cache_free(ism_blk_cache, new_iblk);
8524
8525 /*
8526 * Check TSB and TLB page sizes.
8527 */
8528 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8529 sfmmu_check_page_sizes(sfmmup, 0);
8530 } else {
8531 sfmmu_check_page_sizes(sfmmup, 1);
8532 }
8533 return (0);
8534 }
8535
8536 /*
8537 * hat_unshare removes exactly one ism_map from
8538 * this process's as. It expects multiple calls
8539 * to hat_unshare for multiple shm segments.
8540 */
8541 void
8542 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8543 {
8544 ism_map_t *ism_map;
8545 ism_ment_t *free_ment = NULL;
8546 ism_blk_t *ism_blkp;
8547 struct hat *ism_hatid;
8548 int found, i;
8549 hatlock_t *hatlockp;
8550 struct tsb_info *tsbinfo;
8551 uint_t ismshift = page_get_shift(ismszc);
8552 size_t sh_size = ISM_SHIFT(ismshift, len);
8553 uchar_t ism_rid;
8554 sf_scd_t *old_scdp;
8555
8556 ASSERT(ISM_ALIGNED(ismshift, addr));
8557 ASSERT(ISM_ALIGNED(ismshift, len));
8558 ASSERT(sfmmup != NULL);
8559 ASSERT(sfmmup != ksfmmup);
8560
8561 ASSERT(sfmmup->sfmmu_as != NULL);
8562
8563 /*
8564 * Make sure that during the entire time ISM mappings are removed,
8565 * the trap handlers serialize behind us, and that no one else
8566 * can be mucking with ISM mappings. This also lets us get away
8567 * with not doing expensive cross calls to flush the TLB -- we
8568 * just discard the context, flush the entire TSB, and call it
8569 * a day.
8570 */
8571 sfmmu_ismhat_enter(sfmmup, 0);
8572
8573 /*
8574 * Remove the mapping.
8575 *
8576 * We can't have any holes in the ism map.
8577 * The tsb miss code, while searching the ism map, will
8578 * stop on an empty map slot. So we must move
8579 * everything past the hole up by one, if there is a hole.
8580 *
8581 * Also, empty ism map blks are not freed until the
8582 * process exits. This is to prevent an MT race condition
8583 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8584 */
8585 found = 0;
8586 ism_blkp = sfmmup->sfmmu_iblk;
8587 while (!found && ism_blkp != NULL) {
8588 ism_map = ism_blkp->iblk_maps;
8589 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8590 if (addr == ism_start(ism_map[i]) &&
8591 sh_size == (size_t)(ism_size(ism_map[i]))) {
8592 found = 1;
8593 break;
8594 }
8595 }
8596 if (!found)
8597 ism_blkp = ism_blkp->iblk_next;
8598 }
8599
8600 if (found) {
8601 ism_hatid = ism_map[i].imap_ismhat;
8602 ism_rid = ism_map[i].imap_rid;
8603 ASSERT(ism_hatid != NULL);
8604 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8605
8606 /*
8607 * After hat_leave_region, the sfmmup may leave SCD,
8608 * in which case, we want to grow the private tsb size when
8609 * calling sfmmu_check_page_sizes at the end of the routine.
8610 */
8611 old_scdp = sfmmup->sfmmu_scdp;
8612 /*
8613 * Then remove ourselves from the region.
8614 */
8615 if (ism_rid != SFMMU_INVALID_ISMRID) {
8616 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8617 HAT_REGION_ISM);
8618 }
8619
8620 /*
8621 * And now guarantee that any other cpu
8622 * that tries to process an ISM miss
8623 * will go to tl=0.
8624 */
8625 hatlockp = sfmmu_hat_enter(sfmmup);
8626 sfmmu_invalidate_ctx(sfmmup);
8627 sfmmu_hat_exit(hatlockp);
8628
8629 /*
8630 * Remove ourselves from the ism mapping list.
8631 */
8632 mutex_enter(&ism_mlist_lock);
8633 iment_sub(ism_map[i].imap_ment, ism_hatid);
8634 mutex_exit(&ism_mlist_lock);
8635 free_ment = ism_map[i].imap_ment;
8636
8637 /*
8638 * We delete the ism map by copying
8639 * the next map over the current one.
8640 * We will take the next one in the maps
8641 * array or from the next ism_blk.
8642 */
8643 while (ism_blkp != NULL) {
8644 ism_map = ism_blkp->iblk_maps;
8645 while (i < (ISM_MAP_SLOTS - 1)) {
8646 ism_map[i] = ism_map[i + 1];
8647 i++;
8648 }
8649 /* i == (ISM_MAP_SLOTS - 1) */
8650 ism_blkp = ism_blkp->iblk_next;
8651 if (ism_blkp != NULL) {
8652 ism_map[i] = ism_blkp->iblk_maps[0];
8653 i = 0;
8654 } else {
8655 ism_map[i].imap_seg = 0;
8656 ism_map[i].imap_vb_shift = 0;
8657 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8658 ism_map[i].imap_hatflags = 0;
8659 ism_map[i].imap_sz_mask = 0;
8660 ism_map[i].imap_ismhat = NULL;
8661 ism_map[i].imap_ment = NULL;
8662 }
8663 }
8664
8665 /*
8666 * Now flush entire TSB for the process, since
8667 * demapping page by page can be too expensive.
8668 * We don't have to flush the TLB here anymore
8669 * since we switch to a new TLB ctx instead.
8670 * Also, there is no need to flush if the process
8671 * is exiting since the TSB will be freed later.
8672 */
8673 if (!sfmmup->sfmmu_free) {
8674 hatlockp = sfmmu_hat_enter(sfmmup);
8675 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8676 tsbinfo = tsbinfo->tsb_next) {
8677 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8678 continue;
8679 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
8680 tsbinfo->tsb_flags |=
8681 TSB_FLUSH_NEEDED;
8682 continue;
8683 }
8684
8685 sfmmu_inv_tsb(tsbinfo->tsb_va,
8686 TSB_BYTES(tsbinfo->tsb_szc));
8687 }
8688 sfmmu_hat_exit(hatlockp);
8689 }
8690 }
8691
8692 /*
8693 * Update our counters for this sfmmup's ism mappings.
8694 */
8695 for (i = 0; i <= ismszc; i++) {
8696 if (!(disable_ism_large_pages & (1 << i)))
8697 (void) ism_tsb_entries(sfmmup, i);
8698 }
8699
8700 sfmmu_ismhat_exit(sfmmup, 0);
8701
8702 /*
8703 * We must do our freeing here after dropping locks
8704 * to prevent a deadlock in the kmem allocator on the
8705 * mapping list lock.
8706 */
8707 if (free_ment != NULL)
8708 kmem_cache_free(ism_ment_cache, free_ment);
8709
8710 /*
8711 * Check TSB and TLB page sizes if the process isn't exiting.
8712 */
8713 if (!sfmmup->sfmmu_free) {
8714 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
8715 sfmmu_check_page_sizes(sfmmup, 1);
8716 } else {
8717 sfmmu_check_page_sizes(sfmmup, 0);
8718 }
8719 }
8720 }
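
/*
 * Usage sketch for the two routines above (illustrative; the seg_spt
 * style caller and its variables are assumptions, not from this file):
 * an ISM segment is typically attached and later detached as
 *
 *	(void) hat_share(as->a_hat, seg->s_base, ism_hatid,
 *	    ISMID_STARTADDR, seg->s_size, TTE4M);
 *	...
 *	hat_unshare(as->a_hat, seg->s_base, seg->s_size, TTE4M);
 *
 * with both calls serialized by the same address space.
 */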
8721
8722 /* ARGSUSED */
8723 static int
8724 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
8725 {
8726 /* void *buf is sfmmu_t pointer */
8727 bzero(buf, sizeof (sfmmu_t));
8728
8729 return (0);
8730 }
8731
8732 /* ARGSUSED */
8733 static void
8734 sfmmu_idcache_destructor(void *buf, void *cdrarg)
8735 {
8736 /* void *buf is sfmmu_t pointer */
8737 }
8738
8739 /*
8740 * Set up kmem hmeblks by bzeroing all members and initializing the nextpa
8741 * field to be the pa of this hmeblk.
8742 */
8743 /* ARGSUSED */
8744 static int
8745 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
8746 {
8747 struct hme_blk *hmeblkp;
8748
8749 bzero(buf, (size_t)cdrarg);
8750 hmeblkp = (struct hme_blk *)buf;
8751 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
8752
8753 #ifdef HBLK_TRACE
8754 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
8755 #endif /* HBLK_TRACE */
8756
8757 return (0);
8758 }
8759
8760 /* ARGSUSED */
8761 static void
8762 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
8763 {
8764
8765 #ifdef HBLK_TRACE
8766
8767 struct hme_blk *hmeblkp;
8768
8769 hmeblkp = (struct hme_blk *)buf;
8770 mutex_destroy(&hmeblkp->hblk_audit_lock);
8771
8772 #endif /* HBLK_TRACE */
8773 }
8774
8775 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
8776 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
8777 /*
8778 * The kmem allocator will callback into our reclaim routine when the system
8779 * is running low in memory. We traverse the hash and free up all unused but
8780 * still cached hme_blks. We also traverse the free list and free them up
8781 * as well.
8782 */
8783 /*ARGSUSED*/
8784 static void
8785 sfmmu_hblkcache_reclaim(void *cdrarg)
8786 {
8787 int i;
8788 struct hmehash_bucket *hmebp;
8789 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
8790 static struct hmehash_bucket *uhmehash_reclaim_hand;
8791 static struct hmehash_bucket *khmehash_reclaim_hand;
8792 struct hme_blk *list = NULL, *last_hmeblkp;
8793 cpuset_t cpuset = cpu_ready_set;
8794 cpu_hme_pend_t *cpuhp;
8795
8796 /* Free up hmeblks on the cpu pending lists */
8797 for (i = 0; i < NCPU; i++) {
8798 cpuhp = &cpu_hme_pend[i];
8799 if (cpuhp->chp_listp != NULL) {
8800 mutex_enter(&cpuhp->chp_mutex);
8801 if (cpuhp->chp_listp == NULL) {
8802 mutex_exit(&cpuhp->chp_mutex);
8803 continue;
8804 }
8805 for (last_hmeblkp = cpuhp->chp_listp;
8806 last_hmeblkp->hblk_next != NULL;
8807 last_hmeblkp = last_hmeblkp->hblk_next)
8808 ;
8809 last_hmeblkp->hblk_next = list;
8810 list = cpuhp->chp_listp;
8811 cpuhp->chp_listp = NULL;
8812 cpuhp->chp_count = 0;
8813 mutex_exit(&cpuhp->chp_mutex);
8814 }
8815
8816 }
8817
8818 if (list != NULL) {
8819 kpreempt_disable();
8820 CPUSET_DEL(cpuset, CPU->cpu_id);
8821 xt_sync(cpuset);
8822 xt_sync(cpuset);
8823 kpreempt_enable();
8824 sfmmu_hblk_free(&list);
8825 list = NULL;
8826 }
8827
8828 hmebp = uhmehash_reclaim_hand;
8829 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
8830 uhmehash_reclaim_hand = hmebp = uhme_hash;
8831 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8832
8833 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8834 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8835 hmeblkp = hmebp->hmeblkp;
8836 pr_hblk = NULL;
8837 while (hmeblkp) {
8838 nx_hblk = hmeblkp->hblk_next;
8839 if (!hmeblkp->hblk_vcnt &&
8840 !hmeblkp->hblk_hmecnt) {
8841 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8842 pr_hblk, &list, 0);
8843 } else {
8844 pr_hblk = hmeblkp;
8845 }
8846 hmeblkp = nx_hblk;
8847 }
8848 SFMMU_HASH_UNLOCK(hmebp);
8849 }
8850 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
8851 hmebp = uhme_hash;
8852 }
8853
8854 hmebp = khmehash_reclaim_hand;
8855 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
8856 khmehash_reclaim_hand = hmebp = khme_hash;
8857 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
8858
8859 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
8860 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
8861 hmeblkp = hmebp->hmeblkp;
8862 pr_hblk = NULL;
8863 while (hmeblkp) {
8864 nx_hblk = hmeblkp->hblk_next;
8865 if (!hmeblkp->hblk_vcnt &&
8866 !hmeblkp->hblk_hmecnt) {
8867 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
8868 pr_hblk, &list, 0);
8869 } else {
8870 pr_hblk = hmeblkp;
8871 }
8872 hmeblkp = nx_hblk;
8873 }
8874 SFMMU_HASH_UNLOCK(hmebp);
8875 }
8876 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
8877 hmebp = khme_hash;
8878 }
8879 sfmmu_hblks_list_purge(&list, 0);
8880 }
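
/*
 * Example of the scan ratio above (the bucket count is assumed for
 * illustration): with a user hash of 8192 buckets and
 * sfmmu_cache_reclaim_scan_ratio of 8, each reclaim callback advances
 * uhmehash_reclaim_hand by 8192 / 8 = 1024 buckets and scans that many,
 * so the whole user hash is covered over eight consecutive callbacks.
 */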
8881
8882 /*
8883 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
8884 * The same goes for sfmmu_get_addrvcolor().
8885 *
8886 * This function will return the virtual color for the specified page. The
8887 * virtual color corresponds to this page's current mapping or its last mapping.
8888 * It is used by memory allocators to choose addresses with the correct
8889 * alignment so VAC consistency is automatically maintained. If the page
8890 * has no color it returns -1.
8891 */
8892 /*ARGSUSED*/
8893 int
8894 sfmmu_get_ppvcolor(struct page *pp)
8895 {
8896 #ifdef VAC
8897 int color;
8898
8899 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
8900 return (-1);
8901 }
8902 color = PP_GET_VCOLOR(pp);
8903 ASSERT(color < mmu_btop(shm_alignment));
8904 return (color);
8905 #else
8906 return (-1);
8907 #endif /* VAC */
8908 }
8909
8910 /*
8911 * This function will return the desired alignment for vac consistency
8912 * (vac color) given a virtual address. If no vac is present it returns -1.
8913 */
8914 /*ARGSUSED*/
8915 int
8916 sfmmu_get_addrvcolor(caddr_t vaddr)
8917 {
8918 #ifdef VAC
8919 if (cache & CACHE_VAC) {
8920 return (addr_to_vcolor(vaddr));
8921 } else {
8922 return (-1);
8923 }
8924 #else
8925 return (-1);
8926 #endif /* VAC */
8927 }
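
/*
 * Illustrative sketch (the allocator loop and "base" are assumptions,
 * not from this file): an allocator maintaining VAC consistency picks a
 * VA whose color matches the page, roughly
 *
 *	color = sfmmu_get_ppvcolor(pp);
 *	if (color != -1)
 *		vaddr = base + ptob(color);
 *
 * so that sfmmu_get_addrvcolor(vaddr) == color, assuming base is
 * aligned to shm_alignment.
 */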
8928
8929 #ifdef VAC
8930 /*
8931 * Check for conflicts.
8932 * A conflict exists if the new and existing mappings do not match in
8933 * their "shm_alignment" fields. If conflicts exist, the existing mappings
8934 * are flushed unless one of them is locked. If one of them is locked, then
8935 * the mappings are flushed and converted to non-cacheable mappings.
8936 */
8937 static void
8938 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
8939 {
8940 struct hat *tmphat;
8941 struct sf_hment *sfhmep, *tmphme = NULL;
8942 struct hme_blk *hmeblkp;
8943 int vcolor;
8944 tte_t tte;
8945
8946 ASSERT(sfmmu_mlist_held(pp));
8947 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
8948
8949 vcolor = addr_to_vcolor(addr);
8950 if (PP_NEWPAGE(pp)) {
8951 PP_SET_VCOLOR(pp, vcolor);
8952 return;
8953 }
8954
8955 if (PP_GET_VCOLOR(pp) == vcolor) {
8956 return;
8957 }
8958
8959 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
8960 /*
8961 * Previous user of page had a different color
8962 * but since there are no current users
8963 * we just flush the cache and change the color.
8964 */
8965 SFMMU_STAT(sf_pgcolor_conflict);
8966 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
8967 PP_SET_VCOLOR(pp, vcolor);
8968 return;
8969 }
8970
8971 /*
8972 * If we get here we have a vac conflict with a current
8973 * mapping. VAC conflict policy is as follows.
8974 * - The default is to unload the other mappings unless:
8975 * - If we have a large mapping we uncache the page.
8976 * We need to uncache the rest of the large page too.
8977 * - If any of the mappings are locked we uncache the page.
8978 * - If the requested mapping is inconsistent
8979 * with another mapping and that mapping
8980 * is in the same address space we have to
8981 * make it non-cached. The default thing
8982 * to do is unload the inconsistent mapping
8983 * but if they are in the same address space
8984 * we run the risk of unmapping the pc or the
8985 * stack which we will use as we return to the user,
8986 * in which case we can then fault on the thing
8987 * we just unloaded and get into an infinite loop.
8988 */
8989 if (PP_ISMAPPED_LARGE(pp)) {
8990 int sz;
8991
8992 /*
8993 * Existing mapping is for big pages. We don't unload
8994 * existing big mappings to satisfy new mappings.
8995 * Always convert all mappings to TNC.
8996 */
8997 sz = fnd_mapping_sz(pp);
8998 pp = PP_GROUPLEADER(pp, sz);
8999 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9000 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9001 TTEPAGES(sz));
9002
9003 return;
9004 }
9005
9006 /*
9007 * Check if any mapping is in the same address space, or if it is
9008 * locked, since in that case we need to uncache.
9009 */
9010 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9011 tmphme = sfhmep->hme_next;
9012 if (IS_PAHME(sfhmep))
9013 continue;
9014 hmeblkp = sfmmu_hmetohblk(sfhmep);
9015 tmphat = hblktosfmmu(hmeblkp);
9016 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9017 ASSERT(TTE_IS_VALID(&tte));
9018 if (hmeblkp->hblk_shared || tmphat == hat ||
9019 hmeblkp->hblk_lckcnt) {
9020 /*
9021 * We have an uncache conflict
9022 */
9023 SFMMU_STAT(sf_uncache_conflict);
9024 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9025 return;
9026 }
9027 }
9028
9029 /*
9030 * We have an unload conflict
9031 * We have already checked for LARGE mappings, therefore
9032 * the remaining mapping(s) must be TTE8K.
9033 */
9034 SFMMU_STAT(sf_unload_conflict);
9035
9036 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9037 tmphme = sfhmep->hme_next;
9038 if (IS_PAHME(sfhmep))
9039 continue;
9040 hmeblkp = sfmmu_hmetohblk(sfhmep);
9041 ASSERT(!hmeblkp->hblk_shared);
9042 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9043 }
9044
9045 if (PP_ISMAPPED_KPM(pp))
9046 sfmmu_kpm_vac_unload(pp, addr);
9047
9048 /*
9049 * Unloads only do TLB flushes so we need to flush the
9050 * cache here.
9051 */
9052 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9053 PP_SET_VCOLOR(pp, vcolor);
9054 }
9055
9056 /*
9057 * Whenever a mapping is unloaded and the page is in TNC state,
9058 * we see if the page can be made cacheable again. 'pp' is
9059 * the page that we just unloaded a mapping from, the size
9060 * of mapping that was unloaded is 'ottesz'.
9061 * Remark:
9062 * The recache policy for mpss pages can leave a performance problem
9063 * under the following circumstances:
9064 * . A large page in uncached mode has just been unmapped.
9065 * . All constituent pages are TNC due to a conflicting small mapping.
9066 * . There are many other, non conflicting, small mappings around for
9067 * a lot of the constituent pages.
9068 * . We're called w/ the "old" groupleader page and the old ottesz,
9069 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so
9070 * we end up w/ TTE8K or npages == 1.
9071 * . We call tst_tnc w/ the old groupleader only, and if there is no
9072 * conflict, we re-cache only this page.
9073 * . All other small mappings are not checked and will be left in TNC mode.
9074 * The problem is not very serious because:
9075 * . mpss is actually only defined for heap and stack, so the probability
9076 * is not very high that a large page mapping exists in parallel to a small
9077 * one (this is possible, but seems to be bad programming style in the
9078 * appl).
9079 * . The problem gets a little bit more serious, when those TNC pages
9080 * have to be mapped into kernel space, e.g. for networking.
9081 * . When VAC alias conflicts occur in applications, this is regarded
9082 * as an application bug. So if kstat's show them, the appl should
9083 * be changed anyway.
9084 */
9085 void
9086 conv_tnc(page_t *pp, int ottesz)
9087 {
9088 int cursz, dosz;
9089 pgcnt_t curnpgs, dopgs;
9090 pgcnt_t pg64k;
9091 page_t *pp2;
9092
9093 /*
9094 * Determine how big a range we check for TNC and find
9095 * leader page. cursz is the size of the biggest
9096 * mapping that still exist on 'pp'.
9097 */
9098 if (PP_ISMAPPED_LARGE(pp)) {
9099 cursz = fnd_mapping_sz(pp);
9100 } else {
9101 cursz = TTE8K;
9102 }
9103
9104 if (ottesz >= cursz) {
9105 dosz = ottesz;
9106 pp2 = pp;
9107 } else {
9108 dosz = cursz;
9109 pp2 = PP_GROUPLEADER(pp, dosz);
9110 }
9111
9112 pg64k = TTEPAGES(TTE64K);
9113 dopgs = TTEPAGES(dosz);
9114
9115 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9116
9117 while (dopgs != 0) {
9118 curnpgs = TTEPAGES(cursz);
9119 if (tst_tnc(pp2, curnpgs)) {
9120 SFMMU_STAT_ADD(sf_recache, curnpgs);
9121 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9122 curnpgs);
9123 }
9124
9125 ASSERT(dopgs >= curnpgs);
9126 dopgs -= curnpgs;
9127
9128 if (dopgs == 0) {
9129 break;
9130 }
9131
9132 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9133 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9134 cursz = fnd_mapping_sz(pp2);
9135 } else {
9136 cursz = TTE8K;
9137 }
9138 }
9139 }
9140
9141 /*
9142 * Returns 1 if the page(s) can be converted from TNC to the
9143 * cacheable setting, returns 0 otherwise.
9144 */
9146 int
9147 tst_tnc(page_t *pp, pgcnt_t npages)
9148 {
9149 struct sf_hment *sfhme;
9150 struct hme_blk *hmeblkp;
9151 tte_t tte;
9152 caddr_t vaddr;
9153 int clr_valid = 0;
9154 int color, color1, bcolor;
9155 int i, ncolors;
9156
9157 ASSERT(pp != NULL);
9158 ASSERT(!(cache & CACHE_WRITEBACK));
9159
9160 if (npages > 1) {
9161 ncolors = CACHE_NUM_COLOR;
9162 }
9163
9164 for (i = 0; i < npages; i++) {
9165 ASSERT(sfmmu_mlist_held(pp));
9166 ASSERT(PP_ISTNC(pp));
9167 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9168
9169 if (PP_ISPNC(pp)) {
9170 return (0);
9171 }
9172
9173 clr_valid = 0;
9174 if (PP_ISMAPPED_KPM(pp)) {
9175 caddr_t kpmvaddr;
9176
9177 ASSERT(kpm_enable);
9178 kpmvaddr = hat_kpm_page2va(pp, 1);
9179 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9180 color1 = addr_to_vcolor(kpmvaddr);
9181 clr_valid = 1;
9182 }
9183
9184 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9185 if (IS_PAHME(sfhme))
9186 continue;
9187 hmeblkp = sfmmu_hmetohblk(sfhme);
9188
9189 sfmmu_copytte(&sfhme->hme_tte, &tte);
9190 ASSERT(TTE_IS_VALID(&tte));
9191
9192 vaddr = tte_to_vaddr(hmeblkp, tte);
9193 color = addr_to_vcolor(vaddr);
9194
9195 if (npages > 1) {
9196 /*
9197 * If there is a big mapping, make sure
9198 * 8K mapping is consistent with the big
9199 * mapping.
9200 */
9201 bcolor = i % ncolors;
9202 if (color != bcolor) {
9203 return (0);
9204 }
9205 }
9206 if (!clr_valid) {
9207 clr_valid = 1;
9208 color1 = color;
9209 }
9210
9211 if (color1 != color) {
9212 return (0);
9213 }
9214 }
9215
9216 pp = PP_PAGENEXT(pp);
9217 }
9218
9219 return (1);
9220 }
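
/*
 * Worked example for the color check above (two VAC colors assumed):
 * for a 64K page made of eight 8K constituents, constituent i may only
 * carry 8K mappings whose virtual color equals i % 2; any other color,
 * or a P_PNC constituent, makes tst_tnc() return 0 and the range is
 * left in TNC mode by conv_tnc().
 */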
9221
9222 void
9223 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9224 pgcnt_t npages)
9225 {
9226 kmutex_t *pmtx;
9227 int i, ncolors, bcolor;
9228 kpm_hlk_t *kpmp;
9229 cpuset_t cpuset;
9230
9231 ASSERT(pp != NULL);
9232 ASSERT(!(cache & CACHE_WRITEBACK));
9233
9234 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9235 pmtx = sfmmu_page_enter(pp);
9236
9237 /*
9238 * Fast path caching single unmapped page
9239 */
9240 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9241 flags == HAT_CACHE) {
9242 PP_CLRTNC(pp);
9243 PP_CLRPNC(pp);
9244 sfmmu_page_exit(pmtx);
9245 sfmmu_kpm_kpmp_exit(kpmp);
9246 return;
9247 }
9248
9249 /*
9250 * We need to capture all cpus in order to change cacheability
9251 * because we can't allow one cpu to access the same physical
9252 * page using a cacheable and a non-cacheable mapping at the same
9253 * time. Since we may end up walking the ism mapping list, we
9254 * have to grab its lock now, because we can't after all the
9255 * cpus have been captured.
9256 */
9257 sfmmu_hat_lock_all();
9258 mutex_enter(&ism_mlist_lock);
9259 kpreempt_disable();
9260 cpuset = cpu_ready_set;
9261 xc_attention(cpuset);
9262
9263 if (npages > 1) {
9264 /*
9265 * Make sure all colors are flushed since
9266 * sfmmu_page_cache() only flushes one color;
9267 * it does not know about big pages.
9268 */
9269 ncolors = CACHE_NUM_COLOR;
9270 if (flags & HAT_TMPNC) {
9271 for (i = 0; i < ncolors; i++) {
9272 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9273 }
9274 cache_flush_flag = CACHE_NO_FLUSH;
9275 }
9276 }
9277
9278 for (i = 0; i < npages; i++) {
9279
9280 ASSERT(sfmmu_mlist_held(pp));
9281
9282 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9283
9284 if (npages > 1) {
9285 bcolor = i % ncolors;
9286 } else {
9287 bcolor = NO_VCOLOR;
9288 }
9289
9290 sfmmu_page_cache(pp, flags, cache_flush_flag,
9291 bcolor);
9292 }
9293
9294 pp = PP_PAGENEXT(pp);
9295 }
9296
9297 xt_sync(cpuset);
9298 xc_dismissed(cpuset);
9299 mutex_exit(&ism_mlist_lock);
9300 sfmmu_hat_unlock_all();
9301 sfmmu_page_exit(pmtx);
9302 sfmmu_kpm_kpmp_exit(kpmp);
9303 kpreempt_enable();
9304 }
9305
9306 /*
9307 * This function changes the virtual cacheability of all mappings to a
9308 * particular page. When changing from uncache to cacheable the mappings will
9309 * only be changed if all of them have the same virtual color.
9310 * We need to flush the cache on all cpus. It is possible that
9311 * a process referenced a page as cacheable but has since exited
9312 * and cleared the mapping list. We still need to flush it, but have no
9313 * state, so flushing on all cpus is the only alternative.
9314 */
9315 static void
9316 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9317 {
9318 struct sf_hment *sfhme;
9319 struct hme_blk *hmeblkp;
9320 sfmmu_t *sfmmup;
9321 tte_t tte, ttemod;
9322 caddr_t vaddr;
9323 int ret, color;
9324 pfn_t pfn;
9325
9326 color = bcolor;
9327 pfn = pp->p_pagenum;
9328
9329 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9330
9331 if (IS_PAHME(sfhme))
9332 continue;
9333 hmeblkp = sfmmu_hmetohblk(sfhme);
9334
9335 sfmmu_copytte(&sfhme->hme_tte, &tte);
9336 ASSERT(TTE_IS_VALID(&tte));
9337 vaddr = tte_to_vaddr(hmeblkp, tte);
9338 color = addr_to_vcolor(vaddr);
9339
9340 #ifdef DEBUG
9341 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9342 ASSERT(color == bcolor);
9343 }
9344 #endif
9345
9346 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9347
9348 ttemod = tte;
9349 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9350 TTE_CLR_VCACHEABLE(&ttemod);
9351 } else { /* flags & HAT_CACHE */
9352 TTE_SET_VCACHEABLE(&ttemod);
9353 }
9354 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9355 if (ret < 0) {
9356 /*
9357 * Since all cpus are captured modifytte should not
9358 * fail.
9359 */
9360 panic("sfmmu_page_cache: write to tte failed");
9361 }
9362
9363 sfmmup = hblktosfmmu(hmeblkp);
9364 if (cache_flush_flag == CACHE_FLUSH) {
9365 /*
9366 * Flush TSBs, TLBs and caches
9367 */
9368 if (hmeblkp->hblk_shared) {
9369 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9370 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9371 sf_region_t *rgnp;
9372 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9373 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9374 ASSERT(srdp != NULL);
9375 rgnp = srdp->srd_hmergnp[rid];
9376 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9377 srdp, rgnp, rid);
9378 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9379 hmeblkp, 0);
9380 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9381 } else if (sfmmup->sfmmu_ismhat) {
9382 if (flags & HAT_CACHE) {
9383 SFMMU_STAT(sf_ism_recache);
9384 } else {
9385 SFMMU_STAT(sf_ism_uncache);
9386 }
9387 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9388 pfn, CACHE_FLUSH);
9389 } else {
9390 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9391 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9392 }
9393
9394 /*
9395 * all cache entries belonging to this pfn are
9396 * now flushed.
9397 */
9398 cache_flush_flag = CACHE_NO_FLUSH;
9399 } else {
9400 /*
9401 * Flush only TSBs and TLBs.
9402 */
9403 if (hmeblkp->hblk_shared) {
9404 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9405 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9406 sf_region_t *rgnp;
9407 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9408 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9409 ASSERT(srdp != NULL);
9410 rgnp = srdp->srd_hmergnp[rid];
9411 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9412 srdp, rgnp, rid);
9413 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9414 hmeblkp, 0);
9415 } else if (sfmmup->sfmmu_ismhat) {
9416 if (flags & HAT_CACHE) {
9417 SFMMU_STAT(sf_ism_recache);
9418 } else {
9419 SFMMU_STAT(sf_ism_uncache);
9420 }
9421 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9422 pfn, CACHE_NO_FLUSH);
9423 } else {
9424 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9425 }
9426 }
9427 }
9428
9429 if (PP_ISMAPPED_KPM(pp))
9430 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9431
9432 switch (flags) {
9433
9434 default:
9435 panic("sfmmu_pagecache: unknown flags");
9436 break;
9437
9438 case HAT_CACHE:
9439 PP_CLRTNC(pp);
9440 PP_CLRPNC(pp);
9441 PP_SET_VCOLOR(pp, color);
9442 break;
9443
9444 case HAT_TMPNC:
9445 PP_SETTNC(pp);
9446 PP_SET_VCOLOR(pp, NO_VCOLOR);
9447 break;
9448
9449 case HAT_UNCACHE:
9450 PP_SETPNC(pp);
9451 PP_CLRTNC(pp);
9452 PP_SET_VCOLOR(pp, NO_VCOLOR);
9453 break;
9454 }
9455 }
9456 #endif /* VAC */
9457
9458
9459 /*
9460 * Wrapper routine used to return a context.
9461 *
9462 * It's the responsibility of the caller to guarantee that the
9463 * process serializes on calls here by taking the HAT lock for
9464 * the hat.
9465 *
9466 */
9467 static void
9468 sfmmu_get_ctx(sfmmu_t *sfmmup)
9469 {
9470 mmu_ctx_t *mmu_ctxp;
9471 uint_t pstate_save;
9472 int ret;
9473
9474 ASSERT(sfmmu_hat_lock_held(sfmmup));
9475 ASSERT(sfmmup != ksfmmup);
9476
9477 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9478 sfmmu_setup_tsbinfo(sfmmup);
9479 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9480 }
9481
9482 kpreempt_disable();
9483
9484 mmu_ctxp = CPU_MMU_CTXP(CPU);
9485 ASSERT(mmu_ctxp);
9486 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9487 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9488
9489 /*
9490 * Do a wrap-around if cnum reaches the max number of cnums supported by an MMU.
9491 */
9492 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9493 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9494
9495 /*
9496 * Let the MMU set up the page sizes to use for
9497 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9498 */
9499 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9500 mmu_set_ctx_page_sizes(sfmmup);
9501 }
9502
9503 /*
9504 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9505 * interrupts disabled to prevent a race condition with wrap-around
9506 * ctx invalidation. On sun4v, ctx invalidation also involves
9507 * an HV call to set the number of TSBs to 0. If interrupts are not
9508 * kept disabled until after sfmmu_load_mmustate is complete, TSBs may
9509 * become assigned to INVALID_CONTEXT. This is not allowed.
9510 */
9511 pstate_save = sfmmu_disable_intrs();
9512
9513 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9514 sfmmup->sfmmu_scdp != NULL) {
9515 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9516 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9517 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9518 /* debug purpose only */
9519 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9520 != INVALID_CONTEXT);
9521 }
9522 sfmmu_load_mmustate(sfmmup);
9523
9524 sfmmu_enable_intrs(pstate_save);
9525
9526 kpreempt_enable();
9527 }
9528
9529 /*
9530 * When all cnums are used up in a MMU, cnum will wrap around to the
9531 * next generation and start from 2.
9532 */
9533 static void
9534 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9535 {
9536
9537 /* caller must have disabled the preemption */
9538 ASSERT(curthread->t_preempt >= 1);
9539 ASSERT(mmu_ctxp != NULL);
9540
9541 /* acquire Per-MMU (PM) spin lock */
9542 mutex_enter(&mmu_ctxp->mmu_lock);
9543
9544 /* re-check to see if wrap-around is needed */
9545 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9546 goto done;
9547
9548 SFMMU_MMU_STAT(mmu_wrap_around);
9549
9550 /* update gnum */
9551 ASSERT(mmu_ctxp->mmu_gnum != 0);
9552 mmu_ctxp->mmu_gnum++;
9553 if (mmu_ctxp->mmu_gnum == 0 ||
9554 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9555 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9556 (void *)mmu_ctxp);
9557 }
9558
9559 if (mmu_ctxp->mmu_ncpus > 1) {
9560 cpuset_t cpuset;
9561
9562 membar_enter(); /* make sure updated gnum visible */
9563
9564 SFMMU_XCALL_STATS(NULL);
9565
9566 /* xcall to others on the same MMU to invalidate ctx */
9567 cpuset = mmu_ctxp->mmu_cpuset;
9568 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9569 CPUSET_DEL(cpuset, CPU->cpu_id);
9570 CPUSET_AND(cpuset, cpu_ready_set);
9571
9572 /*
9573 * Pass in INVALID_CONTEXT as the first parameter to
9574 * sfmmu_raise_tsb_exception, which invalidates the context
9575 * of any process running on the CPUs in the MMU.
9576 */
9577 xt_some(cpuset, sfmmu_raise_tsb_exception,
9578 INVALID_CONTEXT, INVALID_CONTEXT);
9579 xt_sync(cpuset);
9580
9581 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9582 }
9583
9584 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9585 sfmmu_setctx_sec(INVALID_CONTEXT);
9586 sfmmu_clear_utsbinfo();
9587 }
9588
9589 /*
9590 * No xcall is needed here. On sun4u systems all CPUs in a context
9591 * domain share a single physical MMU, therefore it's enough to flush
9592 * the TLB on the local CPU. On sun4v systems we use 1 global context
9593 * domain and flush all remote TLBs in the sfmmu_raise_tsb_exception
9594 * handler. Note that vtag_flushall_uctxs() is called
9595 * on Ultra II machines, where the equivalent flushall functionality
9596 * is implemented in SW, and only user ctx TLB entries are flushed.
9597 */
9598 if (&vtag_flushall_uctxs != NULL) {
9599 vtag_flushall_uctxs();
9600 } else {
9601 vtag_flushall();
9602 }
9603
9604 /* reset mmu cnum, skips cnum 0 and 1 */
9605 if (reset_cnum == B_TRUE)
9606 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9607
9608 done:
9609 mutex_exit(&mmu_ctxp->mmu_lock);
9610 }
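
/*
 * Illustrative wrap-around walk-through (the mmu_nctxs value is
 * assumed): once mmu_cnum reaches mmu_nctxs, say 8192, the routine
 * above bumps mmu_gnum, cross calls the other CPUs sharing this MMU so
 * they raise a TSB exception and pick up INVALID_CONTEXT, flushes the
 * local user TLB entries, and restarts allocation at
 * cnum == NUM_LOCKED_CTXS, skipping the reserved cnums 0 and 1.
 */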
9611
9612
9613 /*
9614 * For multi-threaded process, set the process context to INVALID_CONTEXT
9615 * so that it faults and reloads the MMU state from TL=0. For single-threaded
9616 * process, we can just load the MMU state directly without having to
9617 * set context invalid. Caller must hold the hat lock since we don't
9618 * acquire it here.
9619 */
9620 static void
9621 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9622 {
9623 uint_t cnum;
9624 uint_t pstate_save;
9625
9626 ASSERT(sfmmup != ksfmmup);
9627 ASSERT(sfmmu_hat_lock_held(sfmmup));
9628
9629 kpreempt_disable();
9630
9631 /*
9632 * We check whether the passed-in sfmmup is the same as the
9633 * currently running proc. This makes sure the current proc
9634 * stays single-threaded if it already is.
9635 */
9636 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9637 (curthread->t_procp->p_lwpcnt == 1)) {
9638 /* single-thread */
9639 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9640 if (cnum != INVALID_CONTEXT) {
9641 uint_t curcnum;
9642 /*
9643 * Disable interrupts to prevent a race condition
9644 * with sfmmu_ctx_wrap_around ctx invalidation.
9645 * On sun4v, ctx invalidation involves setting the
9646 * TSB to NULL, hence interrupts should be disabled
9647 * until after sfmmu_load_mmustate is completed.
9648 */
9649 pstate_save = sfmmu_disable_intrs();
9650 curcnum = sfmmu_getctx_sec();
9651 if (curcnum == cnum)
9652 sfmmu_load_mmustate(sfmmup);
9653 sfmmu_enable_intrs(pstate_save);
9654 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9655 }
9656 } else {
9657 /*
9658 * multi-thread
9659 * or when sfmmup is not the same as the curproc.
9660 */
9661 sfmmu_invalidate_ctx(sfmmup);
9662 }
9663
9664 kpreempt_enable();
9665 }
9666
9667
9668 /*
9669 * Replace the specified TSB with a new TSB. This function gets called when
9670 * we grow or shrink a TSB. When swapping in a TSB (TSB_SWAPIN), the
9671 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
9672 * (8K).
9673 *
9674 * Caller must hold the HAT lock, but should assume any tsb_info
9675 * pointers it has are no longer valid after calling this function.
9676 *
9677 * Return values:
9678 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
9679 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
9680 * something to this tsbinfo/TSB
9681 * TSB_SUCCESS Operation succeeded
9682 */
9683 static tsb_replace_rc_t
9684 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
9685 hatlock_t *hatlockp, uint_t flags)
9686 {
9687 struct tsb_info *new_tsbinfo = NULL;
9688 struct tsb_info *curtsb, *prevtsb;
9689 uint_t tte_sz_mask;
9690 int i;
9691
9692 ASSERT(sfmmup != ksfmmup);
9693 ASSERT(sfmmup->sfmmu_ismhat == 0);
9694 ASSERT(sfmmu_hat_lock_held(sfmmup));
9695 ASSERT(szc <= tsb_max_growsize);
9696
9697 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
9698 return (TSB_LOSTRACE);
9699
9700 /*
9701 * Find the tsb_info ahead of this one in the list, and
9702 * also make sure that the tsb_info passed in really
9703 * exists!
9704 */
9705 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9706 curtsb != old_tsbinfo && curtsb != NULL;
9707 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9708 ;
9709 ASSERT(curtsb != NULL);
9710
9711 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9712 /*
9713 * The process is swapped out, so just set the new size
9714 * code. When it swaps back in, we'll allocate a new one
9715 * of the new chosen size.
9716 */
9717 curtsb->tsb_szc = szc;
9718 return (TSB_SUCCESS);
9719 }
9720 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
9721
9722 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
9723
9724 /*
9725 * All initialization is done inside of sfmmu_tsbinfo_alloc().
9726 * If we fail to allocate a TSB, exit.
9727 *
9728 * If tsb grows with new tsb size > 4M and old tsb size < 4M,
9729 * then try 4M slab after the initial alloc fails.
9730 *
9731 * If tsb swapin with tsb size > 4M, then try 4M after the
9732 * initial alloc fails.
9733 */
9734 sfmmu_hat_exit(hatlockp);
9735 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
9736 tte_sz_mask, flags, sfmmup) &&
9737 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
9738 (!(flags & TSB_SWAPIN) &&
9739 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
9740 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
9741 tte_sz_mask, flags, sfmmup))) {
9742 (void) sfmmu_hat_enter(sfmmup);
9743 if (!(flags & TSB_SWAPIN))
9744 SFMMU_STAT(sf_tsb_resize_failures);
9745 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9746 return (TSB_ALLOCFAIL);
9747 }
9748 (void) sfmmu_hat_enter(sfmmup);
9749
9750 /*
9751 * Re-check to make sure somebody else didn't muck with us while we
9752 * didn't hold the HAT lock. If the process swapped out, fine, just
9753 * exit; this can happen if we try to shrink the TSB from the context
9754 * of another process (such as on an ISM unmap), though it is rare.
9755 */
9756 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
9757 SFMMU_STAT(sf_tsb_resize_failures);
9758 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9759 sfmmu_hat_exit(hatlockp);
9760 sfmmu_tsbinfo_free(new_tsbinfo);
9761 (void) sfmmu_hat_enter(sfmmup);
9762 return (TSB_LOSTRACE);
9763 }
9764
9765 #ifdef DEBUG
9766 /* Reverify that the tsb_info still exists.. for debugging only */
9767 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
9768 curtsb != old_tsbinfo && curtsb != NULL;
9769 prevtsb = curtsb, curtsb = curtsb->tsb_next)
9770 ;
9771 ASSERT(curtsb != NULL);
9772 #endif /* DEBUG */
9773
9774 /*
9775 * Quiesce any CPUs running this process on their next TLB miss
9776 * so they atomically see the new tsb_info. We temporarily set the
9777 * context to invalid context so new threads that come on processor
9778 * after we do the xcall to cpusran will also serialize behind the
9779 * HAT lock on TLB miss and will see the new TSB. Since this short
9780 * race with a new thread coming on processor is relatively rare,
9781 * this synchronization mechanism should be cheaper than always
9782 * pausing all CPUs for the duration of the setup, which is what
9783 * the old implementation did. This is particularly true if we are
9784 * copying a huge chunk of memory around during that window.
9785 *
9786 * The memory barriers are to make sure things stay consistent
9787 * with resume() since it does not hold the HAT lock while
9788 * walking the list of tsb_info structures.
9789 */
9790 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
9791 /* The TSB is either growing or shrinking. */
9792 sfmmu_invalidate_ctx(sfmmup);
9793 } else {
9794 /*
9795 * It is illegal to swap in TSBs from a process other
9796 * than a process being swapped in. This in turn
9797 * implies we do not have a valid MMU context here
9798 * since a process needs one to resolve translation
9799 * misses.
9800 */
9801 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
9802 }
9803
9804 #ifdef DEBUG
9805 ASSERT(max_mmu_ctxdoms > 0);
9806
9807 /*
9808 * Process should have INVALID_CONTEXT on all MMUs
9809 */
9810 for (i = 0; i < max_mmu_ctxdoms; i++) {
9811
9812 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
9813 }
9814 #endif
9815
9816 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
9817 membar_stst(); /* strict ordering required */
9818 if (prevtsb)
9819 prevtsb->tsb_next = new_tsbinfo;
9820 else
9821 sfmmup->sfmmu_tsb = new_tsbinfo;
9822 membar_enter(); /* make sure new TSB globally visible */
9823
9824 /*
9825 * We need to migrate TSB entries from the old TSB to the new TSB
9826 * if tsb_remap_ttes is set and the TSB is growing.
9827 */
9828 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
9829 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
9830
9831 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
9832
9833 /*
9834 * Drop the HAT lock to free our old tsb_info.
9835 */
9836 sfmmu_hat_exit(hatlockp);
9837
9838 if ((flags & TSB_GROW) == TSB_GROW) {
9839 SFMMU_STAT(sf_tsb_grow);
9840 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
9841 SFMMU_STAT(sf_tsb_shrink);
9842 }
9843
9844 sfmmu_tsbinfo_free(old_tsbinfo);
9845
9846 (void) sfmmu_hat_enter(sfmmup);
9847 return (TSB_SUCCESS);
9848 }
9849
9850 /*
9851 * This function will re-program hat pgsz array, and invalidate the
9852 * process' context, forcing the process to switch to another
9853 * context on the next TLB miss, and therefore start using the
9854 * TLB that is reprogrammed for the new page sizes.
9855 */
9856 void
9857 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
9858 {
9859 int i;
9860 hatlock_t *hatlockp = NULL;
9861
9862 hatlockp = sfmmu_hat_enter(sfmmup);
9863 /* USIII+-IV+ optimization, requires hat lock */
9864 if (tmp_pgsz) {
9865 for (i = 0; i < mmu_page_sizes; i++)
9866 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
9867 }
9868 SFMMU_STAT(sf_tlb_reprog_pgsz);
9869
9870 sfmmu_invalidate_ctx(sfmmup);
9871
9872 sfmmu_hat_exit(hatlockp);
9873 }
9874
9875 /*
9876 * The scd_rttecnt field in the SCD must be updated to take account of the
9877 * regions which it contains.
9878 */
9879 static void
9880 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
9881 {
9882 uint_t rid;
9883 uint_t i, j;
9884 ulong_t w;
9885 sf_region_t *rgnp;
9886
9887 ASSERT(srdp != NULL);
9888
9889 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
9890 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
9891 continue;
9892 }
9893
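/*
 * Walk the set bits in this bitmap word; each set bit identifies
 * an hme region that belongs to the SCD.
 */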
9894 j = 0;
9895 while (w) {
9896 if (!(w & 0x1)) {
9897 j++;
9898 w >>= 1;
9899 continue;
9900 }
9901 rid = (i << BT_ULSHIFT) | j;
9902 j++;
9903 w >>= 1;
9904
9905 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9906 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9907 rgnp = srdp->srd_hmergnp[rid];
9908 ASSERT(rgnp->rgn_refcnt > 0);
9909 ASSERT(rgnp->rgn_id == rid);
9910
9911 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
9912 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
9913
9914 /*
9915 * Maintain the tsb0 inflation cnt for the regions
9916 * in the SCD.
9917 */
9918 if (rgnp->rgn_pgszc >= TTE4M) {
9919 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
9920 rgnp->rgn_size >>
9921 (TTE_PAGE_SHIFT(TTE8K) + 2);
9922 }
9923 }
9924 }
9925 }
9926
9927 /*
9928 * This function assumes that there are either four or six supported page
9929 * sizes and at most two programmable TLBs, so we need to decide which
9930 * page sizes are most important and then tell the MMU layer so it
9931 * can adjust the TLB page sizes accordingly (if supported).
9932 *
9933 * If these assumptions change, this function will need to be
9934 * updated to support whatever the new limits are.
9935 *
9936 * The growing flag is nonzero if we are growing the address space,
9937 * and zero if it is shrinking. This allows us to decide whether
9938 * to grow or shrink our TSB, depending upon available memory
9939 * conditions.
9940 */
9941 static void
9942 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
9943 {
9944 uint64_t ttecnt[MMU_PAGE_SIZES];
9945 uint64_t tte8k_cnt, tte4m_cnt;
9946 uint8_t i;
9947 int sectsb_thresh;
9948
9949 /*
9950 * Kernel threads, processes with small address spaces not using
9951 * large pages, and dummy ISM HATs need not apply.
9952 */
9953 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
9954 return;
9955
9956 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
9957 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
9958 return;
9959
9960 for (i = 0; i < mmu_page_sizes; i++) {
9961 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
9962 sfmmup->sfmmu_ismttecnt[i];
9963 }
9964
9965 /* Check pagesizes in use, and possibly reprogram DTLB. */
9966 if (&mmu_check_page_sizes)
9967 mmu_check_page_sizes(sfmmup, ttecnt);
9968
9969 /*
9970 * Calculate the number of 8k ttes to represent the span of these
9971 * pages.
9972 */
9973 tte8k_cnt = ttecnt[TTE8K] +
9974 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
9975 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
9976 if (mmu_page_sizes == max_mmu_page_sizes) {
9977 tte4m_cnt = ttecnt[TTE4M] +
9978 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
9979 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
9980 } else {
9981 tte4m_cnt = ttecnt[TTE4M];
9982 }
9983
9984 /*
9985 * Inflate tte8k_cnt to allow for region large page allocation failure.
9986 */
9987 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
9988
9989 /*
9990 * Inflate TSB sizes by a factor of 2 if this process
9991 * uses 4M text pages to minimize extra conflict misses
9992 * in the first TSB since without counting text pages
9993 * 8K TSB may become too small.
9994 *
9995 * Also double the size of the second TSB to minimize
9996 * extra conflict misses due to competition between 4M text pages
9997 * and data pages.
9998 *
9999 * We need to adjust the second TSB allocation threshold by the
10000 * inflation factor, since there is no point in creating a second
10001 * TSB when we know all the mappings can fit in the I/D TLBs.
10002 */
10003 sectsb_thresh = tsb_sectsb_threshold;
10004 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10005 tte8k_cnt <<= 1;
10006 tte4m_cnt <<= 1;
10007 sectsb_thresh <<= 1;
10008 }
10009
10010 /*
10011 * Check to see if our TSB is the right size; we may need to
10012 * grow or shrink it. If the process is small, our work is
10013 * finished at this point.
10014 */
10015 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10016 return;
10017 }
10018 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10019 }
10020
10021 static void
10022 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10023 uint64_t tte4m_cnt, int sectsb_thresh)
10024 {
10025 int tsb_bits;
10026 uint_t tsb_szc;
10027 struct tsb_info *tsbinfop;
10028 hatlock_t *hatlockp = NULL;
10029
10030 hatlockp = sfmmu_hat_enter(sfmmup);
10031 ASSERT(hatlockp != NULL);
10032 tsbinfop = sfmmup->sfmmu_tsb;
10033 ASSERT(tsbinfop != NULL);
10034
10035 /*
10036 * If we're growing, select the size based on RSS. If we're
10037 * shrinking, leave some room so we don't have to turn around and
10038 * grow again immediately.
10039 */
10040 if (growing)
10041 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10042 else
10043 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10044
10045 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10046 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10047 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10048 hatlockp, TSB_SHRINK);
10049 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10050 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10051 hatlockp, TSB_GROW);
10052 }
10053 tsbinfop = sfmmup->sfmmu_tsb;
10054
10055 /*
10056 * With the TLB and first TSB out of the way, we need to see if
10057 * we need a second TSB for 4M pages. If we managed to reprogram
10058 * the TLB page sizes above, the process will start using this new
10059 * TSB right away; otherwise, it will start using it on the next
10060 * context switch. Either way, it's no big deal so there's no
10061 * synchronization with the trap handlers here unless we grow the
10062 * TSB (in which case it's required to prevent using the old one
10063 * after it's freed). Note: second tsb is required for 32M/256M
10064 * page sizes.
10065 */
10066 if (tte4m_cnt > sectsb_thresh) {
10067 /*
10068 * If we're growing, select the size based on RSS. If we're
10069 * shrinking, leave some room so we don't have to turn
10070 * around and grow again immediately.
10071 */
10072 if (growing)
10073 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10074 else
10075 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10076 if (tsbinfop->tsb_next == NULL) {
10077 struct tsb_info *newtsb;
10078 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10079 0 : TSB_ALLOC;
10080
10081 sfmmu_hat_exit(hatlockp);
10082
10083 /*
10084 * Try to allocate a TSB for 4[32|256]M pages. If we
10085 * can't get the size we want, retry w/a minimum sized
10086 * TSB. If that still didn't work, give up; we can
10087 * still run without one.
10088 */
10089 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10090 TSB4M|TSB32M|TSB256M:TSB4M;
10091 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10092 allocflags, sfmmup)) &&
10093 (tsb_szc <= TSB_4M_SZCODE ||
10094 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10095 tsb_bits, allocflags, sfmmup)) &&
10096 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10097 tsb_bits, allocflags, sfmmup)) {
10098 return;
10099 }
10100
10101 hatlockp = sfmmu_hat_enter(sfmmup);
10102
10103 sfmmu_invalidate_ctx(sfmmup);
10104
10105 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10106 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10107 SFMMU_STAT(sf_tsb_sectsb_create);
10108 sfmmu_hat_exit(hatlockp);
10109 return;
10110 } else {
10111 /*
10112 * It's annoying, but possible for us
10113 * to get here.. we dropped the HAT lock
10114 * because of locking order in the kmem
10115 * allocator, and while we were off getting
10116 * our memory, some other thread decided to
10117 * do us a favor and won the race to get a
10118 * second TSB for this process. Sigh.
10119 */
10120 sfmmu_hat_exit(hatlockp);
10121 sfmmu_tsbinfo_free(newtsb);
10122 return;
10123 }
10124 }
10125
10126 /*
10127 * We have a second TSB, see if it's big enough.
10128 */
10129 tsbinfop = tsbinfop->tsb_next;
10130
10131 /*
10132 * Check to see if our second TSB is the right size;
10133 * we may need to grow or shrink it.
10134 * To prevent thrashing (e.g. growing the TSB on a
10135 * subsequent map operation), only try to shrink if
10136 * the TSB reach exceeds twice the virtual address
10137 * space size.
10138 */
10139 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10140 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10141 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10142 tsb_szc, hatlockp, TSB_SHRINK);
10143 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10144 TSB_OK_GROW()) {
10145 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10146 tsb_szc, hatlockp, TSB_GROW);
10147 }
10148 }
10149
10150 sfmmu_hat_exit(hatlockp);
10151 }
10152
10153 /*
10154 * Free up a sfmmu
10155 * Since the sfmmu is currently embedded in the hat struct we simply zero
10156 * out our fields and free up the ism map blk list if any.
10157 */
10158 static void
10159 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10160 {
10161 ism_blk_t *blkp, *nx_blkp;
10162 #ifdef DEBUG
10163 ism_map_t *map;
10164 int i;
10165 #endif
10166
10167 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10168 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10169 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10170 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10171 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10172 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10173 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10174
10175 sfmmup->sfmmu_free = 0;
10176 sfmmup->sfmmu_ismhat = 0;
10177
10178 blkp = sfmmup->sfmmu_iblk;
10179 sfmmup->sfmmu_iblk = NULL;
10180
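/*
 * Walk the ism map block list, returning each block to the
 * ism_blk_cache.
 */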
10181 while (blkp) {
10182 #ifdef DEBUG
10183 map = blkp->iblk_maps;
10184 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10185 ASSERT(map[i].imap_seg == 0);
10186 ASSERT(map[i].imap_ismhat == NULL);
10187 ASSERT(map[i].imap_ment == NULL);
10188 }
10189 #endif
10190 nx_blkp = blkp->iblk_next;
10191 blkp->iblk_next = NULL;
10192 blkp->iblk_nextpa = (uint64_t)-1;
10193 kmem_cache_free(ism_blk_cache, blkp);
10194 blkp = nx_blkp;
10195 }
10196 }
10197
10198 /*
10199 * Locking primitives accessed by HATLOCK macros
10200 */
10201
10202 #define SFMMU_SPL_MTX (0x0)
10203 #define SFMMU_ML_MTX (0x1)
10204
10205 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10206 SPL_HASH(pg) : MLIST_HASH(pg))
10207
10208 kmutex_t *
10209 sfmmu_page_enter(struct page *pp)
10210 {
10211 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10212 }
10213
10214 void
10215 sfmmu_page_exit(kmutex_t *spl)
10216 {
10217 mutex_exit(spl);
10218 }
10219
10220 int
10221 sfmmu_page_spl_held(struct page *pp)
10222 {
10223 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10224 }
10225
10226 kmutex_t *
10227 sfmmu_mlist_enter(struct page *pp)
10228 {
10229 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10230 }
10231
10232 void
10233 sfmmu_mlist_exit(kmutex_t *mml)
10234 {
10235 mutex_exit(mml);
10236 }
10237
10238 int
10239 sfmmu_mlist_held(struct page *pp)
10240 {
10241
10242 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10243 }
10244
10245 /*
10246 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For
10247 * sfmmu_mlist_enter() case mml_table lock array is used and for
10248 * sfmmu_page_enter() sfmmu_page_lock lock array is used.
10249 *
10250 * The lock is taken on a root page so that it protects an operation on all
10251 * constituent pages of a large page pp belongs to.
10252 *
10253 * The routine takes a lock from the appropriate array. The lock is determined
10254 * by hashing the root page. After taking the lock this routine checks if the
10255 * root page has the same size code that was used to determine the root (i.e.
10256 * that the root hasn't changed). If the root page has the expected p_szc field we
10257 * have the right lock and it's returned to the caller. If root's p_szc
10258 * decreased we release the lock and retry from the beginning. This case can
10259 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10260 * value and taking the lock. The number of retries due to p_szc decrease is
10261 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10262 * determined by hashing pp itself.
10263 *
10264 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10265 * possible that p_szc can increase. To increase p_szc a thread has to lock
10266 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10267 * callers that don't hold a page locked recheck if hmeblk through which pp
10268 * was found still maps this pp. If it doesn't map it anymore returned lock
10269 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10270 * p_szc increase after taking the lock it returns this lock without further
10271 * retries because in this case the caller doesn't care about which lock was
10272 * taken. The caller will drop it right away.
10273 *
10274 * After the routine returns it's guaranteed that hat_page_demote() can't
10275 * change p_szc field of any of constituent pages of a large page pp belongs
10276 * to as long as pp was either locked at least SHARED prior to this call or
10277 * the caller finds that hment that pointed to this pp still references this
10278 * pp (this also assumes that the caller holds hme hash bucket lock so that
10279 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10280 * hat_pageunload()).
10281 */
10282 static kmutex_t *
10283 sfmmu_mlspl_enter(struct page *pp, int type)
10284 {
10285 kmutex_t *mtx;
10286 uint_t prev_rszc = UINT_MAX;
10287 page_t *rootpp;
10288 uint_t szc;
10289 uint_t rszc;
10290 uint_t pszc = pp->p_szc;
10291
10292 ASSERT(pp != NULL);
10293
10294 again:
10295 if (pszc == 0) {
10296 mtx = SFMMU_MLSPL_MTX(type, pp);
10297 mutex_enter(mtx);
10298 return (mtx);
10299 }
10300
10301 /* The lock lives in the root page */
10302 rootpp = PP_GROUPLEADER(pp, pszc);
10303 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10304 mutex_enter(mtx);
10305
10306 /*
10307 * Return mml in the following 3 cases:
10308 *
10309 * 1) If pp itself is root since if its p_szc decreased before we took
10310 * the lock pp is still the root of smaller szc page. And if its p_szc
10311 * increased it doesn't matter what lock we return (see comment in
10312 * front of this routine).
10313 *
10314 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10315 * large page we have the right lock since any previous potential
10316 * hat_page_demote() is done demoting from greater than current root's
10317 * p_szc because hat_page_demote() changes root's p_szc last. No
10318 * further hat_page_demote() can start or be in progress since it
10319 * would need the same lock we currently hold.
10320 *
10321 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10322 * matter what lock we return (see comment in front of this routine).
10323 */
10324 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10325 rszc >= prev_rszc) {
10326 return (mtx);
10327 }
10328
10329 /*
10330 * hat_page_demote() could have decreased root's p_szc.
10331 * In this case pp's p_szc must also be smaller than pszc.
10332 * Retry.
10333 */
10334 if (rszc < pszc) {
10335 szc = pp->p_szc;
10336 if (szc < pszc) {
10337 mutex_exit(mtx);
10338 pszc = szc;
10339 goto again;
10340 }
10341 /*
10342 * pp's p_szc increased after it was decreased.
10343 * page cannot be mapped. Return current lock. The caller
10344 * will drop it right away.
10345 */
10346 return (mtx);
10347 }
10348
10349 /*
10350 * root's p_szc is greater than pp's p_szc.
10351 * hat_page_demote() is not done with all pages
10352 * yet. Wait for it to complete.
10353 */
10354 mutex_exit(mtx);
10355 rootpp = PP_GROUPLEADER(rootpp, rszc);
10356 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10357 mutex_enter(mtx);
10358 mutex_exit(mtx);
10359 prev_rszc = rszc;
10360 goto again;
10361 }
10362
10363 static int
10364 sfmmu_mlspl_held(struct page *pp, int type)
10365 {
10366 kmutex_t *mtx;
10367
10368 ASSERT(pp != NULL);
10369 /* The lock lives in the root page */
10370 pp = PP_PAGEROOT(pp);
10371 ASSERT(pp != NULL);
10372
10373 mtx = SFMMU_MLSPL_MTX(type, pp);
10374 return (MUTEX_HELD(mtx));
10375 }
10376
10377 static uint_t
10378 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10379 {
10380 struct hme_blk *hblkp;
10381
10382
10383 if (freehblkp != NULL) {
10384 mutex_enter(&freehblkp_lock);
10385 if (freehblkp != NULL) {
10386 /*
10387 * If the current thread is owning hblk_reserve OR
10388 * critical request from sfmmu_hblk_steal()
10389 * let it succeed even if freehblkcnt is really low.
10390 */
10391 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10392 SFMMU_STAT(sf_get_free_throttle);
10393 mutex_exit(&freehblkp_lock);
10394 return (0);
10395 }
10396 freehblkcnt--;
10397 *hmeblkpp = freehblkp;
10398 hblkp = *hmeblkpp;
10399 freehblkp = hblkp->hblk_next;
10400 mutex_exit(&freehblkp_lock);
10401 hblkp->hblk_next = NULL;
10402 SFMMU_STAT(sf_get_free_success);
10403
10404 ASSERT(hblkp->hblk_hmecnt == 0);
10405 ASSERT(hblkp->hblk_vcnt == 0);
10406 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10407
10408 return (1);
10409 }
10410 mutex_exit(&freehblkp_lock);
10411 }
10412
10413 /* Check cpu hblk pending queues */
10414 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10415 hblkp = *hmeblkpp;
10416 hblkp->hblk_next = NULL;
10417 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10418
10419 ASSERT(hblkp->hblk_hmecnt == 0);
10420 ASSERT(hblkp->hblk_vcnt == 0);
10421
10422 return (1);
10423 }
10424
10425 SFMMU_STAT(sf_get_free_fail);
10426 return (0);
10427 }
10428
10429 static uint_t
10430 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10431 {
10432 struct hme_blk *hblkp;
10433
10434 ASSERT(hmeblkp->hblk_hmecnt == 0);
10435 ASSERT(hmeblkp->hblk_vcnt == 0);
10436 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10437
10438 /*
10439 * If the current thread is mapping into kernel space,
10440 * let it succeed even if freehblkcnt is at its max
10441 * so that it will avoid freeing it to kmem.
10442 * This will prevent stack overflow due to
10443 * possible recursion since kmem_cache_free()
10444 * might require creation of a slab which
10445 * in turn needs an hmeblk to map that slab;
10446 * let's break this vicious chain at the first
10447 * opportunity.
10448 */
10449 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10450 mutex_enter(&freehblkp_lock);
10451 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10452 SFMMU_STAT(sf_put_free_success);
10453 freehblkcnt++;
10454 hmeblkp->hblk_next = freehblkp;
10455 freehblkp = hmeblkp;
10456 mutex_exit(&freehblkp_lock);
10457 return (1);
10458 }
10459 mutex_exit(&freehblkp_lock);
10460 }
10461
10462 /*
10463 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10464 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10465 * we are not in the process of mapping into kernel space.
10466 */
10467 ASSERT(!critical);
10468 while (freehblkcnt > HBLK_RESERVE_CNT) {
10469 mutex_enter(&freehblkp_lock);
10470 if (freehblkcnt > HBLK_RESERVE_CNT) {
10471 freehblkcnt--;
10472 hblkp = freehblkp;
10473 freehblkp = hblkp->hblk_next;
10474 mutex_exit(&freehblkp_lock);
10475 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10476 kmem_cache_free(sfmmu8_cache, hblkp);
10477 continue;
10478 }
10479 mutex_exit(&freehblkp_lock);
10480 }
10481 SFMMU_STAT(sf_put_free_fail);
10482 return (0);
10483 }
10484
10485 static void
10486 sfmmu_hblk_swap(struct hme_blk *new)
10487 {
10488 struct hme_blk *old, *hblkp, *prev;
10489 uint64_t newpa;
10490 caddr_t base, vaddr, endaddr;
10491 struct hmehash_bucket *hmebp;
10492 struct sf_hment *osfhme, *nsfhme;
10493 page_t *pp;
10494 kmutex_t *pml;
10495 tte_t tte;
10496 struct hme_blk *list = NULL;
10497
10498 #ifdef DEBUG
10499 hmeblk_tag hblktag;
10500 struct hme_blk *found;
10501 #endif
10502 old = HBLK_RESERVE;
10503 ASSERT(!old->hblk_shared);
10504
10505 /*
10506 * save pa before bcopy clobbers it
10507 */
10508 newpa = new->hblk_nextpa;
10509
10510 base = (caddr_t)get_hblk_base(old);
10511 endaddr = base + get_hblk_span(old);
10512
10513 /*
10514 * acquire hash bucket lock.
10515 */
10516 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10517 SFMMU_INVALID_SHMERID);
10518
10519 /*
10520 * copy contents from old to new
10521 */
10522 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10523
10524 /*
10525 * add new to hash chain
10526 */
10527 sfmmu_hblk_hash_add(hmebp, new, newpa);
10528
10529 /*
10530 * search hash chain for hblk_reserve; this needs to be performed
10531 * after adding new, otherwise prev won't correspond to the hblk which
10532 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10533 * remove old later.
10534 */
10535 for (prev = NULL,
10536 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10537 prev = hblkp, hblkp = hblkp->hblk_next)
10538 ;
10539
10540 if (hblkp != old)
10541 panic("sfmmu_hblk_swap: hblk_reserve not found");
10542
10543 /*
10544 * p_mapping list is still pointing to hments in hblk_reserve;
10545 * fix up p_mapping list so that they point to hments in new.
10546 *
10547 * Since all these mappings are created by hblk_reserve_thread
10548 * on the way and it's using at least one of the buffers from each of
10549 * the newly minted slabs, there is no danger of any of these
10550 * mappings getting unloaded by another thread.
10551 *
10552 * tsbmiss could only modify ref/mod bits of hments in old/new.
10553 * Since all of these hments hold mappings established by segkmem
10554 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits
10555 * have no meaning for the mappings in hblk_reserve. hments in
10556 * old and new are identical except for ref/mod bits.
10557 */
10558 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10559
10560 HBLKTOHME(osfhme, old, vaddr);
10561 sfmmu_copytte(&osfhme->hme_tte, &tte);
10562
10563 if (TTE_IS_VALID(&tte)) {
10564 if ((pp = osfhme->hme_page) == NULL)
10565 panic("sfmmu_hblk_swap: page not mapped");
10566
10567 pml = sfmmu_mlist_enter(pp);
10568
10569 if (pp != osfhme->hme_page)
10570 panic("sfmmu_hblk_swap: mapping changed");
10571
10572 HBLKTOHME(nsfhme, new, vaddr);
10573
10574 HME_ADD(nsfhme, pp);
10575 HME_SUB(osfhme, pp);
10576
10577 sfmmu_mlist_exit(pml);
10578 }
10579 }
10580
10581 /*
10582 * remove old from hash chain
10583 */
10584 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10585
10586 #ifdef DEBUG
10587
10588 hblktag.htag_id = ksfmmup;
10589 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10590 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10591 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10592 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10593
10594 if (found != new)
10595 panic("sfmmu_hblk_swap: new hblk not found");
10596 #endif
10597
10598 SFMMU_HASH_UNLOCK(hmebp);
10599
10600 /*
10601 * Reset hblk_reserve
10602 */
10603 bzero((void *)old, HME8BLK_SZ);
10604 old->hblk_nextpa = va_to_pa((caddr_t)old);
10605 }
10606
10607 /*
10608 * Grab the mlist mutex for both pages passed in.
10609 *
10610 * low and high will be returned as pointers to the mutexes for these pages.
10611 * low refers to the mutex residing in the lower bin of the mlist hash, while
10612 * high refers to the mutex residing in the higher bin of the mlist hash. This
10613 * is due to the locking order restrictions on the same thread grabbing
10614 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10615 *
10616 * If both pages hash to the same mutex, only grab that single mutex, and
10617 * high will be returned as NULL.
10618 * If the pages hash to different bins in the hash, grab the lower addressed
10619 * lock first and then the higher addressed lock in order to follow the locking
10620 * rules involved with the same thread grabbing multiple mlist mutexes.
10621 * low and high will both have non-NULL values.
10622 */
10623 static void
10624 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10625 kmutex_t **low, kmutex_t **high)
10626 {
10627 kmutex_t *mml_targ, *mml_repl;
10628
10629 /*
10630 * no need to do the dance around szc as in sfmmu_mlist_enter()
10631 * because this routine is only called by hat_page_relocate() and all
10632 * targ and repl pages are already locked EXCL so szc can't change.
10633 */
10634
10635 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10636 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10637
10638 if (mml_targ == mml_repl) {
10639 *low = mml_targ;
10640 *high = NULL;
10641 } else {
10642 if (mml_targ < mml_repl) {
10643 *low = mml_targ;
10644 *high = mml_repl;
10645 } else {
10646 *low = mml_repl;
10647 *high = mml_targ;
10648 }
10649 }
10650
10651 mutex_enter(*low);
10652 if (*high)
10653 mutex_enter(*high);
10654 }
10655
10656 static void
10657 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10658 {
10659 if (high)
10660 mutex_exit(high);
10661 mutex_exit(low);
10662 }
10663
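/*
 * Acquire the hash-selected hat lock for a user hat and return it.
 * The kernel hat (ksfmmup) is never locked here, so NULL is returned
 * for it.
 */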
10664 static hatlock_t *
10665 sfmmu_hat_enter(sfmmu_t *sfmmup)
10666 {
10667 hatlock_t *hatlockp;
10668
10669 if (sfmmup != ksfmmup) {
10670 hatlockp = TSB_HASH(sfmmup);
10671 mutex_enter(HATLOCK_MUTEXP(hatlockp));
10672 return (hatlockp);
10673 }
10674 return (NULL);
10675 }
10676
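/*
 * Non-blocking variant of sfmmu_hat_enter(); returns NULL if the lock
 * is already held or if sfmmup is the kernel hat.
 */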
10677 static hatlock_t *
10678 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
10679 {
10680 hatlock_t *hatlockp;
10681
10682 if (sfmmup != ksfmmup) {
10683 hatlockp = TSB_HASH(sfmmup);
10684 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
10685 return (NULL);
10686 return (hatlockp);
10687 }
10688 return (NULL);
10689 }
10690
10691 static void
10692 sfmmu_hat_exit(hatlock_t *hatlockp)
10693 {
10694 if (hatlockp != NULL)
10695 mutex_exit(HATLOCK_MUTEXP(hatlockp));
10696 }
10697
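/*
 * Acquire every hat lock in ascending index order.
 */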
10698 static void
10699 sfmmu_hat_lock_all(void)
10700 {
10701 int i;
10702 for (i = 0; i < SFMMU_NUM_LOCK; i++)
10703 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
10704 }
10705
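/*
 * Release every hat lock in the reverse of the order they were acquired.
 */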
10706 static void
10707 sfmmu_hat_unlock_all(void)
10708 {
10709 int i;
10710 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
10711 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
10712 }
10713
10714 int
10715 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
10716 {
10717 ASSERT(sfmmup != ksfmmup);
10718 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
10719 }
10720
10721 /*
10722 * Locking primitives to provide consistency between ISM unmap
10723 * and other operations. Since ISM unmap can take a long time, we
10724 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
10725 * contention on the hatlock buckets while ISM segments are being
10726 * unmapped. The tradeoff is that the flags don't prevent priority
10727 * inversion from occurring, so we must request kernel priority in
10728 * case we have to sleep to keep from getting buried while holding
10729 * the HAT_ISMBUSY flag set, which in turn could block other kernel
10730 * threads from running (for example, in sfmmu_uvatopfn()).
10731 */
10732 static void
10733 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10734 {
10735 hatlock_t *hatlockp;
10736
10737 THREAD_KPRI_REQUEST();
10738 if (!hatlock_held)
10739 hatlockp = sfmmu_hat_enter(sfmmup);
10740 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10741 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10742 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10743 if (!hatlock_held)
10744 sfmmu_hat_exit(hatlockp);
10745 }
10746
10747 static void
10748 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
10749 {
10750 hatlock_t *hatlockp;
10751
10752 if (!hatlock_held)
10753 hatlockp = sfmmu_hat_enter(sfmmup);
10754 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10755 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10756 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10757 if (!hatlock_held)
10758 sfmmu_hat_exit(hatlockp);
10759 THREAD_KPRI_RELEASE();
10760 }
10761
10762 /*
10763 *
10764 * Algorithm:
10765 *
10766 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10767 * hblks.
10768 *
10769 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10770 *
10771 * (a) try to return an hblk from reserve pool of free hblks;
10772 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
10773 * and return hblk_reserve.
10774 *
10775 * (3) call kmem_cache_alloc() to allocate hblk;
10776 *
10777 * (a) if hblk_reserve_lock is held by the current thread,
10778 * atomically replace hblk_reserve by the hblk that is
10779 * returned by kmem_cache_alloc; release hblk_reserve_lock
10780 * and call kmem_cache_alloc() again.
10781 * (b) if reserve pool is not full, add the hblk that is
10782 * returned by kmem_cache_alloc to reserve pool and
10783 * call kmem_cache_alloc again.
10784 *
10785 */
10786 static struct hme_blk *
10787 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10788 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10789 uint_t flags, uint_t rid)
10790 {
10791 struct hme_blk *hmeblkp = NULL;
10792 struct hme_blk *newhblkp;
10793 struct hme_blk *shw_hblkp = NULL;
10794 struct kmem_cache *sfmmu_cache = NULL;
10795 uint64_t hblkpa;
10796 ulong_t index;
10797 uint_t owner; /* set to 1 if using hblk_reserve */
10798 uint_t forcefree;
10799 int sleep;
10800 sf_srd_t *srdp;
10801 sf_region_t *rgnp;
10802
10803 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
10804 ASSERT(hblktag.htag_rid == rid);
10805 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
10806 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
10807 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
10808
10809 /*
10810 * If segkmem is not created yet, allocate from static hmeblks
10811 * created at the end of startup_modules(). See the block comment
10812 * in startup_modules() describing how we estimate the number of
10813 * static hmeblks that will be needed during re-map.
10814 */
10815 if (!hblk_alloc_dynamic) {
10816
10817 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10818
10819 if (size == TTE8K) {
10820 index = nucleus_hblk8.index;
10821 if (index >= nucleus_hblk8.len) {
10822 /*
10823 * If we panic here, see startup_modules() to
10824 * make sure that we are calculating the
10825 * number of hblk8's that we need correctly.
10826 */
10827 prom_panic("no nucleus hblk8 to allocate");
10828 }
10829 hmeblkp =
10830 (struct hme_blk *)&nucleus_hblk8.list[index];
10831 nucleus_hblk8.index++;
10832 SFMMU_STAT(sf_hblk8_nalloc);
10833 } else {
10834 index = nucleus_hblk1.index;
10835 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
10836 /*
10837 * If we panic here, see startup_modules().
10838 * Most likely you need to update the
10839 * calculation of the number of hblk1 elements
10840 * that the kernel needs to boot.
10841 */
10842 prom_panic("no nucleus hblk1 to allocate");
10843 }
10844 hmeblkp =
10845 (struct hme_blk *)&nucleus_hblk1.list[index];
10846 nucleus_hblk1.index++;
10847 SFMMU_STAT(sf_hblk1_nalloc);
10848 }
10849
10850 goto hblk_init;
10851 }
10852
10853 SFMMU_HASH_UNLOCK(hmebp);
10854
10855 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
10856 if (mmu_page_sizes == max_mmu_page_sizes) {
10857 if (size < TTE256M)
10858 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10859 size, flags);
10860 } else {
10861 if (size < TTE4M)
10862 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
10863 size, flags);
10864 }
10865 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
10866 /*
10867 * Shared hmes use per-region bitmaps in rgn_hmeflags
10868 * rather than shadow hmeblks to keep track of the
10869 * mapping sizes which have been allocated for the region.
10870 * Here we clean up old invalid hmeblks with this rid,
10871 * which may be left around by pageunload().
10872 */
10873 int ttesz;
10874 caddr_t va;
10875 caddr_t eva = vaddr + TTEBYTES(size);
10876
10877 ASSERT(sfmmup != KHATID);
10878
10879 srdp = sfmmup->sfmmu_srdp;
10880 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
10881 rgnp = srdp->srd_hmergnp[rid];
10882 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
10883 ASSERT(rgnp->rgn_refcnt != 0);
10884 ASSERT(size <= rgnp->rgn_pgszc);
10885
10886 ttesz = HBLK_MIN_TTESZ;
10887 do {
10888 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
10889 continue;
10890 }
10891
10892 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
10893 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
10894 } else if (ttesz < size) {
10895 for (va = vaddr; va < eva;
10896 va += TTEBYTES(ttesz)) {
10897 sfmmu_cleanup_rhblk(srdp, va, rid,
10898 ttesz);
10899 }
10900 }
10901 } while (++ttesz <= rgnp->rgn_pgszc);
10902 }
10903
10904 fill_hblk:
10905 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
10906
10907 if (owner && size == TTE8K) {
10908
10909 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
10910 /*
10911 * We are really in a tight spot. We already own
10912 * hblk_reserve and we need another hblk. In anticipation
10913 * of this kind of scenario, we specifically set aside
10914 * HBLK_RESERVE_MIN number of hblks to be used exclusively
10915 * by owner of hblk_reserve.
10916 */
10917 SFMMU_STAT(sf_hblk_recurse_cnt);
10918
10919 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
10920 panic("sfmmu_hblk_alloc: reserve list is empty");
10921
10922 goto hblk_verify;
10923 }
10924
10925 ASSERT(!owner);
10926
10927 if ((flags & HAT_NO_KALLOC) == 0) {
10928
10929 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
10930 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
10931
10932 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
10933 hmeblkp = sfmmu_hblk_steal(size);
10934 } else {
10935 /*
10936 * if we are the owner of hblk_reserve,
10937 * swap hblk_reserve with hmeblkp and
10938 * start a fresh life. Hope things go
10939 * better this time.
10940 */
10941 if (hblk_reserve_thread == curthread) {
10942 ASSERT(sfmmu_cache == sfmmu8_cache);
10943 sfmmu_hblk_swap(hmeblkp);
10944 hblk_reserve_thread = NULL;
10945 mutex_exit(&hblk_reserve_lock);
10946 goto fill_hblk;
10947 }
10948 /*
10949 * let's donate this hblk to our reserve list if
10950 * we are not mapping kernel range
10951 */
10952 if (size == TTE8K && sfmmup != KHATID) {
10953 if (sfmmu_put_free_hblk(hmeblkp, 0))
10954 goto fill_hblk;
10955 }
10956 }
10957 } else {
10958 /*
10959 * We are here to map the slab in sfmmu8_cache; let's
10960 * check if we could tap our reserve list; if successful,
10961 * this will avoid the pain of going thru sfmmu_hblk_swap
10962 */
10963 SFMMU_STAT(sf_hblk_slab_cnt);
10964 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
10965 /*
10966 * let's start hblk_reserve dance
10967 */
10968 SFMMU_STAT(sf_hblk_reserve_cnt);
10969 owner = 1;
10970 mutex_enter(&hblk_reserve_lock);
10971 hmeblkp = HBLK_RESERVE;
10972 hblk_reserve_thread = curthread;
10973 }
10974 }
10975
10976 hblk_verify:
10977 ASSERT(hmeblkp != NULL);
10978 set_hblk_sz(hmeblkp, size);
10979 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10980 SFMMU_HASH_LOCK(hmebp);
10981 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
10982 if (newhblkp != NULL) {
10983 SFMMU_HASH_UNLOCK(hmebp);
10984 if (hmeblkp != HBLK_RESERVE) {
10985 /*
10986 * This is really tricky!
10987 *
10988 * vmem_alloc(vmem_seg_arena)
10989 * vmem_alloc(vmem_internal_arena)
10990 * segkmem_alloc(heap_arena)
10991 * vmem_alloc(heap_arena)
10992 * page_create()
10993 * hat_memload()
10994 * kmem_cache_free()
10995 * kmem_cache_alloc()
10996 * kmem_slab_create()
10997 * vmem_alloc(kmem_internal_arena)
10998 * segkmem_alloc(heap_arena)
10999 * vmem_alloc(heap_arena)
11000 * page_create()
11001 * hat_memload()
11002 * kmem_cache_free()
11003 * ...
11004 *
11005 * Thus, hat_memload() could call kmem_cache_free
11006 * enough times that we could easily
11007 * hit the bottom of the stack or run out of the reserve
11008 * list of vmem_seg structs. So, we must donate
11009 * this hblk to reserve list if it's allocated
11010 * from sfmmu8_cache *and* mapping kernel range.
11011 * We don't need to worry about freeing hmeblk1's
11012 * to kmem since they don't map any kmem slabs.
11013 *
11014 * Note: When segkmem supports largepages, we must
11015 * free hmeblk1's to reserve list as well.
11016 */
11017 forcefree = (sfmmup == KHATID) ? 1 : 0;
11018 if (size == TTE8K &&
11019 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11020 goto re_verify;
11021 }
11022 ASSERT(sfmmup != KHATID);
11023 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11024 } else {
11025 /*
11026 * Hey! we don't need hblk_reserve any more.
11027 */
11028 ASSERT(owner);
11029 hblk_reserve_thread = NULL;
11030 mutex_exit(&hblk_reserve_lock);
11031 owner = 0;
11032 }
11033 re_verify:
11034 /*
11035 * let's check if the goodies are still present
11036 */
11037 SFMMU_HASH_LOCK(hmebp);
11038 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11039 if (newhblkp != NULL) {
11040 /*
11041 * return newhblkp if it's not hblk_reserve;
11042 * if newhblkp is hblk_reserve, return it
11043 * _only if_ we are the owner of hblk_reserve.
11044 */
11045 if (newhblkp != HBLK_RESERVE || owner) {
11046 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11047 newhblkp->hblk_shared);
11048 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11049 !newhblkp->hblk_shared);
11050 return (newhblkp);
11051 } else {
11052 /*
11053 * we just hit hblk_reserve in the hash and
11054 * we are not the owner of that;
11055 *
11056 * block until hblk_reserve_thread completes
11057 * swapping hblk_reserve and try the dance
11058 * once again.
11059 */
11060 SFMMU_HASH_UNLOCK(hmebp);
11061 mutex_enter(&hblk_reserve_lock);
11062 mutex_exit(&hblk_reserve_lock);
11063 SFMMU_STAT(sf_hblk_reserve_hit);
11064 goto fill_hblk;
11065 }
11066 } else {
11067 /*
11068 * it's no more! try the dance once again.
11069 */
11070 SFMMU_HASH_UNLOCK(hmebp);
11071 goto fill_hblk;
11072 }
11073 }
11074
11075 hblk_init:
11076 if (SFMMU_IS_SHMERID_VALID(rid)) {
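/*
 * Record in the region's hmeflags bitmap that an hmeblk of this
 * tte size has been allocated for the region.
 */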
11077 uint16_t tteflag = 0x1 <<
11078 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11079
11080 if (!(rgnp->rgn_hmeflags & tteflag)) {
11081 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11082 }
11083 hmeblkp->hblk_shared = 1;
11084 } else {
11085 hmeblkp->hblk_shared = 0;
11086 }
11087 set_hblk_sz(hmeblkp, size);
11088 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11089 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11090 hmeblkp->hblk_tag = hblktag;
11091 hmeblkp->hblk_shadow = shw_hblkp;
11092 hblkpa = hmeblkp->hblk_nextpa;
11093 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11094
11095 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11096 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11097 ASSERT(hmeblkp->hblk_hmecnt == 0);
11098 ASSERT(hmeblkp->hblk_vcnt == 0);
11099 ASSERT(hmeblkp->hblk_lckcnt == 0);
11100 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11101 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11102 return (hmeblkp);
11103 }
11104
11105 /*
11106 * This function cleans up the hme_blk and returns it to the free list.
11107 */
11108 /* ARGSUSED */
11109 static void
11110 sfmmu_hblk_free(struct hme_blk **listp)
11111 {
11112 struct hme_blk *hmeblkp, *next_hmeblkp;
11113 int size;
11114 uint_t critical;
11115 uint64_t hblkpa;
11116
11117 ASSERT(*listp != NULL);
11118
11119 hmeblkp = *listp;
11120 while (hmeblkp != NULL) {
11121 next_hmeblkp = hmeblkp->hblk_next;
11122 ASSERT(!hmeblkp->hblk_hmecnt);
11123 ASSERT(!hmeblkp->hblk_vcnt);
11124 ASSERT(!hmeblkp->hblk_lckcnt);
11125 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11126 ASSERT(hmeblkp->hblk_shared == 0);
11127 ASSERT(hmeblkp->hblk_shw_bit == 0);
11128 ASSERT(hmeblkp->hblk_shadow == NULL);
11129
11130 hblkpa = va_to_pa((caddr_t)hmeblkp);
11131 ASSERT(hblkpa != (uint64_t)-1);
11132 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11133
11134 size = get_hblk_ttesz(hmeblkp);
11135 hmeblkp->hblk_next = NULL;
11136 hmeblkp->hblk_nextpa = hblkpa;
11137
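/*
 * Static nucleus hmeblks (hblk_nuc_bit set) are never returned to
 * the free list or to kmem.
 */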
11138 if (hmeblkp->hblk_nuc_bit == 0) {
11139
11140 if (size != TTE8K ||
11141 !sfmmu_put_free_hblk(hmeblkp, critical))
11142 kmem_cache_free(get_hblk_cache(hmeblkp),
11143 hmeblkp);
11144 }
11145 hmeblkp = next_hmeblkp;
11146 }
11147 }
11148
11149 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11150 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11151
11152 static uint_t sfmmu_hblk_steal_twice;
11153 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11154
11155 /*
11156 * Steal an hmeblk from the user or kernel hme hash lists.
11157 * For an 8K tte, grab one from the reserve pool (freehblkp) before proceeding
11158 * to steal, and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts,
11159 * tap into the critical reserve of freehblkp.
11160 * Note: we keep looping in this routine until we find one.
11161 */
11162 static struct hme_blk *
11163 sfmmu_hblk_steal(int size)
11164 {
11165 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11166 struct hmehash_bucket *hmebp;
11167 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11168 uint64_t hblkpa;
11169 int i;
11170 uint_t loop_cnt = 0, critical;
11171
11172 for (;;) {
11173 /* Check cpu hblk pending queues */
11174 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11175 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11176 ASSERT(hmeblkp->hblk_hmecnt == 0);
11177 ASSERT(hmeblkp->hblk_vcnt == 0);
11178 return (hmeblkp);
11179 }
11180
11181 if (size == TTE8K) {
11182 critical =
11183 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11184 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11185 return (hmeblkp);
11186 }
11187
11188 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11189 uhmehash_steal_hand;
11190 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11191
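/*
 * Scan the user hash buckets, resuming from where the last steal
 * left off (uhmehash_steal_hand).
 */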
11192 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11193 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11194 SFMMU_HASH_LOCK(hmebp);
11195 hmeblkp = hmebp->hmeblkp;
11196 hblkpa = hmebp->hmeh_nextpa;
11197 pr_hblk = NULL;
11198 while (hmeblkp) {
11199 /*
11200 * Check if it is an hmeblk that is not locked
11201 * and not shared. Skip shadow hmeblks with
11202 * shadow_mask set, i.e. valid count nonzero.
11203 */
11204 if ((get_hblk_ttesz(hmeblkp) == size) &&
11205 (hmeblkp->hblk_shw_bit == 0 ||
11206 hmeblkp->hblk_vcnt == 0) &&
11207 (hmeblkp->hblk_lckcnt == 0)) {
11208 /*
11209 * there is a high probability that we
11210 * will find a free one. search some
11211 * buckets for a free hmeblk initially
11212 * before unloading a valid hmeblk.
11213 */
11214 if ((hmeblkp->hblk_vcnt == 0 &&
11215 hmeblkp->hblk_hmecnt == 0) || (i >=
11216 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11217 if (sfmmu_steal_this_hblk(hmebp,
11218 hmeblkp, hblkpa, pr_hblk)) {
11219 /*
11220 * Hblk is unloaded
11221 * successfully
11222 */
11223 break;
11224 }
11225 }
11226 }
11227 pr_hblk = hmeblkp;
11228 hblkpa = hmeblkp->hblk_nextpa;
11229 hmeblkp = hmeblkp->hblk_next;
11230 }
11231
11232 SFMMU_HASH_UNLOCK(hmebp);
11233 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11234 hmebp = uhme_hash;
11235 }
11236 uhmehash_steal_hand = hmebp;
11237
11238 if (hmeblkp != NULL)
11239 break;
11240
11241 /*
11242 * in the worst case, look for a free one in the kernel
11243 * hash table.
11244 */
11245 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11246 SFMMU_HASH_LOCK(hmebp);
11247 hmeblkp = hmebp->hmeblkp;
11248 hblkpa = hmebp->hmeh_nextpa;
11249 pr_hblk = NULL;
11250 while (hmeblkp) {
11251 /*
11252 * check if it is free hmeblk
11253 */
11254 if ((get_hblk_ttesz(hmeblkp) == size) &&
11255 (hmeblkp->hblk_lckcnt == 0) &&
11256 (hmeblkp->hblk_vcnt == 0) &&
11257 (hmeblkp->hblk_hmecnt == 0)) {
11258 if (sfmmu_steal_this_hblk(hmebp,
11259 hmeblkp, hblkpa, pr_hblk)) {
11260 break;
11261 } else {
11262 /*
11263 * Cannot fail since we have
11264 * hash lock.
11265 */
11266 panic("fail to steal?");
11267 }
11268 }
11269
11270 pr_hblk = hmeblkp;
11271 hblkpa = hmeblkp->hblk_nextpa;
11272 hmeblkp = hmeblkp->hblk_next;
11273 }
11274
11275 SFMMU_HASH_UNLOCK(hmebp);
11276 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11277 hmebp = khme_hash;
11278 }
11279
11280 if (hmeblkp != NULL)
11281 break;
11282 sfmmu_hblk_steal_twice++;
11283 }
11284 return (hmeblkp);
11285 }
11286
11287 /*
11288 * This routine does the real work to prepare an hblk to be "stolen":
11289 * unloading the mappings, updating shadow counts, etc.
11290 * It returns 1 if the block is ready to be reused (stolen), or 0 if
11291 * the block cannot be stolen yet because pageunload is still working
11292 * on this hblk.
11293 */
11294 static int
11295 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11296 uint64_t hblkpa, struct hme_blk *pr_hblk)
11297 {
11298 int shw_size, vshift;
11299 struct hme_blk *shw_hblkp;
11300 caddr_t vaddr;
11301 uint_t shw_mask, newshw_mask;
11302 struct hme_blk *list = NULL;
11303
11304 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11305
11306 /*
11307 * check if the hmeblk is free, unload if necessary
11308 */
11309 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11310 sfmmu_t *sfmmup;
11311 demap_range_t dmr;
11312
11313 sfmmup = hblktosfmmu(hmeblkp);
11314 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11315 return (0);
11316 }
11317 DEMAP_RANGE_INIT(sfmmup, &dmr);
11318 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11319 (caddr_t)get_hblk_base(hmeblkp),
11320 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11321 DEMAP_RANGE_FLUSH(&dmr);
11322 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11323 /*
11324 * Pageunload is working on the same hblk.
11325 */
11326 return (0);
11327 }
11328
11329 sfmmu_hblk_steal_unload_count++;
11330 }
11331
11332 ASSERT(hmeblkp->hblk_lckcnt == 0);
11333 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11334
11335 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11336 hmeblkp->hblk_nextpa = hblkpa;
11337
11338 shw_hblkp = hmeblkp->hblk_shadow;
11339 if (shw_hblkp) {
11340 ASSERT(!hmeblkp->hblk_shared);
11341 shw_size = get_hblk_ttesz(shw_hblkp);
11342 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11343 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11344 ASSERT(vshift < 8);
11345 /*
11346 * Atomically clear shadow mask bit
11347 */
11348 do {
11349 shw_mask = shw_hblkp->hblk_shw_mask;
11350 ASSERT(shw_mask & (1 << vshift));
11351 newshw_mask = shw_mask & ~(1 << vshift);
11352 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11353 shw_mask, newshw_mask);
11354 } while (newshw_mask != shw_mask);
11355 hmeblkp->hblk_shadow = NULL;
11356 }
11357
11358 /*
11359 * remove shadow bit if we are stealing an unused shadow hmeblk.
11360 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if
11361 * we are indeed allocating a shadow hmeblk.
11362 */
11363 hmeblkp->hblk_shw_bit = 0;
11364
11365 if (hmeblkp->hblk_shared) {
11366 sf_srd_t *srdp;
11367 sf_region_t *rgnp;
11368 uint_t rid;
11369
11370 srdp = hblktosrd(hmeblkp);
11371 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11372 rid = hmeblkp->hblk_tag.htag_rid;
11373 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11374 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11375 rgnp = srdp->srd_hmergnp[rid];
11376 ASSERT(rgnp != NULL);
11377 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11378 hmeblkp->hblk_shared = 0;
11379 }
11380
11381 sfmmu_hblk_steal_count++;
11382 SFMMU_STAT(sf_steal_count);
11383
11384 return (1);
11385 }
11386
11387 struct hme_blk *
11388 sfmmu_hmetohblk(struct sf_hment *sfhme)
11389 {
11390 struct hme_blk *hmeblkp;
11391 struct sf_hment *sfhme0;
11392 struct hme_blk *hblk_dummy = 0;
11393
11394 /*
11395 * No dummy sf_hments, please.
11396 */
11397 ASSERT(sfhme->hme_tte.ll != 0);
11398
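/*
 * Step back from this hment to the first hment in the hblk, then
 * subtract the offset of hblk_hme[0] (computed via the dummy hblk
 * pointer) to recover the enclosing hme_blk address.
 */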
11399 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
11400 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11401 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11402
11403 return (hmeblkp);
11404 }
11405
11406 /*
11407 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11408 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11409 * KM_SLEEP allocation.
11412 */
11413 static void
11414 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11415 {
11416 struct tsb_info *tsbinfop, *next;
11417 tsb_replace_rc_t rc;
11418 boolean_t gotfirst = B_FALSE;
11419
11420 ASSERT(sfmmup != ksfmmup);
11421 ASSERT(sfmmu_hat_lock_held(sfmmup));
11422
11423 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11424 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11425 }
11426
11427 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11428 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11429 } else {
11430 return;
11431 }
11432
11433 ASSERT(sfmmup->sfmmu_tsb != NULL);
11434
11435 /*
11436 * Loop over all tsbinfo's replacing them with ones that actually have
11437 * a TSB. If any of the replacements ever fail, bail out of the loop.
11438 */
11439 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11440 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11441 next = tsbinfop->tsb_next;
11442 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11443 hatlockp, TSB_SWAPIN);
11444 if (rc != TSB_SUCCESS) {
11445 break;
11446 }
11447 gotfirst = B_TRUE;
11448 }
11449
11450 switch (rc) {
11451 case TSB_SUCCESS:
11452 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11453 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11454 return;
11455 case TSB_LOSTRACE:
11456 break;
11457 case TSB_ALLOCFAIL:
11458 break;
11459 default:
11460 panic("sfmmu_replace_tsb returned unrecognized failure code "
11461 "%d", rc);
11462 }
11463
11464 /*
11465 * In this case, we failed to get one of our TSBs. If we failed to
11466 * get the first TSB, get one of minimum size (8KB). Walk the list
11467 * and throw away the tsbinfos, starting where the allocation failed;
11468 * we can get by with just one TSB as long as we don't leave the
11469 * SWAPPED tsbinfo structures lying around.
11470 */
11471 tsbinfop = sfmmup->sfmmu_tsb;
11472 next = tsbinfop->tsb_next;
11473 tsbinfop->tsb_next = NULL;
11474
11475 sfmmu_hat_exit(hatlockp);
11476 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11477 next = tsbinfop->tsb_next;
11478 sfmmu_tsbinfo_free(tsbinfop);
11479 }
11480 hatlockp = sfmmu_hat_enter(sfmmup);
11481
11482 /*
11483 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11484 * pages.
11485 */
11486 if (!gotfirst) {
11487 tsbinfop = sfmmup->sfmmu_tsb;
11488 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11489 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11490 ASSERT(rc == TSB_SUCCESS);
11491 }
11492
11493 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11494 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11495 }
11496
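/*
 * Return 1 if addr falls within one of the HME regions whose bits are set
 * in the region bitmap word bmw; w is that word's index in the bitmap, so
 * region ids are formed as (w << BT_ULSHIFT) | bit position. Return 0
 * otherwise.
 */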
11497 static int
11498 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11499 {
11500 ulong_t bix = 0;
11501 uint_t rid;
11502 sf_region_t *rgnp;
11503
11504 ASSERT(srdp != NULL);
11505 ASSERT(srdp->srd_refcnt != 0);
11506
11507 w <<= BT_ULSHIFT;
11508 while (bmw) {
11509 if (!(bmw & 0x1)) {
11510 bix++;
11511 bmw >>= 1;
11512 continue;
11513 }
11514 rid = w | bix;
11515 rgnp = srdp->srd_hmergnp[rid];
11516 ASSERT(rgnp->rgn_refcnt > 0);
11517 ASSERT(rgnp->rgn_id == rid);
11518 if (addr < rgnp->rgn_saddr ||
11519 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11520 bix++;
11521 bmw >>= 1;
11522 } else {
11523 return (1);
11524 }
11525 }
11526 return (0);
11527 }
11528
11529 /*
 * Handle exceptions for the low level TSB miss handler.
 *
 * There are several scenarios that could land us here:
 *
 * If the context is invalid we land here. The context can be invalid
 * for 3 reasons: 1) we couldn't allocate a new context and now need to
 * perform a wrap around operation in order to allocate a new context;
 * 2) the context was invalidated to change pagesize programming; 3) the
 * ISM or TSB configuration is changing for this process and we were
 * forced in here to do a synchronization operation. If the context is
 * valid we can be here from the window trap handler; in that case just
 * call trap() to handle the fault.
11542 *
11543 * Note that the process will run in INVALID_CONTEXT before
11544 * faulting into here and subsequently loading the MMU registers
11545 * (including the TSB base register) associated with this process.
11546 * For this reason, the trap handlers must all test for
11547 * INVALID_CONTEXT before attempting to access any registers other
11548 * than the context registers.
11549 */
11550 void
11551 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11552 {
11553 sfmmu_t *sfmmup, *shsfmmup;
11554 uint_t ctxtype;
11555 klwp_id_t lwp;
11556 char lwp_save_state;
11557 hatlock_t *hatlockp, *shatlockp;
11558 struct tsb_info *tsbinfop;
11559 struct tsbmiss *tsbmp;
11560 sf_scd_t *scdp;
11561
11562 SFMMU_STAT(sf_tsb_exceptions);
11563 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11564 sfmmup = astosfmmu(curthread->t_procp->p_as);
11565 /*
	 * Note that on sun4u the tagaccess register contains the ctxnum,
	 * while sun4v passes the ctxtype in the tagaccess register.
11568 */
11569 ctxtype = tagaccess & TAGACC_CTX_MASK;
11570
11571 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11572 ASSERT(sfmmup->sfmmu_ismhat == 0);
11573 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11574 ctxtype == INVALID_CONTEXT);
11575
11576 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11577 /*
		 * We may land here because the shme bitmap and pagesize
		 * flags are updated lazily in the tsbmiss area on other CPUs.
		 * If we detect here that the tsbmiss area is out of sync with
		 * the sfmmu, update it and retry the trapped instruction.
11582 * Otherwise call trap().
11583 */
11584 int ret = 0;
11585 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11586 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11587
11588 /*
11589 * Must set lwp state to LWP_SYS before
11590 * trying to acquire any adaptive lock
11591 */
11592 lwp = ttolwp(curthread);
11593 ASSERT(lwp);
11594 lwp_save_state = lwp->lwp_state;
11595 lwp->lwp_state = LWP_SYS;
11596
11597 hatlockp = sfmmu_hat_enter(sfmmup);
11598 kpreempt_disable();
11599 tsbmp = &tsbmiss_area[CPU->cpu_id];
11600 ASSERT(sfmmup == tsbmp->usfmmup);
11601 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11602 ~tteflag_mask) ||
11603 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11604 ~tteflag_mask)) {
11605 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11606 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11607 ret = 1;
11608 }
11609 if (sfmmup->sfmmu_srdp != NULL) {
11610 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11611 ulong_t *tm = tsbmp->shmermap;
11612 ulong_t i;
11613 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11614 ulong_t d = tm[i] ^ sm[i];
11615 if (d) {
11616 if (d & sm[i]) {
11617 if (!ret && sfmmu_is_rgnva(
11618 sfmmup->sfmmu_srdp,
11619 addr, i, d & sm[i])) {
11620 ret = 1;
11621 }
11622 }
11623 tm[i] = sm[i];
11624 }
11625 }
11626 }
11627 kpreempt_enable();
11628 sfmmu_hat_exit(hatlockp);
11629 lwp->lwp_state = lwp_save_state;
11630 if (ret) {
11631 return;
11632 }
11633 } else if (ctxtype == INVALID_CONTEXT) {
11634 /*
11635 * First, make sure we come out of here with a valid ctx,
11636 * since if we don't get one we'll simply loop on the
11637 * faulting instruction.
11638 *
11639 * If the ISM mappings are changing, the TSB is relocated,
11640 * the process is swapped, the process is joining SCD or
11641 * leaving SCD or shared regions we serialize behind the
11642 * controlling thread with hat lock, sfmmu_flags and
11643 * sfmmu_tsb_cv condition variable.
11644 */
11645
11646 /*
11647 * Must set lwp state to LWP_SYS before
11648 * trying to acquire any adaptive lock
11649 */
11650 lwp = ttolwp(curthread);
11651 ASSERT(lwp);
11652 lwp_save_state = lwp->lwp_state;
11653 lwp->lwp_state = LWP_SYS;
11654
11655 hatlockp = sfmmu_hat_enter(sfmmup);
11656 retry:
11657 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11658 shsfmmup = scdp->scd_sfmmup;
11659 ASSERT(shsfmmup != NULL);
11660
11661 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11662 tsbinfop = tsbinfop->tsb_next) {
11663 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11664 /* drop the private hat lock */
11665 sfmmu_hat_exit(hatlockp);
11666 /* acquire the shared hat lock */
11667 shatlockp = sfmmu_hat_enter(shsfmmup);
11668 /*
11669 * recheck to see if anything changed
11670 * after we drop the private hat lock.
11671 */
11672 if (sfmmup->sfmmu_scdp == scdp &&
11673 shsfmmup == scdp->scd_sfmmup) {
11674 sfmmu_tsb_chk_reloc(shsfmmup,
11675 shatlockp);
11676 }
11677 sfmmu_hat_exit(shatlockp);
11678 hatlockp = sfmmu_hat_enter(sfmmup);
11679 goto retry;
11680 }
11681 }
11682 }
11683
11684 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11685 tsbinfop = tsbinfop->tsb_next) {
11686 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11687 cv_wait(&sfmmup->sfmmu_tsb_cv,
11688 HATLOCK_MUTEXP(hatlockp));
11689 goto retry;
11690 }
11691 }
11692
11693 /*
11694 * Wait for ISM maps to be updated.
11695 */
11696 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11697 cv_wait(&sfmmup->sfmmu_tsb_cv,
11698 HATLOCK_MUTEXP(hatlockp));
11699 goto retry;
11700 }
11701
11702 /* Is this process joining an SCD? */
11703 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11704 /*
11705 * Flush private TSB and setup shared TSB.
11706 * sfmmu_finish_join_scd() does not drop the
11707 * hat lock.
11708 */
11709 sfmmu_finish_join_scd(sfmmup);
11710 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11711 }
11712
11713 /*
11714 * If we're swapping in, get TSB(s). Note that we must do
11715 * this before we get a ctx or load the MMU state. Once
11716 * we swap in we have to recheck to make sure the TSB(s) and
11717 * ISM mappings didn't change while we slept.
11718 */
11719 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11720 sfmmu_tsb_swapin(sfmmup, hatlockp);
11721 goto retry;
11722 }
11723
11724 sfmmu_get_ctx(sfmmup);
11725
11726 sfmmu_hat_exit(hatlockp);
11727 /*
11728 * Must restore lwp_state if not calling
11729 * trap() for further processing. Restore
11730 * it anyway.
11731 */
11732 lwp->lwp_state = lwp_save_state;
11733 return;
11734 }
11735 trap(rp, (caddr_t)tagaccess, traptype, 0);
11736 }
11737
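/*
 * If any of this hat's TSBs are being relocated, wait once on sfmmu_tsb_cv
 * (cv_wait drops and reacquires the hat lock); callers are expected to
 * re-check the relocation flags after we return.
 */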
11738 static void
11739 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11740 {
11741 struct tsb_info *tp;
11742
11743 ASSERT(sfmmu_hat_lock_held(sfmmup));
11744
11745 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11746 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11747 cv_wait(&sfmmup->sfmmu_tsb_cv,
11748 HATLOCK_MUTEXP(hatlockp));
11749 break;
11750 }
11751 }
11752 }
11753
11754 /*
 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
 * rather than spinning, to avoid send mondo timeouts with
 * interrupts enabled. When the lock is acquired it is immediately
11759 * released and we return back to sfmmu_vatopfn just after
11760 * the GET_TTE call.
11761 */
11762 void
11763 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11764 {
11765 struct page **pp;
11766
11767 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11768 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11769 }
11770
11771 /*
 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
11774 * cross traps which cannot be handled while spinning in the
11775 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11776 * mutex, which is held by the holder of the suspend bit, and then
11777 * retry the trapped instruction after unwinding.
11778 */
11779 /*ARGSUSED*/
11780 void
11781 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11782 {
11783 ASSERT(curthread != kreloc_thread);
11784 mutex_enter(&kpr_suspendlock);
11785 mutex_exit(&kpr_suspendlock);
11786 }
11787
11788 /*
 * This routine could be optimized to reduce the number of xcalls by flushing
 * the entire TLB if the region reference count is above some threshold, but
 * the tradeoff will depend on the size of the TLB. So for now flush the
 * specific page, one context at a time.
 *
 * If uselocks is 0 then we are called after all cpus were captured and all
 * the hat locks were taken. In that case don't take the region lock; rely
 * instead on the order of the region list update operations in
 * hat_join_region(), hat_leave_region() and hat_dup_region(). The ordering
 * in those routines guarantees that the list is always forward walkable and
 * reaches active sfmmus regardless of where xc_attention() captures a cpu.
11800 */
11801 cpuset_t
11802 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11803 struct hme_blk *hmeblkp, int uselocks)
11804 {
11805 sfmmu_t *sfmmup;
11806 cpuset_t cpuset;
11807 cpuset_t rcpuset;
11808 hatlock_t *hatlockp;
11809 uint_t rid = rgnp->rgn_id;
11810 sf_rgn_link_t *rlink;
11811 sf_scd_t *scdp;
11812
11813 ASSERT(hmeblkp->hblk_shared);
11814 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11815 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11816
11817 CPUSET_ZERO(rcpuset);
11818 if (uselocks) {
11819 mutex_enter(&rgnp->rgn_mutex);
11820 }
11821 sfmmup = rgnp->rgn_sfmmu_head;
11822 while (sfmmup != NULL) {
11823 if (uselocks) {
11824 hatlockp = sfmmu_hat_enter(sfmmup);
11825 }
11826
11827 /*
11828 * When an SCD is created the SCD hat is linked on the sfmmu
11829 * region lists for each hme region which is part of the
11830 * SCD. If we find an SCD hat, when walking these lists,
11831 * then we flush the shared TSBs, if we find a private hat,
11832 * which is part of an SCD, but where the region
11833 * is not part of the SCD then we flush the private TSBs.
11834 */
11835 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11836 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11837 scdp = sfmmup->sfmmu_scdp;
11838 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11839 if (uselocks) {
11840 sfmmu_hat_exit(hatlockp);
11841 }
11842 goto next;
11843 }
11844 }
11845
11846 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11847
11848 kpreempt_disable();
11849 cpuset = sfmmup->sfmmu_cpusran;
11850 CPUSET_AND(cpuset, cpu_ready_set);
11851 CPUSET_DEL(cpuset, CPU->cpu_id);
11852 SFMMU_XCALL_STATS(sfmmup);
11853 xt_some(cpuset, vtag_flushpage_tl1,
11854 (uint64_t)addr, (uint64_t)sfmmup);
11855 vtag_flushpage(addr, (uint64_t)sfmmup);
11856 if (uselocks) {
11857 sfmmu_hat_exit(hatlockp);
11858 }
11859 kpreempt_enable();
11860 CPUSET_OR(rcpuset, cpuset);
11861
11862 next:
11863 /* LINTED: constant in conditional context */
11864 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
11865 ASSERT(rlink != NULL);
11866 sfmmup = rlink->next;
11867 }
11868 if (uselocks) {
11869 mutex_exit(&rgnp->rgn_mutex);
11870 }
11871 return (rcpuset);
11872 }
11873
11874 /*
 * This routine takes an sfmmu pointer and the va for an address in an
 * ISM region as input and returns the corresponding region id in ism_rid.
 * A return value of 1 indicates that a region has been found and ism_rid
 * is valid; otherwise 0 is returned.
11879 */
11880 static int
11881 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
11882 {
11883 ism_blk_t *ism_blkp;
11884 int i;
11885 ism_map_t *ism_map;
11886 #ifdef DEBUG
11887 struct hat *ism_hatid;
11888 #endif
11889 ASSERT(sfmmu_hat_lock_held(sfmmup));
11890
11891 ism_blkp = sfmmup->sfmmu_iblk;
11892 while (ism_blkp != NULL) {
11893 ism_map = ism_blkp->iblk_maps;
11894 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
11895 if ((va >= ism_start(ism_map[i])) &&
11896 (va < ism_end(ism_map[i]))) {
11897
11898 *ism_rid = ism_map[i].imap_rid;
11899 #ifdef DEBUG
11900 ism_hatid = ism_map[i].imap_ismhat;
11901 ASSERT(ism_hatid == ism_sfmmup);
11902 ASSERT(ism_hatid->sfmmu_ismhat);
11903 #endif
11904 return (1);
11905 }
11906 }
11907 ism_blkp = ism_blkp->iblk_next;
11908 }
11909 return (0);
11910 }
11911
11912 /*
 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches.
 * This routine may be called with all CPUs captured. Therefore, the
11915 * caller is responsible for holding all locks and disabling kernel
11916 * preemption.
11917 */
11918 /* ARGSUSED */
11919 static void
11920 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
11921 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
11922 {
11923 cpuset_t cpuset;
11924 caddr_t va;
11925 ism_ment_t *ment;
11926 sfmmu_t *sfmmup;
11927 #ifdef VAC
11928 int vcolor;
11929 #endif
11930
11931 sf_scd_t *scdp;
11932 uint_t ism_rid;
11933
11934 ASSERT(!hmeblkp->hblk_shared);
11935 /*
11936 * Walk the ism_hat's mapping list and flush the page
	 * may be called while all CPUs have been captured.
11938 * may be called while all cpu's have been captured.
11939 * Therefore we can't attempt to grab any locks. For now
11940 * this means we will protect the ism mapping list under
11941 * a single lock which will be grabbed by the caller.
	 * If hat_share/unshare scalability becomes a performance
11943 * problem then we may need to re-think ism mapping list locking.
11944 */
11945 ASSERT(ism_sfmmup->sfmmu_ismhat);
11946 ASSERT(MUTEX_HELD(&ism_mlist_lock));
11947 addr = addr - ISMID_STARTADDR;
11948
11949 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
11950
11951 sfmmup = ment->iment_hat;
11952
11953 va = ment->iment_base_va;
11954 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
11955
11956 /*
11957 * When an SCD is created the SCD hat is linked on the ism
11958 * mapping lists for each ISM segment which is part of the
11959 * SCD. If we find an SCD hat, when walking these lists,
11960 * then we flush the shared TSBs, if we find a private hat,
11961 * which is part of an SCD, but where the region
11962 * corresponding to this va is not part of the SCD then we
11963 * flush the private TSBs.
11964 */
11965 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11966 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
11967 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11968 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
11969 &ism_rid)) {
11970 cmn_err(CE_PANIC,
11971 "can't find matching ISM rid!");
11972 }
11973
11974 scdp = sfmmup->sfmmu_scdp;
11975 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
11976 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
11977 ism_rid)) {
11978 continue;
11979 }
11980 }
11981 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
11982
11983 cpuset = sfmmup->sfmmu_cpusran;
11984 CPUSET_AND(cpuset, cpu_ready_set);
11985 CPUSET_DEL(cpuset, CPU->cpu_id);
11986 SFMMU_XCALL_STATS(sfmmup);
11987 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
11988 (uint64_t)sfmmup);
11989 vtag_flushpage(va, (uint64_t)sfmmup);
11990
11991 #ifdef VAC
11992 /*
11993 * Flush D$
11994 * When flushing D$ we must flush all
11995 * cpu's. See sfmmu_cache_flush().
11996 */
11997 if (cache_flush_flag == CACHE_FLUSH) {
11998 cpuset = cpu_ready_set;
11999 CPUSET_DEL(cpuset, CPU->cpu_id);
12000
12001 SFMMU_XCALL_STATS(sfmmup);
12002 vcolor = addr_to_vcolor(va);
12003 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12004 vac_flushpage(pfnum, vcolor);
12005 }
12006 #endif /* VAC */
12007 }
12008 }
12009
12010 /*
12011 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
 * a particular virtual address and ctx. If tlb_noflush is set we do not
 * flush the TLB/TSB. This function may or may not be called with the
12014 * HAT lock held.
12015 */
12016 static void
12017 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12018 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12019 int hat_lock_held)
12020 {
12021 #ifdef VAC
12022 int vcolor;
12023 #endif
12024 cpuset_t cpuset;
12025 hatlock_t *hatlockp;
12026
12027 ASSERT(!hmeblkp->hblk_shared);
12028
12029 #if defined(lint) && !defined(VAC)
12030 pfnum = pfnum;
12031 cpu_flag = cpu_flag;
12032 cache_flush_flag = cache_flush_flag;
12033 #endif
12034
12035 /*
12036 * There is no longer a need to protect against ctx being
12037 * stolen here since we don't store the ctx in the TSB anymore.
12038 */
12039 #ifdef VAC
12040 vcolor = addr_to_vcolor(addr);
12041 #endif
12042
12043 /*
12044 * We must hold the hat lock during the flush of TLB,
12045 * to avoid a race with sfmmu_invalidate_ctx(), where
12046 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12047 * causing TLB demap routine to skip flush on that MMU.
12048 * If the context on a MMU has already been set to
12049 * INVALID_CONTEXT, we just get an extra flush on
12050 * that MMU.
12051 */
12052 if (!hat_lock_held && !tlb_noflush)
12053 hatlockp = sfmmu_hat_enter(sfmmup);
12054
12055 kpreempt_disable();
12056 if (!tlb_noflush) {
12057 /*
12058 * Flush the TSB and TLB.
12059 */
12060 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12061
12062 cpuset = sfmmup->sfmmu_cpusran;
12063 CPUSET_AND(cpuset, cpu_ready_set);
12064 CPUSET_DEL(cpuset, CPU->cpu_id);
12065
12066 SFMMU_XCALL_STATS(sfmmup);
12067
12068 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12069 (uint64_t)sfmmup);
12070
12071 vtag_flushpage(addr, (uint64_t)sfmmup);
12072 }
12073
12074 if (!hat_lock_held && !tlb_noflush)
12075 sfmmu_hat_exit(hatlockp);
12076
12077 #ifdef VAC
12078 /*
12079 * Flush the D$
12080 *
12081 * Even if the ctx is stolen, we need to flush the
12082 * cache. Our ctx stealer only flushes the TLBs.
12083 */
12084 if (cache_flush_flag == CACHE_FLUSH) {
12085 if (cpu_flag & FLUSH_ALL_CPUS) {
12086 cpuset = cpu_ready_set;
12087 } else {
12088 cpuset = sfmmup->sfmmu_cpusran;
12089 CPUSET_AND(cpuset, cpu_ready_set);
12090 }
12091 CPUSET_DEL(cpuset, CPU->cpu_id);
12092 SFMMU_XCALL_STATS(sfmmup);
12093 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12094 vac_flushpage(pfnum, vcolor);
12095 }
12096 #endif /* VAC */
12097 kpreempt_enable();
12098 }
12099
12100 /*
12101 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
 * address and ctx. If tlb_noflush is set we do not do anything.
12103 * This function may or may not be called with the HAT lock held.
12104 */
12105 static void
12106 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12107 int tlb_noflush, int hat_lock_held)
12108 {
12109 cpuset_t cpuset;
12110 hatlock_t *hatlockp;
12111
12112 ASSERT(!hmeblkp->hblk_shared);
12113
12114 /*
12115 * If the process is exiting we have nothing to do.
12116 */
12117 if (tlb_noflush)
12118 return;
12119
12120 /*
12121 * Flush TSB.
12122 */
12123 if (!hat_lock_held)
12124 hatlockp = sfmmu_hat_enter(sfmmup);
12125 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12126
12127 kpreempt_disable();
12128
12129 cpuset = sfmmup->sfmmu_cpusran;
12130 CPUSET_AND(cpuset, cpu_ready_set);
12131 CPUSET_DEL(cpuset, CPU->cpu_id);
12132
12133 SFMMU_XCALL_STATS(sfmmup);
12134 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12135
12136 vtag_flushpage(addr, (uint64_t)sfmmup);
12137
12138 if (!hat_lock_held)
12139 sfmmu_hat_exit(hatlockp);
12140
	kpreempt_enable();
}
12144
12145 /*
12146 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12147 * call handler that can flush a range of pages to save on xcalls.
12148 */
12149 static int sfmmu_xcall_save;
12150
12151 /*
 * This routine is never used for demapping addresses backed by SRD hmeblks.
12153 */
12154 static void
12155 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12156 {
12157 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12158 hatlock_t *hatlockp;
12159 cpuset_t cpuset;
12160 uint64_t sfmmu_pgcnt;
12161 pgcnt_t pgcnt = 0;
12162 int pgunload = 0;
12163 int dirtypg = 0;
12164 caddr_t addr = dmrp->dmr_addr;
12165 caddr_t eaddr;
12166 uint64_t bitvec = dmrp->dmr_bitvec;
12167
12168 ASSERT(bitvec & 1);
12169
12170 /*
12171 * Flush TSB and calculate number of pages to flush.
12172 */
12173 while (bitvec != 0) {
12174 dirtypg = 0;
12175 /*
12176 * Find the first page to flush and then count how many
12177 * pages there are after it that also need to be flushed.
12178 * This way the number of TSB flushes is minimized.
12179 */
12180 while ((bitvec & 1) == 0) {
12181 pgcnt++;
12182 addr += MMU_PAGESIZE;
12183 bitvec >>= 1;
12184 }
12185 while (bitvec & 1) {
12186 dirtypg++;
12187 bitvec >>= 1;
12188 }
12189 eaddr = addr + ptob(dirtypg);
12190 hatlockp = sfmmu_hat_enter(sfmmup);
12191 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12192 sfmmu_hat_exit(hatlockp);
12193 pgunload += dirtypg;
12194 addr = eaddr;
12195 pgcnt += dirtypg;
12196 }
12197
12198 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12199 if (sfmmup->sfmmu_free == 0) {
12200 addr = dmrp->dmr_addr;
12201 bitvec = dmrp->dmr_bitvec;
12202
12203 /*
		 * Make sure it has SFMMU_PGCNT_SHIFT bits only,
		 * as it will be used to pack the argument for xt_some.
12206 */
12207 ASSERT((pgcnt > 0) &&
12208 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12209
12210 /*
		 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
		 * the low 6 bits of sfmmup. This is doable since pgcnt
		 * is always >= 1.
12214 */
12215 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12216 sfmmu_pgcnt = (uint64_t)sfmmup |
12217 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
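		/*
		 * The TL1 handler is expected to undo this packing: the page
		 * count is (sfmmu_pgcnt & SFMMU_PGCNT_MASK) + 1 and the hat
		 * pointer is sfmmu_pgcnt & ~SFMMU_PGCNT_MASK.
		 */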
12218
12219 /*
12220 * We must hold the hat lock during the flush of TLB,
12221 * to avoid a race with sfmmu_invalidate_ctx(), where
12222 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12223 * causing TLB demap routine to skip flush on that MMU.
12224 * If the context on a MMU has already been set to
12225 * INVALID_CONTEXT, we just get an extra flush on
12226 * that MMU.
12227 */
12228 hatlockp = sfmmu_hat_enter(sfmmup);
12229 kpreempt_disable();
12230
12231 cpuset = sfmmup->sfmmu_cpusran;
12232 CPUSET_AND(cpuset, cpu_ready_set);
12233 CPUSET_DEL(cpuset, CPU->cpu_id);
12234
12235 SFMMU_XCALL_STATS(sfmmup);
12236 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12237 sfmmu_pgcnt);
12238
12239 for (; bitvec != 0; bitvec >>= 1) {
12240 if (bitvec & 1)
12241 vtag_flushpage(addr, (uint64_t)sfmmup);
12242 addr += MMU_PAGESIZE;
12243 }
12244 kpreempt_enable();
12245 sfmmu_hat_exit(hatlockp);
12246
12247 sfmmu_xcall_save += (pgunload-1);
12248 }
12249 dmrp->dmr_bitvec = 0;
12250 }
12251
12252 /*
12253 * In cases where we need to synchronize with TLB/TSB miss trap
12254 * handlers, _and_ need to flush the TLB, it's a lot easier to
12255 * throw away the context from the process than to do a
12256 * special song and dance to keep things consistent for the
12257 * handlers.
12258 *
12259 * Since the process suddenly ends up without a context and our caller
12260 * holds the hat lock, threads that fault after this function is called
12261 * will pile up on the lock. We can then do whatever we need to
12262 * atomically from the context of the caller. The first blocked thread
12263 * to resume executing will get the process a new context, and the
12264 * process will resume executing.
12265 *
12266 * One added advantage of this approach is that on MMUs that
12267 * support a "flush all" operation, we will delay the flush until
12268 * cnum wrap-around, and then flush the TLB one time. This
12269 * is rather rare, so it's a lot less expensive than making 8000
12270 * x-calls to flush the TLB 8000 times.
12271 *
12272 * A per-process (PP) lock is used to synchronize ctx allocations in
12273 * resume() and ctx invalidations here.
12274 */
12275 static void
12276 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12277 {
12278 cpuset_t cpuset;
12279 int cnum, currcnum;
12280 mmu_ctx_t *mmu_ctxp;
12281 int i;
12282 uint_t pstate_save;
12283
12284 SFMMU_STAT(sf_ctx_inv);
12285
12286 ASSERT(sfmmu_hat_lock_held(sfmmup));
12287 ASSERT(sfmmup != ksfmmup);
12288
12289 kpreempt_disable();
12290
12291 mmu_ctxp = CPU_MMU_CTXP(CPU);
12292 ASSERT(mmu_ctxp);
12293 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12294 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12295
12296 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12297
12298 pstate_save = sfmmu_disable_intrs();
12299
12300 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12301 /* set HAT cnum invalid across all context domains. */
12302 for (i = 0; i < max_mmu_ctxdoms; i++) {
12303
12304 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12305 if (cnum == INVALID_CONTEXT) {
12306 continue;
12307 }
12308
12309 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12310 }
12311 membar_enter(); /* make sure globally visible to all CPUs */
12312 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12313
12314 sfmmu_enable_intrs(pstate_save);
12315
12316 cpuset = sfmmup->sfmmu_cpusran;
12317 CPUSET_DEL(cpuset, CPU->cpu_id);
12318 CPUSET_AND(cpuset, cpu_ready_set);
12319 if (!CPUSET_ISNULL(cpuset)) {
12320 SFMMU_XCALL_STATS(sfmmup);
12321 xt_some(cpuset, sfmmu_raise_tsb_exception,
12322 (uint64_t)sfmmup, INVALID_CONTEXT);
12323 xt_sync(cpuset);
12324 SFMMU_STAT(sf_tsb_raise_exception);
12325 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12326 }
12327
12328 /*
12329 * If the hat to-be-invalidated is the same as the current
12330 * process on local CPU we need to invalidate
12331 * this CPU context as well.
12332 */
12333 if ((sfmmu_getctx_sec() == currcnum) &&
12334 (currcnum != INVALID_CONTEXT)) {
12335 /* sets shared context to INVALID too */
12336 sfmmu_setctx_sec(INVALID_CONTEXT);
12337 sfmmu_clear_utsbinfo();
12338 }
12339
12340 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12341
12342 kpreempt_enable();
12343
12344 /*
12345 * we hold the hat lock, so nobody should allocate a context
12346 * for us yet
12347 */
12348 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12349 }
12350
12351 #ifdef VAC
12352 /*
 * We need to flush the cache on all cpus. It is possible that
 * a process referenced a page as cacheable but has since exited
 * and cleared the mapping list. We still need to flush it but have no
 * state, so flushing on all cpus is the only alternative.
12357 */
12358 void
12359 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12360 {
12361 cpuset_t cpuset;
12362
12363 kpreempt_disable();
12364 cpuset = cpu_ready_set;
12365 CPUSET_DEL(cpuset, CPU->cpu_id);
12366 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12367 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12368 xt_sync(cpuset);
12369 vac_flushpage(pfnum, vcolor);
12370 kpreempt_enable();
12371 }
12372
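/*
 * As above, but flush by virtual color via vac_flushcolor() on all CPUs.
 */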
12373 void
12374 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12375 {
12376 cpuset_t cpuset;
12377
12378 ASSERT(vcolor >= 0);
12379
12380 kpreempt_disable();
12381 cpuset = cpu_ready_set;
12382 CPUSET_DEL(cpuset, CPU->cpu_id);
12383 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12384 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12385 xt_sync(cpuset);
12386 vac_flushcolor(vcolor, pfnum);
12387 kpreempt_enable();
12388 }
12389 #endif /* VAC */
12390
12391 /*
12392 * We need to prevent processes from accessing the TSB using a cached physical
12393 * address. It's alright if they try to access the TSB via virtual address
12394 * since they will just fault on that virtual address once the mapping has
12395 * been suspended.
12396 */
12397 #pragma weak sendmondo_in_recover
12398
12399 /* ARGSUSED */
12400 static int
12401 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12402 {
12403 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12404 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12405 hatlock_t *hatlockp;
12406 sf_scd_t *scdp;
12407
12408 if (flags != HAT_PRESUSPEND)
12409 return (0);
12410
12411 /*
	 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
	 * be a shared hat, so set the SCD tsbinfo's flag.
	 * If the tsb is not shared, sfmmup is a private hat, so set
	 * its private tsbinfo's flag.
12416 */
12417 hatlockp = sfmmu_hat_enter(sfmmup);
12418 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12419
12420 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12421 sfmmu_tsb_inv_ctx(sfmmup);
12422 sfmmu_hat_exit(hatlockp);
12423 } else {
12424 /* release lock on the shared hat */
12425 sfmmu_hat_exit(hatlockp);
12426 /* sfmmup is a shared hat */
12427 ASSERT(sfmmup->sfmmu_scdhat);
12428 scdp = sfmmup->sfmmu_scdp;
12429 ASSERT(scdp != NULL);
12430 /* get private hat from the scd list */
12431 mutex_enter(&scdp->scd_mutex);
12432 sfmmup = scdp->scd_sf_list;
12433 while (sfmmup != NULL) {
12434 hatlockp = sfmmu_hat_enter(sfmmup);
12435 /*
12436 * We do not call sfmmu_tsb_inv_ctx here because
12437 * sendmondo_in_recover check is only needed for
12438 * sun4u.
12439 */
12440 sfmmu_invalidate_ctx(sfmmup);
12441 sfmmu_hat_exit(hatlockp);
			sfmmup = sfmmup->sfmmu_scd_link.next;
		}
12445 mutex_exit(&scdp->scd_mutex);
12446 }
12447 return (0);
12448 }
12449
12450 static void
12451 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12452 {
12453 extern uint32_t sendmondo_in_recover;
12454
12455 ASSERT(sfmmu_hat_lock_held(sfmmup));
12456
12457 /*
12458 * For Cheetah+ Erratum 25:
12459 * Wait for any active recovery to finish. We can't risk
12460 * relocating the TSB of the thread running mondo_recover_proc()
12461 * since, if we did that, we would deadlock. The scenario we are
12462 * trying to avoid is as follows:
12463 *
12464 * THIS CPU RECOVER CPU
12465 * -------- -----------
12466 * Begins recovery, walking through TSB
12467 * hat_pagesuspend() TSB TTE
12468 * TLB miss on TSB TTE, spins at TL1
12469 * xt_sync()
12470 * send_mondo_timeout()
12471 * mondo_recover_proc()
12472 * ((deadlocked))
12473 *
12474 * The second half of the workaround is that mondo_recover_proc()
12475 * checks to see if the tsb_info has the RELOC flag set, and if it
12476 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12477 * and hence avoiding the TLB miss that could result in a deadlock.
12478 */
12479 if (&sendmondo_in_recover) {
12480 membar_enter(); /* make sure RELOC flag visible */
12481 while (sendmondo_in_recover) {
12482 drv_usecwait(1);
12483 membar_consumer();
12484 }
12485 }
12486
12487 sfmmu_invalidate_ctx(sfmmup);
12488 }
12489
12490 /* ARGSUSED */
12491 static int
12492 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12493 void *tsbinfo, pfn_t newpfn)
12494 {
12495 hatlock_t *hatlockp;
12496 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12497 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12498
12499 if (flags != HAT_POSTUNSUSPEND)
12500 return (0);
12501
12502 hatlockp = sfmmu_hat_enter(sfmmup);
12503
12504 SFMMU_STAT(sf_tsb_reloc);
12505
12506 /*
12507 * The process may have swapped out while we were relocating one
12508 * of its TSBs. If so, don't bother doing the setup since the
12509 * process can't be using the memory anymore.
12510 */
12511 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12512 ASSERT(va == tsbinfop->tsb_va);
12513 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12514
12515 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12516 sfmmu_inv_tsb(tsbinfop->tsb_va,
12517 TSB_BYTES(tsbinfop->tsb_szc));
12518 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12519 }
12520 }
12521
12522 membar_exit();
12523 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12524 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12525
12526 sfmmu_hat_exit(hatlockp);
12527
12528 return (0);
12529 }
12530
12531 /*
12532 * Allocate and initialize a tsb_info structure. Note that we may or may not
12533 * allocate a TSB here, depending on the flags passed in.
12534 */
12535 static int
12536 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12537 uint_t flags, sfmmu_t *sfmmup)
12538 {
12539 int err;
12540
12541 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12542 sfmmu_tsbinfo_cache, KM_SLEEP);
12543
12544 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12545 tsb_szc, flags, sfmmup)) != 0) {
12546 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12547 SFMMU_STAT(sf_tsb_allocfail);
12548 *tsbinfopp = NULL;
12549 return (err);
12550 }
12551 SFMMU_STAT(sf_tsb_alloc);
12552
12553 /*
12554 * Bump the TSB size counters for this TSB size.
12555 */
12556 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
12557 return (0);
12558 }
12559
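/*
 * Free the TSB memory described by tsbinfo: uninstall the relocation
 * callback if the TSB came from relocatable kernel memory, return the
 * memory to its kmem cache or vmem arena, and update the global TSB
 * allocation byte count. The tsbinfo structure itself is not freed here.
 */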
12560 static void
12561 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12562 {
12563 caddr_t tsbva = tsbinfo->tsb_va;
12564 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12565 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12566 vmem_t *vmp = tsbinfo->tsb_vmp;
12567
12568 /*
12569 * If we allocated this TSB from relocatable kernel memory, then we
12570 * need to uninstall the callback handler.
12571 */
12572 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12573 uintptr_t slab_mask;
12574 caddr_t slab_vaddr;
12575 page_t **ppl;
12576 int ret;
12577
12578 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12579 if (tsb_size > MMU_PAGESIZE4M)
12580 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12581 else
12582 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12583 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12584
12585 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12586 ASSERT(ret == 0);
12587 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12588 0, NULL);
12589 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12590 }
12591
12592 if (kmem_cachep != NULL) {
12593 kmem_cache_free(kmem_cachep, tsbva);
12594 } else {
12595 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12596 }
12597 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12598 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12599 }
12600
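/*
 * Free a tsbinfo and, unless the TSB was swapped out (TSB_SWAPPED), the
 * TSB memory it describes.
 */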
12601 static void
12602 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12603 {
12604 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12605 sfmmu_tsb_free(tsbinfo);
12606 }
	kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
}
12610
12611 /*
12612 * Setup all the references to physical memory for this tsbinfo.
12613 * The underlying page(s) must be locked.
12614 */
12615 static void
12616 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12617 {
12618 ASSERT(pfn != PFN_INVALID);
12619 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12620
12621 #ifndef sun4v
12622 if (tsbinfo->tsb_szc == 0) {
12623 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12624 PROT_WRITE|PROT_READ, TTE8K);
12625 } else {
12626 /*
12627 * Round down PA and use a large mapping; the handlers will
12628 * compute the TSB pointer at the correct offset into the
12629 * big virtual page. NOTE: this assumes all TSBs larger
12630 * than 8K must come from physically contiguous slabs of
12631 * size tsb_slab_size.
12632 */
12633 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12634 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12635 }
12636 tsbinfo->tsb_pa = ptob(pfn);
12637
12638 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12639 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12640
12641 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12642 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12643 #else /* sun4v */
12644 tsbinfo->tsb_pa = ptob(pfn);
12645 #endif /* sun4v */
12646 }
12647
12648
12649 /*
12650 * Returns zero on success, ENOMEM if over the high water mark,
12651 * or EAGAIN if the caller needs to retry with a smaller TSB
12652 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12653 *
12654 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12655 * is specified and the TSB requested is PAGESIZE, though it
12656 * may sleep waiting for memory if sufficient memory is not
12657 * available.
12658 */
12659 static int
12660 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12661 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12662 {
12663 caddr_t vaddr = NULL;
12664 caddr_t slab_vaddr;
12665 uintptr_t slab_mask;
12666 int tsbbytes = TSB_BYTES(tsbcode);
12667 int lowmem = 0;
12668 struct kmem_cache *kmem_cachep = NULL;
12669 vmem_t *vmp = NULL;
12670 lgrp_id_t lgrpid = LGRP_NONE;
12671 pfn_t pfn;
12672 uint_t cbflags = HAC_SLEEP;
12673 page_t **pplist;
12674 int ret;
12675
12676 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
12677 if (tsbbytes > MMU_PAGESIZE4M)
12678 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12679 else
12680 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12681
12682 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
12683 flags |= TSB_ALLOC;
12684
12685 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
12686
12687 tsbinfo->tsb_sfmmu = sfmmup;
12688
12689 /*
12690 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
12691 * return.
12692 */
12693 if ((flags & TSB_ALLOC) == 0) {
12694 tsbinfo->tsb_szc = tsbcode;
12695 tsbinfo->tsb_ttesz_mask = tteszmask;
12696 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
12697 tsbinfo->tsb_pa = -1;
12698 tsbinfo->tsb_tte.ll = 0;
12699 tsbinfo->tsb_next = NULL;
12700 tsbinfo->tsb_flags = TSB_SWAPPED;
12701 tsbinfo->tsb_cache = NULL;
12702 tsbinfo->tsb_vmp = NULL;
12703 return (0);
12704 }
12705
12706 #ifdef DEBUG
12707 /*
12708 * For debugging:
12709 * Randomly force allocation failures every tsb_alloc_mtbf
12710 * tries if TSB_FORCEALLOC is not specified. This will
12711 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
12712 * it is even, to allow testing of both failure paths...
12713 */
12714 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
12715 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
12716 tsb_alloc_count = 0;
12717 tsb_alloc_fail_mtbf++;
12718 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
12719 }
12720 #endif /* DEBUG */
12721
12722 /*
12723 * Enforce high water mark if we are not doing a forced allocation
12724 * and are not shrinking a process' TSB.
12725 */
12726 if ((flags & TSB_SHRINK) == 0 &&
12727 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
12728 if ((flags & TSB_FORCEALLOC) == 0)
12729 return (ENOMEM);
12730 lowmem = 1;
12731 }
12732
12733 /*
12734 * Allocate from the correct location based upon the size of the TSB
12735 * compared to the base page size, and what memory conditions dictate.
12736 * Note we always do nonblocking allocations from the TSB arena since
12737 * we don't want memory fragmentation to cause processes to block
12738 * indefinitely waiting for memory; until the kernel algorithms that
12739 * coalesce large pages are improved this is our best option.
12740 *
12741 * Algorithm:
12742 * If allocating a "large" TSB (>8K), allocate from the
12743 * appropriate kmem_tsb_default_arena vmem arena
12744 * else if low on memory or the TSB_FORCEALLOC flag is set or
12745 * tsb_forceheap is set
12746 * Allocate from kernel heap via sfmmu_tsb8k_cache with
12747 * KM_SLEEP (never fails)
12748 * else
12749 * Allocate from appropriate sfmmu_tsb_cache with
12750 * KM_NOSLEEP
12751 * endif
12752 */
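	/*
	 * For example, a 64K TSB request is satisfied from
	 * kmem_tsb_default_arena with VM_NOSLEEP and may fail with EAGAIN,
	 * while an 8K TSB requested under memory pressure (or with
	 * TSB_FORCEALLOC) comes from sfmmu_tsb8k_cache with KM_SLEEP and
	 * cannot fail.
	 */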
12753 if (tsb_lgrp_affinity)
12754 lgrpid = lgrp_home_id(curthread);
12755 if (lgrpid == LGRP_NONE)
12756 lgrpid = 0; /* use lgrp of boot CPU */
12757
12758 if (tsbbytes > MMU_PAGESIZE) {
12759 if (tsbbytes > MMU_PAGESIZE4M) {
12760 vmp = kmem_bigtsb_default_arena[lgrpid];
12761 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12762 0, 0, NULL, NULL, VM_NOSLEEP);
12763 } else {
12764 vmp = kmem_tsb_default_arena[lgrpid];
12765 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
12766 0, 0, NULL, NULL, VM_NOSLEEP);
12767 }
12768 #ifdef DEBUG
12769 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
12770 #else /* !DEBUG */
12771 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
12772 #endif /* DEBUG */
12773 kmem_cachep = sfmmu_tsb8k_cache;
12774 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
12775 ASSERT(vaddr != NULL);
12776 } else {
12777 kmem_cachep = sfmmu_tsb_cache[lgrpid];
12778 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
12779 }
12780
12781 tsbinfo->tsb_cache = kmem_cachep;
12782 tsbinfo->tsb_vmp = vmp;
12783
12784 if (vaddr == NULL) {
12785 return (EAGAIN);
12786 }
12787
12788 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
12789 kmem_cachep = tsbinfo->tsb_cache;
12790
12791 /*
12792 * If we are allocating from outside the cage, then we need to
12793 * register a relocation callback handler. Note that for now
12794 * since pseudo mappings always hang off of the slab's root page,
12795 * we need only lock the first 8K of the TSB slab. This is a bit
12796 * hacky but it is good for performance.
12797 */
12798 if (kmem_cachep != sfmmu_tsb8k_cache) {
12799 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
12800 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
12801 ASSERT(ret == 0);
12802 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
12803 cbflags, (void *)tsbinfo, &pfn, NULL);
12804
12805 /*
12806 * Need to free up resources if we could not successfully
12807 * add the callback function and return an error condition.
12808 */
12809 if (ret != 0) {
12810 if (kmem_cachep) {
12811 kmem_cache_free(kmem_cachep, vaddr);
12812 } else {
12813 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
12814 }
12815 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
12816 S_WRITE);
12817 return (EAGAIN);
12818 }
12819 } else {
12820 /*
12821 * Since allocation of 8K TSBs from heap is rare and occurs
12822 * during memory pressure we allocate them from permanent
12823 * memory rather than using callbacks to get the PFN.
12824 */
12825 pfn = hat_getpfnum(kas.a_hat, vaddr);
12826 }
12827
12828 tsbinfo->tsb_va = vaddr;
12829 tsbinfo->tsb_szc = tsbcode;
12830 tsbinfo->tsb_ttesz_mask = tteszmask;
12831 tsbinfo->tsb_next = NULL;
12832 tsbinfo->tsb_flags = 0;
12833
12834 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
12835
12836 sfmmu_inv_tsb(vaddr, tsbbytes);
12837
12838 if (kmem_cachep != sfmmu_tsb8k_cache) {
12839 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
12840 }
12841
12842 return (0);
12843 }
12844
12845 /*
12846 * Initialize per cpu tsb and per cpu tsbmiss_area
12847 */
12848 void
12849 sfmmu_init_tsbs(void)
12850 {
12851 int i;
12852 struct tsbmiss *tsbmissp;
12853 struct kpmtsbm *kpmtsbmp;
12854 #ifndef sun4v
12855 extern int dcache_line_mask;
12856 #endif /* sun4v */
12857 extern uint_t vac_colors;
12858
12859 /*
12860 * Init. tsb miss area.
12861 */
12862 tsbmissp = tsbmiss_area;
12863
12864 for (i = 0; i < NCPU; tsbmissp++, i++) {
12865 /*
12866 * initialize the tsbmiss area.
12867 * Do this for all possible CPUs as some may be added
12868 * while the system is running. There is no cost to this.
12869 */
12870 tsbmissp->ksfmmup = ksfmmup;
12871 #ifndef sun4v
12872 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
12873 #endif /* sun4v */
12874 tsbmissp->khashstart =
12875 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
12876 tsbmissp->uhashstart =
12877 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
12878 tsbmissp->khashsz = khmehash_num;
12879 tsbmissp->uhashsz = uhmehash_num;
12880 }
12881
12882 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
12883 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
12884
12885 if (kpm_enable == 0)
12886 return;
12887
12888 /* -- Begin KPM specific init -- */
12889
12890 if (kpm_smallpages) {
12891 /*
12892 * If we're using base pagesize pages for seg_kpm
12893 * mappings, we use the kernel TSB since we can't afford
12894 * to allocate a second huge TSB for these mappings.
12895 */
12896 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
12897 kpm_tsbsz = ktsb_szcode;
12898 kpmsm_tsbbase = kpm_tsbbase;
12899 kpmsm_tsbsz = kpm_tsbsz;
12900 } else {
12901 /*
12902 * In VAC conflict case, just put the entries in the
12903 * kernel 8K indexed TSB for now so we can find them.
12904 * This could really be changed in the future if we feel
12905 * the need...
12906 */
12907 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
12908 kpmsm_tsbsz = ktsb_szcode;
12909 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
12910 kpm_tsbsz = ktsb4m_szcode;
12911 }
12912
12913 kpmtsbmp = kpmtsbm_area;
12914 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
12915 /*
12916 * Initialize the kpmtsbm area.
12917 * Do this for all possible CPUs as some may be added
12918 * while the system is running. There is no cost to this.
12919 */
12920 kpmtsbmp->vbase = kpm_vbase;
12921 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
12922 kpmtsbmp->sz_shift = kpm_size_shift;
12923 kpmtsbmp->kpmp_shift = kpmp_shift;
12924 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
12925 if (kpm_smallpages == 0) {
12926 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
12927 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
12928 } else {
12929 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
12930 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
12931 }
12932 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
12933 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
12934 #ifdef DEBUG
12935 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
12936 #endif /* DEBUG */
12937 if (ktsb_phys)
12938 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
12939 }
12940
12941 /* -- End KPM specific init -- */
12942 }
12943
12944 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
12945 struct tsb_info ktsb_info[2];
12946
12947 /*
12948 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
12949 */
12950 void
12951 sfmmu_init_ktsbinfo()
12952 {
12953 ASSERT(ksfmmup != NULL);
12954 ASSERT(ksfmmup->sfmmu_tsb == NULL);
12955 /*
	 * Allocate tsbinfos for the kernel and copy in data
	 * to make debugging easier and sun4v setup easier.
12958 */
12959 ktsb_info[0].tsb_sfmmu = ksfmmup;
12960 ktsb_info[0].tsb_szc = ktsb_szcode;
12961 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
12962 ktsb_info[0].tsb_va = ktsb_base;
12963 ktsb_info[0].tsb_pa = ktsb_pbase;
12964 ktsb_info[0].tsb_flags = 0;
12965 ktsb_info[0].tsb_tte.ll = 0;
12966 ktsb_info[0].tsb_cache = NULL;
12967
12968 ktsb_info[1].tsb_sfmmu = ksfmmup;
12969 ktsb_info[1].tsb_szc = ktsb4m_szcode;
12970 ktsb_info[1].tsb_ttesz_mask = TSB4M;
12971 ktsb_info[1].tsb_va = ktsb4m_base;
12972 ktsb_info[1].tsb_pa = ktsb4m_pbase;
12973 ktsb_info[1].tsb_flags = 0;
12974 ktsb_info[1].tsb_tte.ll = 0;
12975 ktsb_info[1].tsb_cache = NULL;
12976
12977 /* Link them into ksfmmup. */
12978 ktsb_info[0].tsb_next = &ktsb_info[1];
12979 ktsb_info[1].tsb_next = NULL;
12980 ksfmmup->sfmmu_tsb = &ktsb_info[0];
12981
12982 sfmmu_setup_tsbinfo(ksfmmup);
12983 }
12984
12985 /*
12986 * Cache the last value returned from va_to_pa(). If the VA specified
12987 * in the current call to cached_va_to_pa() maps to the same Page (as the
 * in the current call to cached_va_to_pa() maps to the same page as the
 * previous call to cached_va_to_pa(), then compute the PA using
12990 *
12991 * Note: this function is neither MT-safe nor consistent in the presence
12992 * of multiple, interleaved threads. This function was created to enable
12993 * an optimization used during boot (at a point when there's only one thread
12994 * executing on the "boot CPU", and before startup_vm() has been called).
12995 */
12996 static uint64_t
12997 cached_va_to_pa(void *vaddr)
12998 {
12999 static uint64_t prev_vaddr_base = 0;
13000 static uint64_t prev_pfn = 0;
13001
13002 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13003 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13004 } else {
13005 uint64_t pa = va_to_pa(vaddr);
13006
13007 if (pa != ((uint64_t)-1)) {
13008 /*
13009 * Computed physical address is valid. Cache its
13010 * related info for the next cached_va_to_pa() call.
13011 */
13012 prev_pfn = pa & MMU_PAGEMASK;
13013 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13014 }
13015
13016 return (pa);
13017 }
13018 }
13019
13020 /*
13021 * Carve up our nucleus hblk region. We may allocate more hblks than
13022 * asked due to rounding errors but we are guaranteed to have at least
13023 * enough space to allocate the requested number of hblk8's and hblk1's.
13024 */
13025 void
13026 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13027 {
13028 struct hme_blk *hmeblkp;
13029 size_t hme8blk_sz, hme1blk_sz;
13030 size_t i;
13031 size_t hblk8_bound;
13032 ulong_t j = 0, k = 0;
13033
13034 ASSERT(addr != NULL && size != 0);
13035
13036 /* Need to use proper structure alignment */
13037 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13038 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13039
13040 nucleus_hblk8.list = (void *)addr;
13041 nucleus_hblk8.index = 0;
13042
13043 /*
13044 * Use as much memory as possible for hblk8's since we
13045 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13046 * We need to hold back enough space for the hblk1's which
13047 * we'll allocate next.
13048 */
13049 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
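	/*
	 * Holding back an extra hme8blk_sz in hblk8_bound, together with the
	 * "<=" test below, guarantees that the last hblk8 carved out ends at
	 * or before the space reserved for the nhblk1 hblk1's.
	 */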
13050 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13051 hmeblkp = (struct hme_blk *)addr;
13052 addr += hme8blk_sz;
13053 hmeblkp->hblk_nuc_bit = 1;
13054 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13055 }
13056 nucleus_hblk8.len = j;
13057 ASSERT(j >= nhblk8);
13058 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13059
13060 nucleus_hblk1.list = (void *)addr;
13061 nucleus_hblk1.index = 0;
13062 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13063 hmeblkp = (struct hme_blk *)addr;
13064 addr += hme1blk_sz;
13065 hmeblkp->hblk_nuc_bit = 1;
13066 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13067 }
13068 ASSERT(k >= nhblk1);
13069 nucleus_hblk1.len = k;
13070 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13071 }
13072
13073 /*
13074 * This function is currently not supported on this platform. For what
13075 * it's supposed to do, see hat.c and hat_srmmu.c
13076 */
13077 /* ARGSUSED */
13078 faultcode_t
13079 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13080 uint_t flags)
13081 {
13082 return (FC_NOSUPPORT);
13083 }
13084
13085 /*
 * Searches the mapping list of the page for a mapping of the same size. If
 * none is found, the corresponding bit is cleared in the p_index field. When
 * large pages become more prevalent in the system, we could maintain the
 * mapping list in order so we don't have to traverse the list each time; just
 * check the next and prev entries, and if both are of a different size, clear
 * the bit.
13091 */
13092 static void
13093 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13094 {
13095 struct sf_hment *sfhmep;
13096 struct hme_blk *hmeblkp;
13097 int index;
13098 pgcnt_t npgs;
13099
13100 ASSERT(ttesz > TTE8K);
13101
13102 ASSERT(sfmmu_mlist_held(pp));
13103
13104 ASSERT(PP_ISMAPPED_LARGE(pp));
13105
13106 /*
	 * Traverse the mapping list looking for another mapping of the same
	 * size, since we only want to clear the index field if all mappings
	 * of that size are gone.
13110 */
13111
13112 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13113 if (IS_PAHME(sfhmep))
13114 continue;
13115 hmeblkp = sfmmu_hmetohblk(sfhmep);
13116 if (hme_size(sfhmep) == ttesz) {
13117 /*
13118 * another mapping of the same size. don't clear index.
13119 */
13120 return;
13121 }
13122 }
13123
13124 /*
13125 * Clear the p_index bit for large page.
13126 */
13127 index = PAGESZ_TO_INDEX(ttesz);
13128 npgs = TTEPAGES(ttesz);
13129 while (npgs-- > 0) {
13130 ASSERT(pp->p_index & index);
13131 pp->p_index &= ~index;
13132 pp = PP_PAGENEXT(pp);
13133 }
13134 }
13135
13136 /*
13137 * return supported features
13138 */
13139 /* ARGSUSED */
13140 int
13141 hat_supported(enum hat_features feature, void *arg)
13142 {
13143 switch (feature) {
13144 case HAT_SHARED_PT:
13145 case HAT_DYNAMIC_ISM_UNMAP:
13146 case HAT_VMODSORT:
13147 return (1);
13148 case HAT_SHARED_REGIONS:
13149 if (shctx_on)
13150 return (1);
13151 else
13152 return (0);
13153 default:
13154 return (0);
13155 }
13156 }
13157
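/*
 * hat_enter/hat_exit serialize HAT operations on a user hat by taking the
 * hash-selected hat lock; the kernel hat (ksfmmup) is never locked here.
 */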
13158 void
13159 hat_enter(struct hat *hat)
13160 {
13161 hatlock_t *hatlockp;
13162
13163 if (hat != ksfmmup) {
13164 hatlockp = TSB_HASH(hat);
13165 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13166 }
13167 }
13168
13169 void
13170 hat_exit(struct hat *hat)
13171 {
13172 hatlock_t *hatlockp;
13173
13174 if (hat != ksfmmup) {
13175 hatlockp = TSB_HASH(hat);
13176 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13177 }
13178 }
13179
13180 /*ARGSUSED*/
13181 void
13182 hat_reserve(struct as *as, caddr_t addr, size_t len)
13183 {
13184 }
13185
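/*
 * Create the HAT kstats: the global statistics, the per-TSB-size counters,
 * and the writable per-CPU statistics (writing the latter clears them).
 */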
13186 static void
13187 hat_kstat_init(void)
13188 {
13189 kstat_t *ksp;
13190
13191 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13192 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13193 KSTAT_FLAG_VIRTUAL);
13194 if (ksp) {
13195 ksp->ks_data = (void *) &sfmmu_global_stat;
13196 kstat_install(ksp);
13197 }
13198 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13199 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13200 KSTAT_FLAG_VIRTUAL);
13201 if (ksp) {
13202 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13203 kstat_install(ksp);
13204 }
13205 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13206 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13207 KSTAT_FLAG_WRITABLE);
13208 if (ksp) {
13209 ksp->ks_update = sfmmu_kstat_percpu_update;
13210 kstat_install(ksp);
13211 }
13212 }
13213
13214 /* ARGSUSED */
13215 static int
13216 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13217 {
13218 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13219 struct tsbmiss *tsbm = tsbmiss_area;
13220 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13221 int i;
13222
13223 ASSERT(cpu_kstat);
13224 if (rw == KSTAT_READ) {
13225 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13226 cpu_kstat->sf_itlb_misses = 0;
13227 cpu_kstat->sf_dtlb_misses = 0;
13228 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13229 tsbm->uprot_traps;
13230 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13231 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13232 cpu_kstat->sf_tsb_hits = 0;
13233 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13234 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13235 }
13236 } else {
13237 /* KSTAT_WRITE is used to clear stats */
13238 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13239 tsbm->utsb_misses = 0;
13240 tsbm->ktsb_misses = 0;
13241 tsbm->uprot_traps = 0;
13242 tsbm->kprot_traps = 0;
13243 kpmtsbm->kpm_dtlb_misses = 0;
13244 kpmtsbm->kpm_tsb_misses = 0;
13245 }
13246 }
13247 return (0);
13248 }
13249
13250 #ifdef DEBUG
13251
13252 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13253
13254 /*
13255 * A tte checker. *orig_old is the value we read before cas.
13256 * *cur is the value returned by cas.
13257 * *new is the desired value when we do the cas.
13258 *
13259 * *hmeblkp is currently unused.
13260 */
13261
13262 /* ARGSUSED */
13263 void
13264 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13265 {
13266 pfn_t i, j, k;
13267 int cpuid = CPU->cpu_id;
13268
13269 gorig[cpuid] = orig_old;
13270 gcur[cpuid] = cur;
13271 gnew[cpuid] = new;
13272
13273 #ifdef lint
13274 hmeblkp = hmeblkp;
13275 #endif
13276
13277 if (TTE_IS_VALID(orig_old)) {
13278 if (TTE_IS_VALID(cur)) {
13279 i = TTE_TO_TTEPFN(orig_old);
13280 j = TTE_TO_TTEPFN(cur);
13281 k = TTE_TO_TTEPFN(new);
13282 if (i != j) {
13283 /* remap error? */
13284 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13285 }
13286
13287 if (i != k) {
13288 /* remap error? */
13289 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13290 }
13291 } else {
13292 if (TTE_IS_VALID(new)) {
13293 panic("chk_tte: invalid cur? ");
13294 }
13295
13296 i = TTE_TO_TTEPFN(orig_old);
13297 k = TTE_TO_TTEPFN(new);
13298 if (i != k) {
13299 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13300 }
13301 }
13302 } else {
13303 if (TTE_IS_VALID(cur)) {
13304 j = TTE_TO_TTEPFN(cur);
13305 if (TTE_IS_VALID(new)) {
13306 k = TTE_TO_TTEPFN(new);
13307 if (j != k) {
13308 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13309 j, k);
13310 }
13311 } else {
13312 panic("chk_tte: why here?");
13313 }
13314 } else {
13315 if (!TTE_IS_VALID(new)) {
13316 panic("chk_tte: why here2 ?");
13317 }
13318 }
13319 }
13320 }
13321
13322 #endif /* DEBUG */
13323
13324 extern void prefetch_tsbe_read(struct tsbe *);
13325 extern void prefetch_tsbe_write(struct tsbe *);
13326
13327
/*
 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
 * us optimal performance on Cheetah+. You can only have 8 outstanding
 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
 * prefetch to make the best use of the prefetch capability.
 */
13334 #define TSBE_PREFETCH_STRIDE (7)
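
/*
 * Illustrative arithmetic (not used by the code): with 16-byte TSB entries
 * and 64-byte P$ lines there are 4 entries per line, so a read prefetch is
 * issued on every 4th entry and prefetching stops
 * 4 * (TSBE_PREFETCH_STRIDE + 1) = 32 entries before the end of the old
 * TSB; e.g. for a 512-entry TSB, last_prefetch = 512 - 32 = 480 (see
 * sfmmu_copy_tsb() below).
 */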
13335
13336 void
13337 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13338 {
13339 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13340 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13341 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13342 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13343 struct tsbe *old;
13344 struct tsbe *new;
13345 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13346 uint64_t va;
13347 int new_offset;
13348 int i;
13349 int vpshift;
13350 int last_prefetch;
13351
13352 if (old_bytes == new_bytes) {
13353 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13354 } else {
13355
		/*
		 * A TSBE is 16 bytes, which means there are four TSBEs per
		 * P$ line (64 bytes), so we issue a prefetch every 4 TSBEs.
		 */
13360 old = (struct tsbe *)old_tsbinfo->tsb_va;
13361 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13362 for (i = 0; i < old_entries; i++, old++) {
13363 if (((i & (4-1)) == 0) && (i < last_prefetch))
13364 prefetch_tsbe_read(old);
13365 if (!old->tte_tag.tag_invalid) {
13366 /*
13367 * We have a valid TTE to remap. Check the
13368 * size. We won't remap 64K or 512K TTEs
13369 * because they span more than one TSB entry
13370 * and are indexed using an 8K virt. page.
13371 * Ditto for 32M and 256M TTEs.
13372 */
13373 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13374 TTE_CSZ(&old->tte_data) == TTE512K)
13375 continue;
13376 if (mmu_page_sizes == max_mmu_page_sizes) {
13377 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13378 TTE_CSZ(&old->tte_data) == TTE256M)
13379 continue;
13380 }
13381
13382 /* clear the lower 22 bits of the va */
13383 va = *(uint64_t *)old << 22;
13384 /* turn va into a virtual pfn */
13385 va >>= 22 - TSB_START_SIZE;
13386 /*
13387 * or in bits from the offset in the tsb
13388 * to get the real virtual pfn. These
13389 * correspond to bits [21:13] in the va
13390 */
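				/*
				 * Illustrative example (assuming, say, a
				 * 1024-entry new TSB): since new_entries
				 * is a power of two,
				 *
				 *	new_offset = va & (new_entries - 1)
				 *	           = va & 0x3ff
				 *
				 * picks the low-order bits of the virtual
				 * pfn as the index into the new TSB.
				 */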
13391 vpshift =
13392 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13393 0x1ff;
13394 va |= (i << vpshift);
13395 va >>= vpshift;
13396 new_offset = va & (new_entries - 1);
13397 new = new_base + new_offset;
13398 prefetch_tsbe_write(new);
13399 *new = *old;
13400 }
13401 }
13402 }
13403 }
13404
13405 /*
13406 * unused in sfmmu
13407 */
13408 void
13409 hat_dump(void)
13410 {
13411 }
13412
13413 /*
13414 * Called when a thread is exiting and we have switched to the kernel address
13415 * space. Perform the same VM initialization resume() uses when switching
13416 * processes.
13417 *
13418 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13419 * we call it anyway in case the semantics change in the future.
13420 */
13421 /*ARGSUSED*/
13422 void
13423 hat_thread_exit(kthread_t *thd)
13424 {
13425 uint_t pgsz_cnum;
13426 uint_t pstate_save;
13427
13428 ASSERT(thd->t_procp->p_as == &kas);
13429
13430 pgsz_cnum = KCONTEXT;
13431 #ifdef sun4u
13432 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13433 #endif
13434
	/*
	 * Note that sfmmu_load_mmustate() is currently a no-op for
	 * kernel threads. We still need to disable interrupts here,
	 * because sfmmu_load_mmustate() panics if it is called with
	 * interrupts enabled.
	 */
13441 pstate_save = sfmmu_disable_intrs();
13442
13443 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13444 sfmmu_setctx_sec(pgsz_cnum);
13445 sfmmu_load_mmustate(ksfmmup);
13446 sfmmu_enable_intrs(pstate_save);
13447 }
13448
13449
13450 /*
13451 * SRD support
13452 */
13453 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13454 (((uintptr_t)(vp)) >> 11)) & \
13455 srd_hashmask)
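
/*
 * Purely illustrative reading of the hash above (the vnode address below
 * is made up):
 *
 *	vp   = 0x30001234560
 *	hash = ((vp >> 4) ^ (vp >> 11)) & srd_hashmask
 *
 * xor-ing two different right shifts of the pointer mixes low- and
 * mid-order address bits, so vnodes allocated near each other still
 * spread across the srd_buckets[] hash chains.
 */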
13456
13457 /*
13458 * Attach the process to the srd struct associated with the exec vnode
13459 * from which the process is started.
13460 */
13461 void
13462 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13463 {
13464 uint_t hash = SRD_HASH_FUNCTION(evp);
13465 sf_srd_t *srdp;
13466 sf_srd_t *newsrdp;
13467
13468 ASSERT(sfmmup != ksfmmup);
13469 ASSERT(sfmmup->sfmmu_srdp == NULL);
13470
13471 if (!shctx_on) {
13472 return;
13473 }
13474
13475 VN_HOLD(evp);
13476
13477 if (srd_buckets[hash].srdb_srdp != NULL) {
13478 mutex_enter(&srd_buckets[hash].srdb_lock);
13479 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13480 srdp = srdp->srd_hash) {
13481 if (srdp->srd_evp == evp) {
13482 ASSERT(srdp->srd_refcnt >= 0);
13483 sfmmup->sfmmu_srdp = srdp;
13484 atomic_inc_32(
13485 (volatile uint_t *)&srdp->srd_refcnt);
13486 mutex_exit(&srd_buckets[hash].srdb_lock);
13487 return;
13488 }
13489 }
13490 mutex_exit(&srd_buckets[hash].srdb_lock);
13491 }
13492 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13493 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13494
13495 newsrdp->srd_evp = evp;
13496 newsrdp->srd_refcnt = 1;
13497 newsrdp->srd_hmergnfree = NULL;
13498 newsrdp->srd_ismrgnfree = NULL;
13499
13500 mutex_enter(&srd_buckets[hash].srdb_lock);
13501 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13502 srdp = srdp->srd_hash) {
13503 if (srdp->srd_evp == evp) {
13504 ASSERT(srdp->srd_refcnt >= 0);
13505 sfmmup->sfmmu_srdp = srdp;
13506 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13507 mutex_exit(&srd_buckets[hash].srdb_lock);
13508 kmem_cache_free(srd_cache, newsrdp);
13509 return;
13510 }
13511 }
13512 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13513 srd_buckets[hash].srdb_srdp = newsrdp;
13514 sfmmup->sfmmu_srdp = newsrdp;
13515
13516 mutex_exit(&srd_buckets[hash].srdb_lock);
13517
13518 }
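
/*
 * A note on the pattern above (descriptive only): the bucket is first
 * searched under srdb_lock, the lock is dropped for the KM_SLEEP
 * allocation, and the bucket is re-searched under the lock before the new
 * srd is inserted. If another thread raced in and inserted a matching srd
 * in the meantime, the freshly allocated one is simply returned to
 * srd_cache.
 */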
13519
13520 static void
13521 sfmmu_leave_srd(sfmmu_t *sfmmup)
13522 {
13523 vnode_t *evp;
13524 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13525 uint_t hash;
13526 sf_srd_t **prev_srdpp;
13527 sf_region_t *rgnp;
13528 sf_region_t *nrgnp;
13529 #ifdef DEBUG
13530 int rgns = 0;
13531 #endif
13532 int i;
13533
13534 ASSERT(sfmmup != ksfmmup);
13535 ASSERT(srdp != NULL);
13536 ASSERT(srdp->srd_refcnt > 0);
13537 ASSERT(sfmmup->sfmmu_scdp == NULL);
13538 ASSERT(sfmmup->sfmmu_free == 1);
13539
13540 sfmmup->sfmmu_srdp = NULL;
13541 evp = srdp->srd_evp;
13542 ASSERT(evp != NULL);
13543 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13544 VN_RELE(evp);
13545 return;
13546 }
13547
13548 hash = SRD_HASH_FUNCTION(evp);
13549 mutex_enter(&srd_buckets[hash].srdb_lock);
13550 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13551 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13552 if (srdp->srd_evp == evp) {
13553 break;
13554 }
13555 }
13556 if (srdp == NULL || srdp->srd_refcnt) {
13557 mutex_exit(&srd_buckets[hash].srdb_lock);
13558 VN_RELE(evp);
13559 return;
13560 }
13561 *prev_srdpp = srdp->srd_hash;
13562 mutex_exit(&srd_buckets[hash].srdb_lock);
13563
13564 ASSERT(srdp->srd_refcnt == 0);
13565 VN_RELE(evp);
13566
13567 #ifdef DEBUG
13568 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13569 ASSERT(srdp->srd_rgnhash[i] == NULL);
13570 }
13571 #endif /* DEBUG */
13572
	/* free each hme region in the srd */
13574 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13575 nrgnp = rgnp->rgn_next;
13576 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13577 ASSERT(rgnp->rgn_refcnt == 0);
13578 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13579 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13580 ASSERT(rgnp->rgn_hmeflags == 0);
13581 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13582 #ifdef DEBUG
13583 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13584 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13585 }
13586 rgns++;
13587 #endif /* DEBUG */
13588 kmem_cache_free(region_cache, rgnp);
13589 }
13590 ASSERT(rgns == srdp->srd_next_hmerid);
13591
13592 #ifdef DEBUG
13593 rgns = 0;
13594 #endif
	/* free each ism region in the srd */
13596 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13597 nrgnp = rgnp->rgn_next;
13598 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13599 ASSERT(rgnp->rgn_refcnt == 0);
13600 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13601 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13602 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13603 #ifdef DEBUG
13604 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13605 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13606 }
13607 rgns++;
13608 #endif /* DEBUG */
13609 kmem_cache_free(region_cache, rgnp);
13610 }
13611 ASSERT(rgns == srdp->srd_next_ismrid);
13612 ASSERT(srdp->srd_ismbusyrgns == 0);
13613 ASSERT(srdp->srd_hmebusyrgns == 0);
13614
13615 srdp->srd_next_ismrid = 0;
13616 srdp->srd_next_hmerid = 0;
13617
13618 bzero((void *)srdp->srd_ismrgnp,
13619 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13620 bzero((void *)srdp->srd_hmergnp,
13621 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13622
13623 ASSERT(srdp->srd_scdp == NULL);
13624 kmem_cache_free(srd_cache, srdp);
13625 }
13626
13627 /* ARGSUSED */
13628 static int
13629 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13630 {
13631 sf_srd_t *srdp = (sf_srd_t *)buf;
13632 bzero(buf, sizeof (*srdp));
13633
13634 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13635 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13636 return (0);
13637 }
13638
13639 /* ARGSUSED */
13640 static void
13641 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13642 {
13643 sf_srd_t *srdp = (sf_srd_t *)buf;
13644
13645 mutex_destroy(&srdp->srd_mutex);
13646 mutex_destroy(&srdp->srd_scd_mutex);
13647 }
13648
13649 /*
13650 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13651 * at the same time for the same process and address range. This is ensured by
13652 * the fact that address space is locked as writer when a process joins the
13653 * regions. Therefore there's no need to hold an srd lock during the entire
13654 * execution of hat_join_region()/hat_leave_region().
13655 */
13656
13657 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13658 (((uintptr_t)(obj)) >> 11)) & \
13659 srd_rgn_hashmask)
13660 /*
13661 * This routine implements the shared context functionality required when
13662 * attaching a segment to an address space. It must be called from
13663 * hat_share() for D(ISM) segments and from segvn_create() for segments
13664 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13665 * which is saved in the private segment data for hme segments and
13666 * the ism_map structure for ism segments.
13667 */
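/*
 * Minimal usage sketch (hypothetical caller, for illustration only; the
 * variable names are made up and the real callers are hat_share() and
 * segvn_create() as described above):
 *
 *	hat_region_cookie_t rcookie;
 *
 *	rcookie = hat_join_region(hat, seg->s_base, seg->s_size, evp,
 *	    off, prot, szc, NULL, HAT_REGION_TEXT);
 *	if (rcookie != HAT_INVALID_REGION_COOKIE) {
 *		... use the shared region ...
 *		hat_leave_region(hat, rcookie, HAT_REGION_TEXT);
 *	}
 */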
13668 hat_region_cookie_t
13669 hat_join_region(struct hat *sfmmup,
13670 caddr_t r_saddr,
13671 size_t r_size,
13672 void *r_obj,
13673 u_offset_t r_objoff,
13674 uchar_t r_perm,
13675 uchar_t r_pgszc,
13676 hat_rgn_cb_func_t r_cb_function,
13677 uint_t flags)
13678 {
13679 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13680 uint_t rhash;
13681 uint_t rid;
13682 hatlock_t *hatlockp;
13683 sf_region_t *rgnp;
13684 sf_region_t *new_rgnp = NULL;
13685 int i;
13686 uint16_t *nextidp;
13687 sf_region_t **freelistp;
13688 int maxids;
13689 sf_region_t **rarrp;
13690 uint16_t *busyrgnsp;
13691 ulong_t rttecnt;
13692 uchar_t tteflag;
13693 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13694 int text = (r_type == HAT_REGION_TEXT);
13695
13696 if (srdp == NULL || r_size == 0) {
13697 return (HAT_INVALID_REGION_COOKIE);
13698 }
13699
13700 ASSERT(sfmmup != ksfmmup);
13701 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
13702 ASSERT(srdp->srd_refcnt > 0);
13703 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13704 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13705 ASSERT(r_pgszc < mmu_page_sizes);
13706 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
13707 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
13708 panic("hat_join_region: region addr or size is not aligned\n");
13709 }
13710
13711
13712 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13713 SFMMU_REGION_HME;
	/*
	 * We currently only support shared hmes for the read-only main text
	 * region.
	 */
13718 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
13719 (r_perm & PROT_WRITE))) {
13720 return (HAT_INVALID_REGION_COOKIE);
13721 }
13722
13723 rhash = RGN_HASH_FUNCTION(r_obj);
13724
13725 if (r_type == SFMMU_REGION_ISM) {
13726 nextidp = &srdp->srd_next_ismrid;
13727 freelistp = &srdp->srd_ismrgnfree;
13728 maxids = SFMMU_MAX_ISM_REGIONS;
13729 rarrp = srdp->srd_ismrgnp;
13730 busyrgnsp = &srdp->srd_ismbusyrgns;
13731 } else {
13732 nextidp = &srdp->srd_next_hmerid;
13733 freelistp = &srdp->srd_hmergnfree;
13734 maxids = SFMMU_MAX_HME_REGIONS;
13735 rarrp = srdp->srd_hmergnp;
13736 busyrgnsp = &srdp->srd_hmebusyrgns;
13737 }
13738
13739 mutex_enter(&srdp->srd_mutex);
13740
13741 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13742 rgnp = rgnp->rgn_hash) {
13743 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
13744 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
13745 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
13746 break;
13747 }
13748 }
13749
13750 rfound:
13751 if (rgnp != NULL) {
13752 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
13753 ASSERT(rgnp->rgn_cb_function == r_cb_function);
13754 ASSERT(rgnp->rgn_refcnt >= 0);
13755 rid = rgnp->rgn_id;
13756 ASSERT(rid < maxids);
13757 ASSERT(rarrp[rid] == rgnp);
13758 ASSERT(rid < *nextidp);
13759 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
13760 mutex_exit(&srdp->srd_mutex);
13761 if (new_rgnp != NULL) {
13762 kmem_cache_free(region_cache, new_rgnp);
13763 }
13764 if (r_type == SFMMU_REGION_HME) {
13765 int myjoin =
13766 (sfmmup == astosfmmu(curthread->t_procp->p_as));
13767
13768 sfmmu_link_to_hmeregion(sfmmup, rgnp);
			/*
			 * The bitmap should be updated after linking the
			 * sfmmu onto the region list so that pageunload()
			 * doesn't skip the TSB/TLB flush. As soon as the
			 * bitmap is updated, another thread in this process
			 * can start accessing this region.
			 */
			/*
			 * Normally ttecnt accounting is done as part of
			 * pagefault handling. But a process may not take any
			 * pagefaults on shared hmeblks created by some other
			 * process. To compensate for this, assume that the
			 * entire region will end up faulted in using
			 * the region's pagesize.
			 */
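			/*
			 * For example (numbers purely illustrative), a
			 * 4MB text region with r_pgszc == TTE4M adds
			 *
			 *	rttecnt = r_size >> TTE_PAGE_SHIFT(TTE4M)
			 *	        = 0x400000 >> 22 = 1
			 *
			 * to sfmmu_ttecnt[TTE4M] below, as if its single
			 * 4M tte had already been faulted in.
			 */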
13785 if (r_pgszc > TTE8K) {
13786 tteflag = 1 << r_pgszc;
13787 if (disable_large_pages & tteflag) {
13788 tteflag = 0;
13789 }
13790 } else {
13791 tteflag = 0;
13792 }
13793 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
13794 hatlockp = sfmmu_hat_enter(sfmmup);
13795 sfmmup->sfmmu_rtteflags |= tteflag;
13796 sfmmu_hat_exit(hatlockp);
13797 }
13798 hatlockp = sfmmu_hat_enter(sfmmup);
13799
			/*
			 * Preallocate 1/4 of the ttecnt in the 8K TSB for a
			 * >= 4M region to allow for large page allocation
			 * failure.
			 */
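			/*
			 * E.g. (illustrative only) a 4MB region reserves
			 *
			 *	0x400000 >> (13 + 2) = 128
			 *
			 * extra 8K ttecnt entries, i.e. one quarter of
			 * the 512 8K pages it would need if every large
			 * page allocation failed.
			 */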
13804 if (r_pgszc >= TTE4M) {
13805 sfmmup->sfmmu_tsb0_4minflcnt +=
13806 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
13807 }
13808
13809 /* update sfmmu_ttecnt with the shme rgn ttecnt */
13810 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
13811 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
13812 rttecnt);
13813
13814 if (text && r_pgszc >= TTE4M &&
13815 (tteflag || ((disable_large_pages >> TTE4M) &
13816 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
13817 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
13818 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
13819 }
13820
13821 sfmmu_hat_exit(hatlockp);
13822 /*
13823 * On Panther we need to make sure TLB is programmed
13824 * to accept 32M/256M pages. Call
13825 * sfmmu_check_page_sizes() now to make sure TLB is
13826 * setup before making hmeregions visible to other
13827 * threads.
13828 */
13829 sfmmu_check_page_sizes(sfmmup, 1);
13830 hatlockp = sfmmu_hat_enter(sfmmup);
13831 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
13832
			/*
			 * If the context is invalid, the tsb miss exception
			 * code will call sfmmu_check_page_sizes() and update
			 * the tsbmiss area later.
			 */
13838 kpreempt_disable();
13839 if (myjoin &&
13840 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
13841 != INVALID_CONTEXT)) {
13842 struct tsbmiss *tsbmp;
13843
13844 tsbmp = &tsbmiss_area[CPU->cpu_id];
13845 ASSERT(sfmmup == tsbmp->usfmmup);
13846 BT_SET(tsbmp->shmermap, rid);
13847 if (r_pgszc > TTE64K) {
13848 tsbmp->uhat_rtteflags |= tteflag;
13849 }
13850
13851 }
13852 kpreempt_enable();
13853
13854 sfmmu_hat_exit(hatlockp);
13855 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
13856 HAT_INVALID_REGION_COOKIE);
13857 } else {
13858 hatlockp = sfmmu_hat_enter(sfmmup);
13859 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
13860 sfmmu_hat_exit(hatlockp);
13861 }
13862 ASSERT(rid < maxids);
13863
13864 if (r_type == SFMMU_REGION_ISM) {
13865 sfmmu_find_scd(sfmmup);
13866 }
13867 return ((hat_region_cookie_t)((uint64_t)rid));
13868 }
13869
13870 ASSERT(new_rgnp == NULL);
13871
13872 if (*busyrgnsp >= maxids) {
13873 mutex_exit(&srdp->srd_mutex);
13874 return (HAT_INVALID_REGION_COOKIE);
13875 }
13876
13877 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
13878 if (*freelistp != NULL) {
13879 rgnp = *freelistp;
13880 *freelistp = rgnp->rgn_next;
13881 ASSERT(rgnp->rgn_id < *nextidp);
13882 ASSERT(rgnp->rgn_id < maxids);
13883 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13884 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
13885 == r_type);
13886 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
13887 ASSERT(rgnp->rgn_hmeflags == 0);
13888 } else {
13889 /*
13890 * release local locks before memory allocation.
13891 */
13892 mutex_exit(&srdp->srd_mutex);
13893
13894 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
13895
13896 mutex_enter(&srdp->srd_mutex);
13897 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
13898 rgnp = rgnp->rgn_hash) {
13899 if (rgnp->rgn_saddr == r_saddr &&
13900 rgnp->rgn_size == r_size &&
13901 rgnp->rgn_obj == r_obj &&
13902 rgnp->rgn_objoff == r_objoff &&
13903 rgnp->rgn_perm == r_perm &&
13904 rgnp->rgn_pgszc == r_pgszc) {
13905 break;
13906 }
13907 }
13908 if (rgnp != NULL) {
13909 goto rfound;
13910 }
13911
13912 if (*nextidp >= maxids) {
13913 mutex_exit(&srdp->srd_mutex);
13914 goto fail;
13915 }
13916 rgnp = new_rgnp;
13917 new_rgnp = NULL;
13918 rgnp->rgn_id = (*nextidp)++;
13919 ASSERT(rgnp->rgn_id < maxids);
13920 ASSERT(rarrp[rgnp->rgn_id] == NULL);
13921 rarrp[rgnp->rgn_id] = rgnp;
13922 }
13923
13924 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13925 ASSERT(rgnp->rgn_hmeflags == 0);
13926 #ifdef DEBUG
13927 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13928 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13929 }
13930 #endif
13931 rgnp->rgn_saddr = r_saddr;
13932 rgnp->rgn_size = r_size;
13933 rgnp->rgn_obj = r_obj;
13934 rgnp->rgn_objoff = r_objoff;
13935 rgnp->rgn_perm = r_perm;
13936 rgnp->rgn_pgszc = r_pgszc;
13937 rgnp->rgn_flags = r_type;
13938 rgnp->rgn_refcnt = 0;
13939 rgnp->rgn_cb_function = r_cb_function;
13940 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
13941 srdp->srd_rgnhash[rhash] = rgnp;
13942 (*busyrgnsp)++;
13943 ASSERT(*busyrgnsp <= maxids);
13944 goto rfound;
13945
13946 fail:
13947 ASSERT(new_rgnp != NULL);
13948 kmem_cache_free(region_cache, new_rgnp);
13949 return (HAT_INVALID_REGION_COOKIE);
13950 }
13951
/*
 * This function implements the shared context functionality required
 * when detaching a segment from an address space. It must be called
 * from hat_unshare() for all D(ISM) segments and from segvn_unmap()
 * for segments with a valid region_cookie.
 * It will also be called from all seg_vn routines which change a
 * segment's attributes, such as segvn_setprot(), segvn_setpagesize(),
 * segvn_clrszc() and segvn_advise(), as well as in the case of a COW
 * fault from segvn_fault().
 */
13962 void
13963 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
13964 {
13965 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13966 sf_scd_t *scdp;
13967 uint_t rhash;
13968 uint_t rid = (uint_t)((uint64_t)rcookie);
13969 hatlock_t *hatlockp = NULL;
13970 sf_region_t *rgnp;
13971 sf_region_t **prev_rgnpp;
13972 sf_region_t *cur_rgnp;
13973 void *r_obj;
13974 int i;
13975 caddr_t r_saddr;
13976 caddr_t r_eaddr;
13977 size_t r_size;
13978 uchar_t r_pgszc;
13979 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
13980
13981 ASSERT(sfmmup != ksfmmup);
13982 ASSERT(srdp != NULL);
13983 ASSERT(srdp->srd_refcnt > 0);
13984 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
13985 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
13986 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
13987
13988 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
13989 SFMMU_REGION_HME;
13990
13991 if (r_type == SFMMU_REGION_ISM) {
13992 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
13993 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
13994 rgnp = srdp->srd_ismrgnp[rid];
13995 } else {
13996 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
13997 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
13998 rgnp = srdp->srd_hmergnp[rid];
13999 }
14000 ASSERT(rgnp != NULL);
14001 ASSERT(rgnp->rgn_id == rid);
14002 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14003 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14004 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14005
14006 if (sfmmup->sfmmu_free) {
14007 ulong_t rttecnt;
14008 r_pgszc = rgnp->rgn_pgszc;
14009 r_size = rgnp->rgn_size;
14010
14011 ASSERT(sfmmup->sfmmu_scdp == NULL);
14012 if (r_type == SFMMU_REGION_ISM) {
14013 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14014 } else {
14015 /* update shme rgns ttecnt in sfmmu_ttecnt */
14016 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14017 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14018
14019 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14020 -rttecnt);
14021
14022 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14023 }
14024 } else if (r_type == SFMMU_REGION_ISM) {
14025 hatlockp = sfmmu_hat_enter(sfmmup);
14026 ASSERT(rid < srdp->srd_next_ismrid);
14027 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14028 scdp = sfmmup->sfmmu_scdp;
14029 if (scdp != NULL &&
14030 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14031 sfmmu_leave_scd(sfmmup, r_type);
14032 ASSERT(sfmmu_hat_lock_held(sfmmup));
14033 }
14034 sfmmu_hat_exit(hatlockp);
14035 } else {
14036 ulong_t rttecnt;
14037 r_pgszc = rgnp->rgn_pgszc;
14038 r_saddr = rgnp->rgn_saddr;
14039 r_size = rgnp->rgn_size;
14040 r_eaddr = r_saddr + r_size;
14041
14042 ASSERT(r_type == SFMMU_REGION_HME);
14043 hatlockp = sfmmu_hat_enter(sfmmup);
14044 ASSERT(rid < srdp->srd_next_hmerid);
14045 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14046
		/*
		 * If the region is part of an SCD call sfmmu_leave_scd().
		 * Otherwise, if the process is not exiting and has a valid
		 * context, just drop the context on the floor to lose stale
		 * TLB entries and force the update of the tsb miss area to
		 * reflect the new region map. After that clean up our TSB
		 * entries.
		 */
14054 scdp = sfmmup->sfmmu_scdp;
14055 if (scdp != NULL &&
14056 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14057 sfmmu_leave_scd(sfmmup, r_type);
14058 ASSERT(sfmmu_hat_lock_held(sfmmup));
14059 }
14060 sfmmu_invalidate_ctx(sfmmup);
14061
14062 i = TTE8K;
14063 while (i < mmu_page_sizes) {
14064 if (rgnp->rgn_ttecnt[i] != 0) {
14065 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14066 r_eaddr, i);
14067 if (i < TTE4M) {
14068 i = TTE4M;
14069 continue;
14070 } else {
14071 break;
14072 }
14073 }
14074 i++;
14075 }
14076 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14077 if (r_pgszc >= TTE4M) {
14078 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14079 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14080 rttecnt);
14081 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14082 }
14083
14084 /* update shme rgns ttecnt in sfmmu_ttecnt */
14085 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14086 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14087 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14088
14089 sfmmu_hat_exit(hatlockp);
14090 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14091 /* sfmmup left the scd, grow private tsb */
14092 sfmmu_check_page_sizes(sfmmup, 1);
14093 } else {
14094 sfmmu_check_page_sizes(sfmmup, 0);
14095 }
14096 }
14097
14098 if (r_type == SFMMU_REGION_HME) {
14099 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14100 }
14101
14102 r_obj = rgnp->rgn_obj;
14103 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14104 return;
14105 }
14106
14107 /*
14108 * looks like nobody uses this region anymore. Free it.
14109 */
14110 rhash = RGN_HASH_FUNCTION(r_obj);
14111 mutex_enter(&srdp->srd_mutex);
14112 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14113 (cur_rgnp = *prev_rgnpp) != NULL;
14114 prev_rgnpp = &cur_rgnp->rgn_hash) {
14115 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14116 break;
14117 }
14118 }
14119
14120 if (cur_rgnp == NULL) {
14121 mutex_exit(&srdp->srd_mutex);
14122 return;
14123 }
14124
14125 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14126 *prev_rgnpp = rgnp->rgn_hash;
14127 if (r_type == SFMMU_REGION_ISM) {
14128 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14129 ASSERT(rid < srdp->srd_next_ismrid);
14130 rgnp->rgn_next = srdp->srd_ismrgnfree;
14131 srdp->srd_ismrgnfree = rgnp;
14132 ASSERT(srdp->srd_ismbusyrgns > 0);
14133 srdp->srd_ismbusyrgns--;
14134 mutex_exit(&srdp->srd_mutex);
14135 return;
14136 }
14137 mutex_exit(&srdp->srd_mutex);
14138
14139 /*
14140 * Destroy region's hmeblks.
14141 */
14142 sfmmu_unload_hmeregion(srdp, rgnp);
14143
14144 rgnp->rgn_hmeflags = 0;
14145
14146 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14147 ASSERT(rgnp->rgn_id == rid);
14148 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14149 rgnp->rgn_ttecnt[i] = 0;
14150 }
14151 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14152 mutex_enter(&srdp->srd_mutex);
14153 ASSERT(rid < srdp->srd_next_hmerid);
14154 rgnp->rgn_next = srdp->srd_hmergnfree;
14155 srdp->srd_hmergnfree = rgnp;
14156 ASSERT(srdp->srd_hmebusyrgns > 0);
14157 srdp->srd_hmebusyrgns--;
14158 mutex_exit(&srdp->srd_mutex);
14159 }
14160
14161 /*
14162 * For now only called for hmeblk regions and not for ISM regions.
14163 */
14164 void
14165 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14166 {
14167 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14168 uint_t rid = (uint_t)((uint64_t)rcookie);
14169 sf_region_t *rgnp;
14170 sf_rgn_link_t *rlink;
14171 sf_rgn_link_t *hrlink;
14172 ulong_t rttecnt;
14173
14174 ASSERT(sfmmup != ksfmmup);
14175 ASSERT(srdp != NULL);
14176 ASSERT(srdp->srd_refcnt > 0);
14177
14178 ASSERT(rid < srdp->srd_next_hmerid);
14179 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14180 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14181
14182 rgnp = srdp->srd_hmergnp[rid];
14183 ASSERT(rgnp->rgn_refcnt > 0);
14184 ASSERT(rgnp->rgn_id == rid);
14185 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14186 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14187
14188 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14189
14190 /* LINTED: constant in conditional context */
14191 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14192 ASSERT(rlink != NULL);
14193 mutex_enter(&rgnp->rgn_mutex);
14194 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14195 /* LINTED: constant in conditional context */
14196 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14197 ASSERT(hrlink != NULL);
14198 ASSERT(hrlink->prev == NULL);
14199 rlink->next = rgnp->rgn_sfmmu_head;
14200 rlink->prev = NULL;
14201 hrlink->prev = sfmmup;
14202 /*
14203 * make sure rlink's next field is correct
14204 * before making this link visible.
14205 */
14206 membar_stst();
14207 rgnp->rgn_sfmmu_head = sfmmup;
14208 mutex_exit(&rgnp->rgn_mutex);
14209
14210 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14211 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14212 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14213 /* update tsb0 inflation count */
14214 if (rgnp->rgn_pgszc >= TTE4M) {
14215 sfmmup->sfmmu_tsb0_4minflcnt +=
14216 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14217 }
14218 /*
14219 * Update regionid bitmask without hat lock since no other thread
14220 * can update this region bitmask right now.
14221 */
14222 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14223 }
14224
14225 /* ARGSUSED */
14226 static int
14227 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14228 {
14229 sf_region_t *rgnp = (sf_region_t *)buf;
14230 bzero(buf, sizeof (*rgnp));
14231
14232 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14233
14234 return (0);
14235 }
14236
14237 /* ARGSUSED */
14238 static void
14239 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14240 {
14241 sf_region_t *rgnp = (sf_region_t *)buf;
14242 mutex_destroy(&rgnp->rgn_mutex);
14243 }
14244
14245 static int
14246 sfrgnmap_isnull(sf_region_map_t *map)
14247 {
14248 int i;
14249
14250 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14251 if (map->bitmap[i] != 0) {
14252 return (0);
14253 }
14254 }
14255 return (1);
14256 }
14257
14258 static int
14259 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14260 {
14261 int i;
14262
14263 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14264 if (map->bitmap[i] != 0) {
14265 return (0);
14266 }
14267 }
14268 return (1);
14269 }
14270
14271 #ifdef DEBUG
14272 static void
14273 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14274 {
14275 sfmmu_t *sp;
14276 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14277
14278 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14279 ASSERT(srdp == sp->sfmmu_srdp);
14280 if (sp == sfmmup) {
14281 if (onlist) {
14282 return;
14283 } else {
				panic("shctx: sfmmu 0x%p found on scd "
				    "list 0x%p", (void *)sfmmup,
				    (void *)*headp);
14287 }
14288 }
14289 }
14290 if (onlist) {
14291 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14292 (void *)sfmmup, (void *)*headp);
14293 } else {
14294 return;
14295 }
14296 }
14297 #else /* DEBUG */
14298 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14299 #endif /* DEBUG */
14300
14301 /*
14302 * Removes an sfmmu from the SCD sfmmu list.
14303 */
14304 static void
14305 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14306 {
14307 ASSERT(sfmmup->sfmmu_srdp != NULL);
14308 check_scd_sfmmu_list(headp, sfmmup, 1);
14309 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14310 ASSERT(*headp != sfmmup);
14311 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14312 sfmmup->sfmmu_scd_link.next;
14313 } else {
14314 ASSERT(*headp == sfmmup);
14315 *headp = sfmmup->sfmmu_scd_link.next;
14316 }
14317 if (sfmmup->sfmmu_scd_link.next != NULL) {
14318 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14319 sfmmup->sfmmu_scd_link.prev;
14320 }
14321 }
14322
14323
14324 /*
14325 * Adds an sfmmu to the start of the queue.
14326 */
14327 static void
14328 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14329 {
14330 check_scd_sfmmu_list(headp, sfmmup, 0);
14331 sfmmup->sfmmu_scd_link.prev = NULL;
14332 sfmmup->sfmmu_scd_link.next = *headp;
14333 if (*headp != NULL)
14334 (*headp)->sfmmu_scd_link.prev = sfmmup;
14335 *headp = sfmmup;
14336 }
14337
14338 /*
 * Remove an scd from the queue.
14340 */
14341 static void
14342 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14343 {
14344 if (scdp->scd_prev != NULL) {
14345 ASSERT(*headp != scdp);
14346 scdp->scd_prev->scd_next = scdp->scd_next;
14347 } else {
14348 ASSERT(*headp == scdp);
14349 *headp = scdp->scd_next;
14350 }
14351
14352 if (scdp->scd_next != NULL) {
14353 scdp->scd_next->scd_prev = scdp->scd_prev;
14354 }
14355 }
14356
14357 /*
14358 * Add an scd to the start of the queue.
14359 */
14360 static void
14361 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14362 {
14363 scdp->scd_prev = NULL;
14364 scdp->scd_next = *headp;
14365 if (*headp != NULL) {
14366 (*headp)->scd_prev = scdp;
14367 }
14368 *headp = scdp;
14369 }
14370
14371 static int
14372 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14373 {
14374 uint_t rid;
14375 uint_t i;
14376 uint_t j;
14377 ulong_t w;
14378 sf_region_t *rgnp;
14379 ulong_t tte8k_cnt = 0;
14380 ulong_t tte4m_cnt = 0;
14381 uint_t tsb_szc;
14382 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14383 sfmmu_t *ism_hatid;
14384 struct tsb_info *newtsb;
14385 int szc;
14386
14387 ASSERT(srdp != NULL);
14388
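	/*
	 * Walk every region id whose bit is set in the SCD's region map:
	 * word i, bit j corresponds to rid = (i << BT_ULSHIFT) | j. Rids
	 * below SFMMU_MAX_HME_REGIONS denote hme regions; the rest are
	 * ISM regions (offset by SFMMU_MAX_HME_REGIONS).
	 */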
14389 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14390 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14391 continue;
14392 }
14393 j = 0;
14394 while (w) {
14395 if (!(w & 0x1)) {
14396 j++;
14397 w >>= 1;
14398 continue;
14399 }
14400 rid = (i << BT_ULSHIFT) | j;
14401 j++;
14402 w >>= 1;
14403
14404 if (rid < SFMMU_MAX_HME_REGIONS) {
14405 rgnp = srdp->srd_hmergnp[rid];
14406 ASSERT(rgnp->rgn_id == rid);
14407 ASSERT(rgnp->rgn_refcnt > 0);
14408
14409 if (rgnp->rgn_pgszc < TTE4M) {
14410 tte8k_cnt += rgnp->rgn_size >>
14411 TTE_PAGE_SHIFT(TTE8K);
14412 } else {
14413 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14414 tte4m_cnt += rgnp->rgn_size >>
14415 TTE_PAGE_SHIFT(TTE4M);
14416 /*
14417 * Inflate SCD tsb0 by preallocating
14418 * 1/4 8k ttecnt for 4M regions to
14419 * allow for lgpg alloc failure.
14420 */
14421 tte8k_cnt += rgnp->rgn_size >>
14422 (TTE_PAGE_SHIFT(TTE8K) + 2);
14423 }
14424 } else {
14425 rid -= SFMMU_MAX_HME_REGIONS;
14426 rgnp = srdp->srd_ismrgnp[rid];
14427 ASSERT(rgnp->rgn_id == rid);
14428 ASSERT(rgnp->rgn_refcnt > 0);
14429
14430 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14431 ASSERT(ism_hatid->sfmmu_ismhat);
14432
14433 for (szc = 0; szc < TTE4M; szc++) {
14434 tte8k_cnt +=
14435 ism_hatid->sfmmu_ttecnt[szc] <<
14436 TTE_BSZS_SHIFT(szc);
14437 }
14438
14439 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14440 if (rgnp->rgn_pgszc >= TTE4M) {
14441 tte4m_cnt += rgnp->rgn_size >>
14442 TTE_PAGE_SHIFT(TTE4M);
14443 }
14444 }
14445 }
14446 }
14447
14448 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14449
14450 /* Allocate both the SCD TSBs here. */
14451 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14452 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14453 (tsb_szc <= TSB_4M_SZCODE ||
14454 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14455 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14456 TSB_ALLOC, scsfmmup))) {
14457
14458 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14459 return (TSB_ALLOCFAIL);
14460 } else {
14461 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14462
14463 if (tte4m_cnt) {
14464 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14465 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14466 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14467 (tsb_szc <= TSB_4M_SZCODE ||
14468 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14469 TSB4M|TSB32M|TSB256M,
14470 TSB_ALLOC, scsfmmup))) {
				/*
				 * If we fail to allocate the 2nd shared tsb,
				 * just free the 1st tsb and return failure.
				 */
14475 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14476 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14477 return (TSB_ALLOCFAIL);
14478 } else {
14479 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14480 newtsb->tsb_flags |= TSB_SHAREDCTX;
14481 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14482 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14483 }
14484 }
14485 SFMMU_STAT(sf_scd_1sttsb_alloc);
14486 }
14487 return (TSB_SUCCESS);
14488 }
14489
14490 static void
14491 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14492 {
14493 while (scd_sfmmu->sfmmu_tsb != NULL) {
14494 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14495 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14496 scd_sfmmu->sfmmu_tsb = next;
14497 }
14498 }
14499
14500 /*
14501 * Link the sfmmu onto the hme region list.
14502 */
14503 void
14504 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14505 {
14506 uint_t rid;
14507 sf_rgn_link_t *rlink;
14508 sfmmu_t *head;
14509 sf_rgn_link_t *hrlink;
14510
14511 rid = rgnp->rgn_id;
14512 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14513
14514 /* LINTED: constant in conditional context */
14515 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14516 ASSERT(rlink != NULL);
14517 mutex_enter(&rgnp->rgn_mutex);
14518 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14519 rlink->next = NULL;
14520 rlink->prev = NULL;
14521 /*
14522 * make sure rlink's next field is NULL
14523 * before making this link visible.
14524 */
14525 membar_stst();
14526 rgnp->rgn_sfmmu_head = sfmmup;
14527 } else {
14528 /* LINTED: constant in conditional context */
14529 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14530 ASSERT(hrlink != NULL);
14531 ASSERT(hrlink->prev == NULL);
14532 rlink->next = head;
14533 rlink->prev = NULL;
14534 hrlink->prev = sfmmup;
14535 /*
14536 * make sure rlink's next field is correct
14537 * before making this link visible.
14538 */
14539 membar_stst();
14540 rgnp->rgn_sfmmu_head = sfmmup;
14541 }
14542 mutex_exit(&rgnp->rgn_mutex);
14543 }
14544
14545 /*
14546 * Unlink the sfmmu from the hme region list.
14547 */
14548 void
14549 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14550 {
14551 uint_t rid;
14552 sf_rgn_link_t *rlink;
14553
14554 rid = rgnp->rgn_id;
14555 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14556
14557 /* LINTED: constant in conditional context */
14558 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14559 ASSERT(rlink != NULL);
14560 mutex_enter(&rgnp->rgn_mutex);
14561 if (rgnp->rgn_sfmmu_head == sfmmup) {
14562 sfmmu_t *next = rlink->next;
14563 rgnp->rgn_sfmmu_head = next;
14564 /*
14565 * if we are stopped by xc_attention() after this
14566 * point the forward link walking in
14567 * sfmmu_rgntlb_demap() will work correctly since the
14568 * head correctly points to the next element.
14569 */
14570 membar_stst();
14571 rlink->next = NULL;
14572 ASSERT(rlink->prev == NULL);
14573 if (next != NULL) {
14574 sf_rgn_link_t *nrlink;
14575 /* LINTED: constant in conditional context */
14576 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14577 ASSERT(nrlink != NULL);
14578 ASSERT(nrlink->prev == sfmmup);
14579 nrlink->prev = NULL;
14580 }
14581 } else {
14582 sfmmu_t *next = rlink->next;
14583 sfmmu_t *prev = rlink->prev;
14584 sf_rgn_link_t *prlink;
14585
14586 ASSERT(prev != NULL);
14587 /* LINTED: constant in conditional context */
14588 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14589 ASSERT(prlink != NULL);
14590 ASSERT(prlink->next == sfmmup);
14591 prlink->next = next;
14592 /*
14593 * if we are stopped by xc_attention()
14594 * after this point the forward link walking
14595 * will work correctly since the prev element
14596 * correctly points to the next element.
14597 */
14598 membar_stst();
14599 rlink->next = NULL;
14600 rlink->prev = NULL;
14601 if (next != NULL) {
14602 sf_rgn_link_t *nrlink;
14603 /* LINTED: constant in conditional context */
14604 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14605 ASSERT(nrlink != NULL);
14606 ASSERT(nrlink->prev == sfmmup);
14607 nrlink->prev = prev;
14608 }
14609 }
14610 mutex_exit(&rgnp->rgn_mutex);
14611 }
14612
14613 /*
14614 * Link scd sfmmu onto ism or hme region list for each region in the
14615 * scd region map.
14616 */
14617 void
14618 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14619 {
14620 uint_t rid;
14621 uint_t i;
14622 uint_t j;
14623 ulong_t w;
14624 sf_region_t *rgnp;
14625 sfmmu_t *scsfmmup;
14626
14627 scsfmmup = scdp->scd_sfmmup;
14628 ASSERT(scsfmmup->sfmmu_scdhat);
14629 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14630 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14631 continue;
14632 }
14633 j = 0;
14634 while (w) {
14635 if (!(w & 0x1)) {
14636 j++;
14637 w >>= 1;
14638 continue;
14639 }
14640 rid = (i << BT_ULSHIFT) | j;
14641 j++;
14642 w >>= 1;
14643
14644 if (rid < SFMMU_MAX_HME_REGIONS) {
14645 rgnp = srdp->srd_hmergnp[rid];
14646 ASSERT(rgnp->rgn_id == rid);
14647 ASSERT(rgnp->rgn_refcnt > 0);
14648 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14649 } else {
14650 sfmmu_t *ism_hatid = NULL;
14651 ism_ment_t *ism_ment;
14652 rid -= SFMMU_MAX_HME_REGIONS;
14653 rgnp = srdp->srd_ismrgnp[rid];
14654 ASSERT(rgnp->rgn_id == rid);
14655 ASSERT(rgnp->rgn_refcnt > 0);
14656
14657 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14658 ASSERT(ism_hatid->sfmmu_ismhat);
14659 ism_ment = &scdp->scd_ism_links[rid];
14660 ism_ment->iment_hat = scsfmmup;
14661 ism_ment->iment_base_va = rgnp->rgn_saddr;
14662 mutex_enter(&ism_mlist_lock);
14663 iment_add(ism_ment, ism_hatid);
14664 mutex_exit(&ism_mlist_lock);
14665
14666 }
14667 }
14668 }
14669 }

/*
14671 * Unlink scd sfmmu from ism or hme region list for each region in the
14672 * scd region map.
14673 */
14674 void
14675 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14676 {
14677 uint_t rid;
14678 uint_t i;
14679 uint_t j;
14680 ulong_t w;
14681 sf_region_t *rgnp;
14682 sfmmu_t *scsfmmup;
14683
14684 scsfmmup = scdp->scd_sfmmup;
14685 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14686 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14687 continue;
14688 }
14689 j = 0;
14690 while (w) {
14691 if (!(w & 0x1)) {
14692 j++;
14693 w >>= 1;
14694 continue;
14695 }
14696 rid = (i << BT_ULSHIFT) | j;
14697 j++;
14698 w >>= 1;
14699
14700 if (rid < SFMMU_MAX_HME_REGIONS) {
14701 rgnp = srdp->srd_hmergnp[rid];
14702 ASSERT(rgnp->rgn_id == rid);
14703 ASSERT(rgnp->rgn_refcnt > 0);
14704 sfmmu_unlink_from_hmeregion(scsfmmup,
14705 rgnp);
14706
14707 } else {
14708 sfmmu_t *ism_hatid = NULL;
14709 ism_ment_t *ism_ment;
14710 rid -= SFMMU_MAX_HME_REGIONS;
14711 rgnp = srdp->srd_ismrgnp[rid];
14712 ASSERT(rgnp->rgn_id == rid);
14713 ASSERT(rgnp->rgn_refcnt > 0);
14714
14715 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14716 ASSERT(ism_hatid->sfmmu_ismhat);
14717 ism_ment = &scdp->scd_ism_links[rid];
14718 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
14719 ASSERT(ism_ment->iment_base_va ==
14720 rgnp->rgn_saddr);
14721 mutex_enter(&ism_mlist_lock);
14722 iment_sub(ism_ment, ism_hatid);
14723 mutex_exit(&ism_mlist_lock);
14724
14725 }
14726 }
14727 }
14728 }

/*
 * Allocates and initialises a new SCD structure. It is called with
 * the srd_scd_mutex held and returns with the reference count
 * initialised to 1.
 */
14734 static sf_scd_t *
14735 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
14736 {
14737 sf_scd_t *new_scdp;
14738 sfmmu_t *scsfmmup;
14739 int i;
14740
14741 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
14742 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
14743
14744 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
14745 new_scdp->scd_sfmmup = scsfmmup;
14746 scsfmmup->sfmmu_srdp = srdp;
14747 scsfmmup->sfmmu_scdp = new_scdp;
14748 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
14749 scsfmmup->sfmmu_scdhat = 1;
14750 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
14751 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
14752
14753 ASSERT(max_mmu_ctxdoms > 0);
14754 for (i = 0; i < max_mmu_ctxdoms; i++) {
14755 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
14756 scsfmmup->sfmmu_ctxs[i].gnum = 0;
14757 }
14758
14759 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14760 new_scdp->scd_rttecnt[i] = 0;
14761 }
14762
14763 new_scdp->scd_region_map = *new_map;
14764 new_scdp->scd_refcnt = 1;
14765 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
14766 kmem_cache_free(scd_cache, new_scdp);
14767 kmem_cache_free(sfmmuid_cache, scsfmmup);
14768 return (NULL);
14769 }
14770 if (&mmu_init_scd) {
14771 mmu_init_scd(new_scdp);
14772 }
14773 return (new_scdp);
14774 }
14775
14776 /*
14777 * The first phase of a process joining an SCD. The hat structure is
14778 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
14779 * and a cross-call with context invalidation is used to cause the
14780 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
14781 * routine.
14782 */
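/*
 * A brief summary of the two phases (descriptive only):
 *
 *	1. sfmmu_join_scd() below moves the hat onto scdp->scd_sf_list,
 *	   sets HAT_JOIN_SCD, invalidates the context and adjusts the
 *	   ttecnt and tsb0 inflation counts.
 *	2. sfmmu_finish_join_scd(), called from sfmmu_tsbmiss_exception()
 *	   with the hat lock held, invalidates the private TSBs and sets
 *	   HAT_CTX1_FLAG on the SCD's ISM maps via sfmmu_ism_hatflags().
 */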
14783 static void
14784 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
14785 {
14786 hatlock_t *hatlockp;
14787 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14788 int i;
14789 sf_scd_t *old_scdp;
14790
14791 ASSERT(srdp != NULL);
14792 ASSERT(scdp != NULL);
14793 ASSERT(scdp->scd_refcnt > 0);
14794 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14795
14796 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
14797 ASSERT(old_scdp != scdp);
14798
14799 mutex_enter(&old_scdp->scd_mutex);
14800 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
14801 mutex_exit(&old_scdp->scd_mutex);
14802 /*
14803 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
14804 * include the shme rgn ttecnt for rgns that
14805 * were in the old SCD
14806 */
14807 for (i = 0; i < mmu_page_sizes; i++) {
14808 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14809 old_scdp->scd_rttecnt[i]);
14810 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14811 sfmmup->sfmmu_scdrttecnt[i]);
14812 }
14813 }
14814
14815 /*
14816 * Move sfmmu to the scd lists.
14817 */
14818 mutex_enter(&scdp->scd_mutex);
14819 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
14820 mutex_exit(&scdp->scd_mutex);
14821 SF_SCD_INCR_REF(scdp);
14822
14823 hatlockp = sfmmu_hat_enter(sfmmup);
14824 /*
14825 * For a multi-thread process, we must stop
14826 * all the other threads before joining the scd.
14827 */
14828
14829 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
14830
14831 sfmmu_invalidate_ctx(sfmmup);
14832 sfmmup->sfmmu_scdp = scdp;
14833
	/*
	 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
	 * sfmmu_ttecnt so that it no longer includes the rgn ttecnt for
	 * regions just joined in the SCD.
	 */
14838 for (i = 0; i < mmu_page_sizes; i++) {
14839 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
14840 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
14841 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14842 -sfmmup->sfmmu_scdrttecnt[i]);
14843 }
14844 /* update tsb0 inflation count */
14845 if (old_scdp != NULL) {
14846 sfmmup->sfmmu_tsb0_4minflcnt +=
14847 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14848 }
14849 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14850 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
14851 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
14852
14853 sfmmu_hat_exit(hatlockp);
14854
14855 if (old_scdp != NULL) {
14856 SF_SCD_DECR_REF(srdp, old_scdp);
14857 }
14858
14859 }
14860
14861 /*
14862 * This routine is called by a process to become part of an SCD. It is called
14863 * from sfmmu_tsbmiss_exception() once most of the initial work has been
14864 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
14865 */
14866 static void
14867 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
14868 {
14869 struct tsb_info *tsbinfop;
14870
14871 ASSERT(sfmmu_hat_lock_held(sfmmup));
14872 ASSERT(sfmmup->sfmmu_scdp != NULL);
14873 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
14874 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
14875 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
14876
14877 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
14878 tsbinfop = tsbinfop->tsb_next) {
14879 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
14880 continue;
14881 }
14882 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
14883
14884 sfmmu_inv_tsb(tsbinfop->tsb_va,
14885 TSB_BYTES(tsbinfop->tsb_szc));
14886 }
14887
14888 /* Set HAT_CTX1_FLAG for all SCD ISMs */
14889 sfmmu_ism_hatflags(sfmmup, 1);
14890
14891 SFMMU_STAT(sf_join_scd);
14892 }
14893
/*
 * This routine is called to check if there is an SCD which matches
 * the process's region map; if not, a new SCD may be created.
 */
14898 static void
14899 sfmmu_find_scd(sfmmu_t *sfmmup)
14900 {
14901 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14902 sf_scd_t *scdp, *new_scdp;
14903 int ret;
14904
14905 ASSERT(srdp != NULL);
14906 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14907
14908 mutex_enter(&srdp->srd_scd_mutex);
14909 for (scdp = srdp->srd_scdp; scdp != NULL;
14910 scdp = scdp->scd_next) {
14911 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
14912 &sfmmup->sfmmu_region_map, ret);
14913 if (ret == 1) {
14914 SF_SCD_INCR_REF(scdp);
14915 mutex_exit(&srdp->srd_scd_mutex);
14916 sfmmu_join_scd(scdp, sfmmup);
14917 ASSERT(scdp->scd_refcnt >= 2);
14918 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
14919 return;
14920 } else {
14921 /*
14922 * If the sfmmu region map is a subset of the scd
14923 * region map, then the assumption is that this process
14924 * will continue attaching to ISM segments until the
14925 * region maps are equal.
14926 */
14927 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
14928 &sfmmup->sfmmu_region_map, ret);
14929 if (ret == 1) {
14930 mutex_exit(&srdp->srd_scd_mutex);
14931 return;
14932 }
14933 }
14934 }
14935
14936 ASSERT(scdp == NULL);
14937 /*
14938 * No matching SCD has been found, create a new one.
14939 */
14940 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
14941 NULL) {
14942 mutex_exit(&srdp->srd_scd_mutex);
14943 return;
14944 }
14945
14946 /*
14947 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
14948 */
14949
14950 /* Set scd_rttecnt for shme rgns in SCD */
14951 sfmmu_set_scd_rttecnt(srdp, new_scdp);
14952
14953 /*
14954 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
14955 */
14956 sfmmu_link_scd_to_regions(srdp, new_scdp);
14957 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
14958 SFMMU_STAT_ADD(sf_create_scd, 1);
14959
14960 mutex_exit(&srdp->srd_scd_mutex);
14961 sfmmu_join_scd(new_scdp, sfmmup);
14962 ASSERT(new_scdp->scd_refcnt >= 2);
14963 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
14964 }
14965
14966 /*
14967 * This routine is called by a process to remove itself from an SCD. It is
 * either called when the process has detached from a segment or from
14969 * hat_free_start() as a result of calling exit.
14970 */
14971 static void
14972 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
14973 {
14974 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
14975 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14976 hatlock_t *hatlockp = TSB_HASH(sfmmup);
14977 int i;
14978
14979 ASSERT(scdp != NULL);
14980 ASSERT(srdp != NULL);
14981
14982 if (sfmmup->sfmmu_free) {
14983 /*
14984 * If the process is part of an SCD the sfmmu is unlinked
14985 * from scd_sf_list.
14986 */
14987 mutex_enter(&scdp->scd_mutex);
14988 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
14989 mutex_exit(&scdp->scd_mutex);
14990 /*
14991 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
14992 * are about to leave the SCD
14993 */
14994 for (i = 0; i < mmu_page_sizes; i++) {
14995 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
14996 scdp->scd_rttecnt[i]);
14997 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
14998 sfmmup->sfmmu_scdrttecnt[i]);
14999 sfmmup->sfmmu_scdrttecnt[i] = 0;
15000 }
15001 sfmmup->sfmmu_scdp = NULL;
15002
15003 SF_SCD_DECR_REF(srdp, scdp);
15004 return;
15005 }
15006
15007 ASSERT(r_type != SFMMU_REGION_ISM ||
15008 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15009 ASSERT(scdp->scd_refcnt);
15010 ASSERT(!sfmmup->sfmmu_free);
15011 ASSERT(sfmmu_hat_lock_held(sfmmup));
15012 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15013
15014 /*
15015 * Wait for ISM maps to be updated.
15016 */
15017 if (r_type != SFMMU_REGION_ISM) {
15018 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15019 sfmmup->sfmmu_scdp != NULL) {
15020 cv_wait(&sfmmup->sfmmu_tsb_cv,
15021 HATLOCK_MUTEXP(hatlockp));
15022 }
15023
15024 if (sfmmup->sfmmu_scdp == NULL) {
15025 sfmmu_hat_exit(hatlockp);
15026 return;
15027 }
15028 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15029 }
15030
15031 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15032 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15033 /*
15034 * Since HAT_JOIN_SCD was set our context
15035 * is still invalid.
15036 */
15037 } else {
15038 /*
15039 * For a multi-thread process, we must stop
15040 * all the other threads before leaving the scd.
15041 */
15042
15043 sfmmu_invalidate_ctx(sfmmup);
15044 }
15045
15046 /* Clear all the rid's for ISM, delete flags, etc */
15047 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15048 sfmmu_ism_hatflags(sfmmup, 0);
15049
15050 /*
15051 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15052 * are in SCD before this sfmmup leaves the SCD.
15053 */
15054 for (i = 0; i < mmu_page_sizes; i++) {
15055 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15056 scdp->scd_rttecnt[i]);
15057 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15058 sfmmup->sfmmu_scdrttecnt[i]);
15059 sfmmup->sfmmu_scdrttecnt[i] = 0;
15060 /* update ismttecnt to include SCD ism before hat leaves SCD */
15061 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15062 sfmmup->sfmmu_scdismttecnt[i] = 0;
15063 }
15064 /* update tsb0 inflation count */
15065 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15066
15067 if (r_type != SFMMU_REGION_ISM) {
15068 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15069 }
15070 sfmmup->sfmmu_scdp = NULL;
15071
15072 sfmmu_hat_exit(hatlockp);
15073
	/*
	 * Unlink the sfmmu from scd_sf_list; this can be done without holding
	 * the hat lock as we hold the sfmmu_as lock, which prevents
	 * hat_join_region from adding this thread to the scd again. Other
	 * threads check if sfmmu_scdp is NULL under the hat lock and if it's
	 * NULL they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
	 * while holding the hat lock.
	 */
15082 mutex_enter(&scdp->scd_mutex);
15083 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15084 mutex_exit(&scdp->scd_mutex);
15085 SFMMU_STAT(sf_leave_scd);
15086
15087 SF_SCD_DECR_REF(srdp, scdp);
15088 hatlockp = sfmmu_hat_enter(sfmmup);
15089
15090 }
15091
15092 /*
15093 * Unlink and free up an SCD structure with a reference count of 0.
15094 */
15095 static void
15096 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15097 {
15098 sfmmu_t *scsfmmup;
15099 sf_scd_t *sp;
15100 hatlock_t *shatlockp;
15101 int i, ret;
15102
15103 mutex_enter(&srdp->srd_scd_mutex);
15104 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15105 if (sp == scdp)
15106 break;
15107 }
15108 if (sp == NULL || sp->scd_refcnt) {
15109 mutex_exit(&srdp->srd_scd_mutex);
15110 return;
15111 }
15112
15113 /*
15114 * It is possible that the scd has been freed and reallocated with a
15115 * different region map while we've been waiting for the srd_scd_mutex.
15116 */
15117 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15118 if (ret != 1) {
15119 mutex_exit(&srdp->srd_scd_mutex);
15120 return;
15121 }
15122
15123 ASSERT(scdp->scd_sf_list == NULL);
15124 /*
15125 * Unlink scd from srd_scdp list.
15126 */
15127 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15128 mutex_exit(&srdp->srd_scd_mutex);
15129
15130 sfmmu_unlink_scd_from_regions(srdp, scdp);
15131
15132 /* Clear shared context tsb and release ctx */
15133 scsfmmup = scdp->scd_sfmmup;
15134
	/*
	 * Create a barrier so that the scd will not be destroyed
	 * if another thread still holds the same shared hat lock.
	 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
	 * shared hat lock before checking the shared tsb reloc flag.
	 */
15141 shatlockp = sfmmu_hat_enter(scsfmmup);
15142 sfmmu_hat_exit(shatlockp);
15143
15144 sfmmu_free_scd_tsbs(scsfmmup);
15145
15146 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15147 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15148 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15149 SFMMU_L2_HMERLINKS_SIZE);
15150 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15151 }
15152 }
15153 kmem_cache_free(sfmmuid_cache, scsfmmup);
15154 kmem_cache_free(scd_cache, scdp);
15155 SFMMU_STAT(sf_destroy_scd);
15156 }
15157
/*
 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
 * bits which are set in the SCD's ism region map. This flag indicates to the
 * tsbmiss handler that mappings for these segments should be loaded using the
 * shared context.
 */
15164 static void
15165 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15166 {
15167 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15168 ism_blk_t *ism_blkp;
15169 ism_map_t *ism_map;
15170 int i, rid;
15171
15172 ASSERT(sfmmup->sfmmu_iblk != NULL);
15173 ASSERT(scdp != NULL);
15174 /*
15175 * Note that the caller either set HAT_ISMBUSY flag or checked
15176 * under hat lock that HAT_ISMBUSY was not set by another thread.
15177 */
15178 ASSERT(sfmmu_hat_lock_held(sfmmup));
15179
15180 ism_blkp = sfmmup->sfmmu_iblk;
15181 while (ism_blkp != NULL) {
15182 ism_map = ism_blkp->iblk_maps;
		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15184 rid = ism_map[i].imap_rid;
15185 if (rid == SFMMU_INVALID_ISMRID) {
15186 continue;
15187 }
15188 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15189 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15190 addflag) {
15191 ism_map[i].imap_hatflags |=
15192 HAT_CTX1_FLAG;
15193 } else {
15194 ism_map[i].imap_hatflags &=
15195 ~HAT_CTX1_FLAG;
15196 }
15197 }
15198 ism_blkp = ism_blkp->iblk_next;
15199 }
15200 }
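
/*
 * The region map tested above is a fixed-size bitmap indexed by region id.
 * The general idiom for such a test is a word-array lookup; the macros below
 * are a hypothetical sketch of that idiom, not the actual SF_RGNMAP_TEST
 * implementation.
 */
#if 0	/* illustrative sketch only; not compiled */
#define	EXAMPLE_BITS_PER_WORD	(sizeof (uint_t) * NBBY)

#define	EXAMPLE_RGNMAP_TEST(bits, rid) \
	(((bits)[(rid) / EXAMPLE_BITS_PER_WORD] >> \
	((rid) % EXAMPLE_BITS_PER_WORD)) & 1)
#endif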
15201
15202 static int
15203 sfmmu_srd_lock_held(sf_srd_t *srdp)
15204 {
15205 return (MUTEX_HELD(&srdp->srd_mutex));
15206 }
15207
15208 /* ARGSUSED */
15209 static int
15210 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15211 {
15212 sf_scd_t *scdp = (sf_scd_t *)buf;
15213
15214 bzero(buf, sizeof (sf_scd_t));
15215 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15216 return (0);
15217 }
15218
15219 /* ARGSUSED */
15220 static void
15221 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15222 {
15223 sf_scd_t *scdp = (sf_scd_t *)buf;
15224
15225 mutex_destroy(&scdp->scd_mutex);
15226 }
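
/*
 * The constructor/destructor pair above is invoked by the kmem allocator on
 * cache buffers.  A minimal sketch of how a cache such as scd_cache could be
 * created with these callbacks follows; the cache name, variable, and flags
 * here are assumptions, and the real cache is created during hat
 * initialization.
 */
#if 0	/* illustrative sketch only; not compiled */
static kmem_cache_t *example_scd_cache;

static void
example_scd_cache_init(void)
{
	example_scd_cache = kmem_cache_create("example_scd_cache",
	    sizeof (sf_scd_t), 0, sfmmu_scdcache_constructor,
	    sfmmu_scdcache_destructor, NULL, NULL, NULL, 0);
}
#endif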
15227
15228 /*
 * The listp parameter is a pointer to a list of hmeblks which are partially
 * freed as a result of calling sfmmu_hblk_hash_rm().  The last phase of the
 * freeing process is to cross-call all cpus to ensure that there are no
 * remaining cached references.
 *
 * If the local generation number is less than the global one, we can free
 * hmeblks which are already on the pending queue, as another cpu has
 * completed the cross-call.
 *
 * We cross-call to make sure that there are no threads on other cpus accessing
 * these hmeblks and then complete the process of freeing them under the
 * following conditions:
 *	The total number of pending hmeblks is greater than the threshold
 *	The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
 *	It is at least 1 second since the last time we cross-called
 *
 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15246 */
15247 static void
15248 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15249 {
15250 struct hme_blk *hblkp, *pr_hblkp = NULL;
15251 int count = 0;
15252 cpuset_t cpuset = cpu_ready_set;
15253 cpu_hme_pend_t *cpuhp;
15254 timestruc_t now;
15255 int one_second_expired = 0;
15256
15257 gethrestime_lasttick(&now);
15258
15259 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15260 ASSERT(hblkp->hblk_shw_bit == 0);
15261 ASSERT(hblkp->hblk_shared == 0);
15262 count++;
15263 pr_hblkp = hblkp;
15264 }
15265
15266 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15267 mutex_enter(&cpuhp->chp_mutex);
15268
15269 if ((cpuhp->chp_count + count) == 0) {
15270 mutex_exit(&cpuhp->chp_mutex);
15271 return;
15272 }
15273
15274 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15275 one_second_expired = 1;
15276 }
15277
15278 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15279 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15280 one_second_expired)) {
		/* Append the per-cpu pending list to the caller's list. */
15282 if (pr_hblkp == NULL) {
15283 *listp = cpuhp->chp_listp;
15284 } else {
15285 pr_hblkp->hblk_next = cpuhp->chp_listp;
15286 }
15287 cpuhp->chp_listp = NULL;
15288 cpuhp->chp_count = 0;
15289 cpuhp->chp_timestamp = now.tv_sec;
15290 mutex_exit(&cpuhp->chp_mutex);
15291
15292 kpreempt_disable();
15293 CPUSET_DEL(cpuset, CPU->cpu_id);
15294 xt_sync(cpuset);
15295 xt_sync(cpuset);
15296 kpreempt_enable();
15297
15298 /*
15299 * At this stage we know that no trap handlers on other
15300 * cpus can have references to hmeblks on the list.
15301 */
15302 sfmmu_hblk_free(listp);
15303 } else if (*listp != NULL) {
15304 pr_hblkp->hblk_next = cpuhp->chp_listp;
15305 cpuhp->chp_listp = *listp;
15306 cpuhp->chp_count += count;
15307 *listp = NULL;
15308 mutex_exit(&cpuhp->chp_mutex);
15309 } else {
15310 mutex_exit(&cpuhp->chp_mutex);
15311 }
15312 }
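
/*
 * The flush decision in sfmmu_hblks_list_purge() boils down to a single
 * predicate over the three triggers listed in the block comment preceding
 * the function (reserve pool running low, pending count over the threshold,
 * or at least a second since the last flush).  A sketch of that predicate,
 * with a hypothetical helper name:
 */
#if 0	/* illustrative sketch only; not compiled */
static int
example_should_flush(int pending, time_t last_flush, time_t now)
{
	return (freehblkcnt < HBLK_RESERVE_CNT ||
	    pending > cpu_hme_pend_thresh ||
	    (now - last_flush) > 1);
}
#endif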
15313
15314 /*
 * Add an hmeblk to the hash list.
15316 */
15317 void
15318 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15319 uint64_t hblkpa)
15320 {
15321 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15322 #ifdef DEBUG
15323 if (hmebp->hmeblkp == NULL) {
15324 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15325 }
15326 #endif /* DEBUG */
15327
15328 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15329 /*
	 * Since the TSB miss handler no longer locks the hash chain before
	 * walking it, make sure that the hmeblk's nextpa is globally visible
	 * before we make the hmeblk itself globally visible by updating the
	 * chain root pointer in the hash bucket.
15334 */
15335 membar_producer();
15336 hmebp->hmeh_nextpa = hblkpa;
15337 hmeblkp->hblk_next = hmebp->hmeblkp;
	hmebp->hmeblkp = hmeblkp;
}
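
/*
 * The insert above follows the usual lock-free publication pattern: fully
 * initialize the new element, issue a store barrier, and only then make it
 * reachable from the list head, so a reader traversing without a lock never
 * sees a partially initialized element.  A minimal sketch with hypothetical
 * types and names:
 */
#if 0	/* illustrative sketch only; not compiled */
struct example_node {
	struct example_node	*en_next;
	int			en_payload;
};

static void
example_publish(struct example_node **headp, struct example_node *np,
    int payload)
{
	np->en_payload = payload;	/* initialize before publishing */
	np->en_next = *headp;
	membar_producer();		/* order init ahead of the publish */
	*headp = np;			/* readers may now find the node */
}
#endif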
15341
15342 /*
 * This function is the first part of a two-part process to remove an hmeblk
15344 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15345 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15346 * a per-cpu pending list using the virtual address pointer.
15347 *
15348 * TSB miss trap handlers that start after this phase will no longer see
15349 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
 * can still use it for further chain traversal because we haven't yet
 * modified the next physical pointer or freed it.
15352 *
15353 * In the second phase of hmeblk removal we'll issue a barrier xcall before
 * we reuse or free this hmeblk.  This will make sure all lingering references
 * to the hmeblk after the first phase disappear before we finally reclaim it.
15356 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15357 * during their traversal.
15358 *
15359 * The hmehash_mutex must be held when calling this function.
15360 *
15361 * Input:
15362 * hmebp - hme hash bucket pointer
15363 * hmeblkp - address of hmeblk to be removed
15364 * pr_hblk - virtual address of previous hmeblkp
15365 * listp - pointer to list of hmeblks linked by virtual address
15366 * free_now flag - indicates that a complete removal from the hash chains
15367 * is necessary.
15368 *
 * Using the free_now flag is inefficient, since a cross-call is required to
 * remove even a single hmeblk from the hash chain, but it is necessary when
 * hmeblks are in short supply.
15372 */
15373 void
15374 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15375 struct hme_blk *pr_hblk, struct hme_blk **listp,
15376 int free_now)
15377 {
15378 int shw_size, vshift;
15379 struct hme_blk *shw_hblkp;
15380 uint_t shw_mask, newshw_mask;
15381 caddr_t vaddr;
15382 int size;
15383 cpuset_t cpuset = cpu_ready_set;
15384
15385 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15386
15387 if (hmebp->hmeblkp == hmeblkp) {
15388 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15389 hmebp->hmeblkp = hmeblkp->hblk_next;
15390 } else {
15391 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15392 pr_hblk->hblk_next = hmeblkp->hblk_next;
15393 }
15394
15395 size = get_hblk_ttesz(hmeblkp);
15396 shw_hblkp = hmeblkp->hblk_shadow;
15397 if (shw_hblkp) {
15398 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15399 ASSERT(!hmeblkp->hblk_shared);
15400 #ifdef DEBUG
15401 if (mmu_page_sizes == max_mmu_page_sizes) {
15402 ASSERT(size < TTE256M);
15403 } else {
15404 ASSERT(size < TTE4M);
15405 }
15406 #endif /* DEBUG */
15407
15408 shw_size = get_hblk_ttesz(shw_hblkp);
15409 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15410 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15411 ASSERT(vshift < 8);
15412 /*
15413 * Atomically clear shadow mask bit
15414 */
15415 do {
15416 shw_mask = shw_hblkp->hblk_shw_mask;
15417 ASSERT(shw_mask & (1 << vshift));
15418 newshw_mask = shw_mask & ~(1 << vshift);
15419 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15420 shw_mask, newshw_mask);
15421 } while (newshw_mask != shw_mask);
15422 hmeblkp->hblk_shadow = NULL;
15423 }
15424 hmeblkp->hblk_shw_bit = 0;
15425
15426 if (hmeblkp->hblk_shared) {
15427 #ifdef DEBUG
15428 sf_srd_t *srdp;
15429 sf_region_t *rgnp;
15430 uint_t rid;
15431
15432 srdp = hblktosrd(hmeblkp);
15433 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15434 rid = hmeblkp->hblk_tag.htag_rid;
15435 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15436 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15437 rgnp = srdp->srd_hmergnp[rid];
15438 ASSERT(rgnp != NULL);
15439 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15440 #endif /* DEBUG */
15441 hmeblkp->hblk_shared = 0;
15442 }
15443 if (free_now) {
15444 kpreempt_disable();
15445 CPUSET_DEL(cpuset, CPU->cpu_id);
15446 xt_sync(cpuset);
15447 xt_sync(cpuset);
15448 kpreempt_enable();
15449
15450 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15451 hmeblkp->hblk_next = NULL;
15452 } else {
15453 /* Append hmeblkp to listp for processing later. */
15454 hmeblkp->hblk_next = *listp;
15455 *listp = hmeblkp;
15456 }
15457 }
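
/*
 * The shadow-mask update in sfmmu_hblk_hash_rm() is a standard
 * compare-and-swap retry loop: recompute the new value from the current one
 * and retry until atomic_cas_32() observes the value the update was based
 * on.  The same idiom in isolation, with a hypothetical helper name:
 */
#if 0	/* illustrative sketch only; not compiled */
static void
example_atomic_clear_bit(volatile uint32_t *maskp, uint_t bit)
{
	uint32_t old, new;

	do {
		old = *maskp;
		new = old & ~(1U << bit);
	} while (atomic_cas_32(maskp, old, new) != old);
}
#endif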
15458
15459 /*
15460 * This routine is called when memory is in short supply and returns a free
15461 * hmeblk of the requested size from the cpu pending lists.
15462 */
15463 static struct hme_blk *
15464 sfmmu_check_pending_hblks(int size)
15465 {
15466 int i;
15467 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15468 int found_hmeblk;
15469 cpuset_t cpuset = cpu_ready_set;
15470 cpu_hme_pend_t *cpuhp;
15471
	/* Search the per-cpu hblk pending queues for a suitable hmeblk. */
15473 for (i = 0; i < NCPU; i++) {
15474 cpuhp = &cpu_hme_pend[i];
15475 if (cpuhp->chp_listp != NULL) {
15476 mutex_enter(&cpuhp->chp_mutex);
15477 if (cpuhp->chp_listp == NULL) {
15478 mutex_exit(&cpuhp->chp_mutex);
15479 continue;
15480 }
15481 found_hmeblk = 0;
15482 last_hmeblkp = NULL;
15483 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15484 hmeblkp = hmeblkp->hblk_next) {
15485 if (get_hblk_ttesz(hmeblkp) == size) {
15486 if (last_hmeblkp == NULL) {
15487 cpuhp->chp_listp =
15488 hmeblkp->hblk_next;
15489 } else {
15490 last_hmeblkp->hblk_next =
15491 hmeblkp->hblk_next;
15492 }
15493 ASSERT(cpuhp->chp_count > 0);
15494 cpuhp->chp_count--;
15495 found_hmeblk = 1;
15496 break;
15497 } else {
15498 last_hmeblkp = hmeblkp;
15499 }
15500 }
15501 mutex_exit(&cpuhp->chp_mutex);
15502
15503 if (found_hmeblk) {
15504 kpreempt_disable();
15505 CPUSET_DEL(cpuset, CPU->cpu_id);
15506 xt_sync(cpuset);
15507 xt_sync(cpuset);
15508 kpreempt_enable();
15509 return (hmeblkp);
15510 }
15511 }
15512 }
15513 return (NULL);
15514 }
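
/*
 * The kpreempt_disable()/xt_sync() sequence used above (and in
 * sfmmu_hblks_list_purge() and sfmmu_hblk_hash_rm()) is the idiom for
 * waiting out any trap handler on another cpu that may still hold a cached
 * reference to an unlinked hmeblk: exclude the current cpu from the ready
 * set and cross-call the rest.  A sketch of that sequence as a hypothetical
 * helper:
 */
#if 0	/* illustrative sketch only; not compiled */
static void
example_hblk_sync_other_cpus(void)
{
	cpuset_t cpuset = cpu_ready_set;

	kpreempt_disable();
	CPUSET_DEL(cpuset, CPU->cpu_id);
	xt_sync(cpuset);
	xt_sync(cpuset);
	kpreempt_enable();
}
#endif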