patch as-lock-macro-simplification
--- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
+++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /*
29 29 * VM - Hardware Address Translation management for Spitfire MMU.
30 30 *
31 31 * This file implements the machine specific hardware translation
32 32 * needed by the VM system. The machine independent interface is
33 33 * described in <vm/hat.h> while the machine dependent interface
34 34 * and data structures are described in <vm/hat_sfmmu.h>.
35 35 *
36 36 * The hat layer manages the address translation hardware as a cache
37 37 * driven by calls from the higher levels in the VM system.
38 38 */
39 39
40 40 #include <sys/types.h>
41 41 #include <sys/kstat.h>
42 42 #include <vm/hat.h>
43 43 #include <vm/hat_sfmmu.h>
44 44 #include <vm/page.h>
45 45 #include <sys/pte.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/machparam.h>
50 50 #include <sys/vtrace.h>
51 51 #include <sys/kmem.h>
52 52 #include <sys/mmu.h>
53 53 #include <sys/cmn_err.h>
54 54 #include <sys/cpu.h>
55 55 #include <sys/cpuvar.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/lgrp.h>
58 58 #include <sys/archsystm.h>
59 59 #include <sys/machsystm.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <vm/as.h>
62 62 #include <vm/seg.h>
63 63 #include <vm/seg_kp.h>
64 64 #include <vm/seg_kmem.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/rm.h>
67 67 #include <sys/t_lock.h>
68 68 #include <sys/obpdefs.h>
69 69 #include <sys/vm_machparam.h>
70 70 #include <sys/var.h>
71 71 #include <sys/trap.h>
72 72 #include <sys/machtrap.h>
73 73 #include <sys/scb.h>
74 74 #include <sys/bitmap.h>
75 75 #include <sys/machlock.h>
76 76 #include <sys/membar.h>
77 77 #include <sys/atomic.h>
78 78 #include <sys/cpu_module.h>
79 79 #include <sys/prom_debug.h>
80 80 #include <sys/ksynch.h>
81 81 #include <sys/mem_config.h>
82 82 #include <sys/mem_cage.h>
83 83 #include <vm/vm_dep.h>
84 84 #include <vm/xhat_sfmmu.h>
85 85 #include <sys/fpu/fpusystm.h>
86 86 #include <vm/mach_kpm.h>
87 87 #include <sys/callb.h>
88 88
89 89 #ifdef DEBUG
90 90 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \
91 91 if (SFMMU_IS_SHMERID_VALID(rid)) { \
92 92 caddr_t _eaddr = (saddr) + (len); \
93 93 sf_srd_t *_srdp; \
94 94 sf_region_t *_rgnp; \
95 95 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
96 96 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \
97 97 ASSERT((hat) != ksfmmup); \
98 98 _srdp = (hat)->sfmmu_srdp; \
99 99 ASSERT(_srdp != NULL); \
100 100 ASSERT(_srdp->srd_refcnt != 0); \
101 101 _rgnp = _srdp->srd_hmergnp[(rid)]; \
102 102 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \
103 103 ASSERT(_rgnp->rgn_refcnt != 0); \
104 104 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \
105 105 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
106 106 SFMMU_REGION_HME); \
107 107 ASSERT((saddr) >= _rgnp->rgn_saddr); \
108 108 ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size); \
109 109 ASSERT(_eaddr > _rgnp->rgn_saddr); \
110 110 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \
111 111 }
112 112
113 113 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \
114 114 { \
115 115 caddr_t _hsva; \
116 116 caddr_t _heva; \
117 117 caddr_t _rsva; \
118 118 caddr_t _reva; \
119 119 int _ttesz = get_hblk_ttesz(hmeblkp); \
120 120 int _flagtte; \
121 121 ASSERT((srdp)->srd_refcnt != 0); \
122 122 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \
123 123 ASSERT((rgnp)->rgn_id == rid); \
124 124 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \
125 125 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \
126 126 SFMMU_REGION_HME); \
127 127 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \
128 128 _hsva = (caddr_t)get_hblk_base(hmeblkp); \
129 129 _heva = get_hblk_endaddr(hmeblkp); \
130 130 _rsva = (caddr_t)P2ALIGN( \
131 131 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \
132 132 _reva = (caddr_t)P2ROUNDUP( \
133 133 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \
134 134 HBLK_MIN_BYTES); \
135 135 ASSERT(_hsva >= _rsva); \
136 136 ASSERT(_hsva < _reva); \
137 137 ASSERT(_heva > _rsva); \
138 138 ASSERT(_heva <= _reva); \
139 139 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
140 140 _ttesz; \
141 141 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \
142 142 }
143 143
144 144 #else /* DEBUG */
145 145 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
146 146 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
147 147 #endif /* DEBUG */
148 148
149 149 #if defined(SF_ERRATA_57)
150 150 extern caddr_t errata57_limit;
151 151 #endif
152 152
153 153 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \
154 154 (sizeof (int64_t)))
155 155 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve)
156 156
157 157 #define HBLK_RESERVE_CNT 128
158 158 #define HBLK_RESERVE_MIN 20
159 159
160 160 static struct hme_blk *freehblkp;
161 161 static kmutex_t freehblkp_lock;
162 162 static int freehblkcnt;
163 163
164 164 static int64_t hblk_reserve[HME8BLK_SZ_RND];
165 165 static kmutex_t hblk_reserve_lock;
166 166 static kthread_t *hblk_reserve_thread;
167 167
168 168 static nucleus_hblk8_info_t nucleus_hblk8;
169 169 static nucleus_hblk1_info_t nucleus_hblk1;
170 170
171 171 /*
172 172  * Data to manage per-cpu hmeblk pending queues; hmeblks are queued here
173 173  * after the initial phase of removing an hmeblk from the hash chain.  See
174 174  * the detailed comment in sfmmu_hblk_hash_rm() for further details.
175 175 */
176 176 static cpu_hme_pend_t *cpu_hme_pend;
177 177 static uint_t cpu_hme_pend_thresh;
178 178 /*
179 179 * SFMMU specific hat functions
180 180 */
181 181 void hat_pagecachectl(struct page *, int);
182 182
183 183 /* flags for hat_pagecachectl */
184 184 #define HAT_CACHE 0x1
185 185 #define HAT_UNCACHE 0x2
186 186 #define HAT_TMPNC 0x4
187 187
188 188 /*
189 189 * Flag to allow the creation of non-cacheable translations
190 190 * to system memory. It is off by default. At the moment this
191 191 * flag is used by the ecache error injector. The error injector
192 192 * will turn it on when creating such a translation then shut it
193 193 * off when it's finished.
194 194 */
195 195
196 196 int sfmmu_allow_nc_trans = 0;
197 197
198 198 /*
199 199 * Flag to disable large page support.
200 200 * value of 1 => disable all large pages.
201 201 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
202 202 *
203 203 * For example, use the value 0x4 to disable 512K pages.
204 204 *
205 205 */
206 206 #define LARGE_PAGES_OFF 0x1
207 207
208 208 /*
209 209 * The disable_large_pages and disable_ism_large_pages variables control
210 210 * hat_memload_array and the page sizes to be used by ISM and the kernel.
211 211 *
212 212 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables
213 213 * are only used to control which OOB pages to use at upper VM segment creation
214 214 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines.
215 215 * Their values may come from platform or CPU specific code to disable page
216 216 * sizes that should not be used.
217 217 *
218 218 * WARNING: 512K pages are currently not supported for ISM/DISM.
219 219 */
220 220 uint_t disable_large_pages = 0;
221 221 uint_t disable_ism_large_pages = (1 << TTE512K);
222 222 uint_t disable_auto_data_large_pages = 0;
223 223 uint_t disable_auto_text_large_pages = 0;
224 224
225 225 /*
226 226 * Private sfmmu data structures for hat management
227 227 */
228 228 static struct kmem_cache *sfmmuid_cache;
229 229 static struct kmem_cache *mmuctxdom_cache;
230 230
231 231 /*
232 232 * Private sfmmu data structures for tsb management
233 233 */
234 234 static struct kmem_cache *sfmmu_tsbinfo_cache;
235 235 static struct kmem_cache *sfmmu_tsb8k_cache;
236 236 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX];
237 237 static vmem_t *kmem_bigtsb_arena;
238 238 static vmem_t *kmem_tsb_arena;
239 239
240 240 /*
241 241 * sfmmu static variables for hmeblk resource management.
242 242 */
243 243 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
244 244 static struct kmem_cache *sfmmu8_cache;
245 245 static struct kmem_cache *sfmmu1_cache;
246 246 static struct kmem_cache *pa_hment_cache;
247 247
248 248 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */
249 249 /*
250 250 * private data for ism
251 251 */
252 252 static struct kmem_cache *ism_blk_cache;
253 253 static struct kmem_cache *ism_ment_cache;
254 254 #define ISMID_STARTADDR NULL
255 255
256 256 /*
257 257 * Region management data structures and function declarations.
258 258 */
259 259
260 260 static void sfmmu_leave_srd(sfmmu_t *);
261 261 static int sfmmu_srdcache_constructor(void *, void *, int);
262 262 static void sfmmu_srdcache_destructor(void *, void *);
263 263 static int sfmmu_rgncache_constructor(void *, void *, int);
264 264 static void sfmmu_rgncache_destructor(void *, void *);
265 265 static int sfrgnmap_isnull(sf_region_map_t *);
266 266 static int sfhmergnmap_isnull(sf_hmeregion_map_t *);
267 267 static int sfmmu_scdcache_constructor(void *, void *, int);
268 268 static void sfmmu_scdcache_destructor(void *, void *);
269 269 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t,
270 270 size_t, void *, u_offset_t);
271 271
272 272 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1;
273 273 static sf_srd_bucket_t *srd_buckets;
274 274 static struct kmem_cache *srd_cache;
275 275 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1;
276 276 static struct kmem_cache *region_cache;
277 277 static struct kmem_cache *scd_cache;
278 278
279 279 #ifdef sun4v
280 280 int use_bigtsb_arena = 1;
281 281 #else
282 282 int use_bigtsb_arena = 0;
283 283 #endif
284 284
285 285 /* External /etc/system tunable, for turning the shctx support on and off */
286 286 int disable_shctx = 0;
287 287 /* Internal variable, set by MD if the HW supports shctx feature */
288 288 int shctx_on = 0;
289 289
290 290 #ifdef DEBUG
291 291 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int);
292 292 #endif
293 293 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *);
294 294 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *);
295 295
296 296 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *);
297 297 static void sfmmu_find_scd(sfmmu_t *);
298 298 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *);
299 299 static void sfmmu_finish_join_scd(sfmmu_t *);
300 300 static void sfmmu_leave_scd(sfmmu_t *, uchar_t);
301 301 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *);
302 302 static int sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *);
303 303 static void sfmmu_free_scd_tsbs(sfmmu_t *);
304 304 static void sfmmu_tsb_inv_ctx(sfmmu_t *);
305 305 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *);
306 306 static void sfmmu_ism_hatflags(sfmmu_t *, int);
307 307 static int sfmmu_srd_lock_held(sf_srd_t *);
308 308 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *);
309 309 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *);
310 310 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *);
311 311 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *);
312 312 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *);
313 313 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *);
314 314
315 315 /*
316 316 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists,
317 317 * HAT flags, synchronizing TLB/TSB coherency, and context management.
318 318 * The lock is hashed on the sfmmup since the case where we need to lock
319 319 * all processes is rare but does occur (e.g. we need to unload a shared
320 320 * mapping from all processes using the mapping). We have a lot of buckets,
321 321 * and each slab of sfmmu_t's can use about a quarter of them, giving us
322 322 * a fairly good distribution without wasting too much space and overhead
323 323 * when we have to grab them all.
324 324 */
325 325 #define SFMMU_NUM_LOCK 128 /* must be power of two */
326 326 hatlock_t hat_lock[SFMMU_NUM_LOCK];
327 327
328 328 /*
329 329 * Hash algorithm optimized for a small number of slabs.
330 330 * 7 is (highbit((sizeof sfmmu_t)) - 1)
331 331 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a
332 332 * kmem_cache, and thus they will be sequential within that cache. In
333 333 * addition, each new slab will have a different "color" up to cache_maxcolor
334 334 * which will skew the hashing for each successive slab which is allocated.
335 335 * If the size of sfmmu_t changed to a larger size, this algorithm may need
336 336 * to be revisited.
337 337 */
338 338 #define TSB_HASH_SHIFT_BITS (7)
339 339 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS)
340 340
341 341 #ifdef DEBUG
342 342 int tsb_hash_debug = 0;
343 343 #define TSB_HASH(sfmmup) \
344 344 (tsb_hash_debug ? &hat_lock[0] : \
345 345 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)])
346 346 #else /* DEBUG */
347 347 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]
348 348 #endif /* DEBUG */
349 349
350 350
351 351 /* sfmmu_replace_tsb() return codes. */
352 352 typedef enum tsb_replace_rc {
353 353 TSB_SUCCESS,
354 354 TSB_ALLOCFAIL,
355 355 TSB_LOSTRACE,
356 356 TSB_ALREADY_SWAPPED,
357 357 TSB_CANTGROW
358 358 } tsb_replace_rc_t;
359 359
360 360 /*
361 361 * Flags for TSB allocation routines.
362 362 */
363 363 #define TSB_ALLOC 0x01
364 364 #define TSB_FORCEALLOC 0x02
365 365 #define TSB_GROW 0x04
366 366 #define TSB_SHRINK 0x08
367 367 #define TSB_SWAPIN 0x10
368 368
369 369 /*
370 370 * Support for HAT callbacks.
371 371 */
372 372 #define SFMMU_MAX_RELOC_CALLBACKS 10
373 373 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS;
374 374 static id_t sfmmu_cb_nextid = 0;
375 375 static id_t sfmmu_tsb_cb_id;
376 376 struct sfmmu_callback *sfmmu_cb_table;
377 377
378 378 kmutex_t kpr_mutex;
379 379 kmutex_t kpr_suspendlock;
380 380 kthread_t *kreloc_thread;
381 381
382 382 /*
383 383 * Enable VA->PA translation sanity checking on DEBUG kernels.
384 384 * Disabled by default. This is incompatible with some
385 385 * drivers (error injector, RSM) so if it breaks you get
386 386 * to keep both pieces.
387 387 */
388 388 int hat_check_vtop = 0;
389 389
390 390 /*
391 391 * Private sfmmu routines (prototypes)
392 392 */
393 393 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
394 394 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
395 395 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
396 396 uint_t);
397 397 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
398 398 caddr_t, demap_range_t *, uint_t);
399 399 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
400 400 caddr_t, int);
401 401 static void sfmmu_hblk_free(struct hme_blk **);
402 402 static void sfmmu_hblks_list_purge(struct hme_blk **, int);
403 403 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t);
404 404 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t);
405 405 static struct hme_blk *sfmmu_hblk_steal(int);
406 406 static int sfmmu_steal_this_hblk(struct hmehash_bucket *,
407 407 struct hme_blk *, uint64_t, struct hme_blk *);
408 408 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t);
409 409
410 410 static void hat_do_memload_array(struct hat *, caddr_t, size_t,
411 411 struct page **, uint_t, uint_t, uint_t);
412 412 static void hat_do_memload(struct hat *, caddr_t, struct page *,
413 413 uint_t, uint_t, uint_t);
414 414 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **,
415 415 uint_t, uint_t, pgcnt_t, uint_t);
416 416 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *,
417 417 uint_t);
418 418 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **,
419 419 uint_t, uint_t);
420 420 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *,
421 421 caddr_t, int, uint_t);
422 422 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *,
423 423 struct hmehash_bucket *, caddr_t, uint_t, uint_t,
424 424 uint_t);
425 425 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *,
426 426 caddr_t, page_t **, uint_t, uint_t);
427 427 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *);
428 428
429 429 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int);
430 430 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *);
431 431 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
432 432 #ifdef VAC
433 433 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *);
434 434 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *);
435 435 int tst_tnc(page_t *pp, pgcnt_t);
436 436 void conv_tnc(page_t *pp, int);
437 437 #endif
438 438
439 439 static void sfmmu_get_ctx(sfmmu_t *);
440 440 static void sfmmu_free_sfmmu(sfmmu_t *);
441 441
442 442 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *);
443 443 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int);
444 444
445 445 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int);
446 446 static void hat_pagereload(struct page *, struct page *);
447 447 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t);
448 448 #ifdef VAC
449 449 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t);
450 450 static void sfmmu_page_cache(page_t *, int, int, int);
451 451 #endif
452 452
453 453 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *,
454 454 struct hme_blk *, int);
455 455 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
456 456 pfn_t, int, int, int, int);
457 457 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
458 458 pfn_t, int);
459 459 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
460 460 static void sfmmu_tlb_range_demap(demap_range_t *);
461 461 static void sfmmu_invalidate_ctx(sfmmu_t *);
462 462 static void sfmmu_sync_mmustate(sfmmu_t *);
463 463
464 464 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
465 465 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
466 466 sfmmu_t *);
467 467 static void sfmmu_tsb_free(struct tsb_info *);
468 468 static void sfmmu_tsbinfo_free(struct tsb_info *);
469 469 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
470 470 sfmmu_t *);
471 471 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
472 472 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
473 473 static int sfmmu_select_tsb_szc(pgcnt_t);
474 474 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
475 475 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \
476 476 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc)
477 477 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \
478 478 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc)
479 479 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *);
480 480 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t,
481 481 hatlock_t *, uint_t);
482 482 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int);
483 483
484 484 #ifdef VAC
485 485 void sfmmu_cache_flush(pfn_t, int);
486 486 void sfmmu_cache_flushcolor(int, pfn_t);
487 487 #endif
488 488 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t,
489 489 caddr_t, demap_range_t *, uint_t, int);
490 490
491 491 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *);
492 492 static uint_t sfmmu_ptov_attr(tte_t *);
493 493 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t,
494 494 caddr_t, demap_range_t *, uint_t);
495 495 static uint_t sfmmu_vtop_prot(uint_t, uint_t *);
496 496 static int sfmmu_idcache_constructor(void *, void *, int);
497 497 static void sfmmu_idcache_destructor(void *, void *);
498 498 static int sfmmu_hblkcache_constructor(void *, void *, int);
499 499 static void sfmmu_hblkcache_destructor(void *, void *);
500 500 static void sfmmu_hblkcache_reclaim(void *);
501 501 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *,
502 502 struct hmehash_bucket *);
503 503 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *,
504 504 struct hme_blk *, struct hme_blk **, int);
505 505 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
506 506 uint64_t);
507 507 static struct hme_blk *sfmmu_check_pending_hblks(int);
508 508 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int);
509 509 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int);
510 510 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t,
511 511 int, caddr_t *);
512 512 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *);
513 513
514 514 static void sfmmu_rm_large_mappings(page_t *, int);
515 515
516 516 static void hat_lock_init(void);
517 517 static void hat_kstat_init(void);
518 518 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw);
519 519 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *);
520 520 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t);
521 521 static void sfmmu_check_page_sizes(sfmmu_t *, int);
522 522 int fnd_mapping_sz(page_t *);
523 523 static void iment_add(struct ism_ment *, struct hat *);
524 524 static void iment_sub(struct ism_ment *, struct hat *);
525 525 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc);
526 526 extern void sfmmu_setup_tsbinfo(sfmmu_t *);
527 527 extern void sfmmu_clear_utsbinfo(void);
528 528
529 529 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t);
530 530
531 531 extern int vpm_enable;
532 532
533 533 /* kpm globals */
534 534 #ifdef DEBUG
535 535 /*
536 536 * Enable trap level tsbmiss handling
537 537 */
538 538 int kpm_tsbmtl = 1;
539 539
540 540 /*
541 541 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the
542 542 * required TLB shootdowns in this case, so handle w/ care. Off by default.
543 543 */
544 544 int kpm_tlb_flush;
545 545 #endif /* DEBUG */
546 546
547 547 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int);
548 548
549 549 #ifdef DEBUG
550 550 static void sfmmu_check_hblk_flist();
551 551 #endif
552 552
553 553 /*
554 554  * Semi-private sfmmu data structures. Some of them are initialized in
555 555 * startup or in hat_init. Some of them are private but accessed by
556 556 * assembly code or mach_sfmmu.c
557 557 */
558 558 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */
559 559 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */
560 560 uint64_t uhme_hash_pa; /* PA of uhme_hash */
561 561 uint64_t khme_hash_pa; /* PA of khme_hash */
562 562 int uhmehash_num; /* # of buckets in user hash table */
563 563 int khmehash_num; /* # of buckets in kernel hash table */
564 564
565 565 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */
566 566 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */
567 567 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */
568 568
569 569 #define DEFAULT_NUM_CTXS_PER_MMU 8192
570 570 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU;
571 571
572 572 int cache; /* describes system cache */
573 573
574 574 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */
575 575 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */
576 576 int ktsb_szcode; /* kernel 8k-indexed tsb size code */
577 577 int ktsb_sz; /* kernel 8k-indexed tsb size */
578 578
579 579 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */
580 580 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */
581 581 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */
582 582 int ktsb4m_sz; /* kernel 4m-indexed tsb size */
583 583
584 584 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */
585 585 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */
586 586 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */
587 587 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */
588 588
589 589 #ifndef sun4v
590 590 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */
591 591 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */
592 592 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */
593 593 caddr_t utsb_vabase; /* reserved kernel virtual memory */
594 594 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */
595 595 #endif /* sun4v */
596 596 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */
597 597 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */
598 598 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */
599 599
600 600 /*
601 601 * Size to use for TSB slabs. Future platforms that support page sizes
602 602 * larger than 4M may wish to change these values, and provide their own
603 603 * assembly macros for building and decoding the TSB base register contents.
604 604 * Note disable_large_pages will override the value set here.
605 605 */
606 606 static uint_t tsb_slab_ttesz = TTE4M;
607 607 size_t tsb_slab_size = MMU_PAGESIZE4M;
608 608 uint_t tsb_slab_shift = MMU_PAGESHIFT4M;
609 609 /* PFN mask for TTE */
610 610 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT;
611 611
612 612 /*
613 613 * Size to use for TSB slabs. These are used only when 256M tsb arenas
614 614 * exist.
615 615 */
616 616 static uint_t bigtsb_slab_ttesz = TTE256M;
617 617 static size_t bigtsb_slab_size = MMU_PAGESIZE256M;
618 618 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M;
619 619 /* 256M page alignment for 8K pfn */
620 620 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT;
621 621
622 622 /* largest TSB size to grow to, will be smaller on smaller memory systems */
623 623 static int tsb_max_growsize = 0;
624 624
625 625 /*
626 626 * Tunable parameters dealing with TSB policies.
627 627 */
628 628
629 629 /*
630 630 * This undocumented tunable forces all 8K TSBs to be allocated from
631 631 * the kernel heap rather than from the kmem_tsb_default_arena arenas.
632 632 */
633 633 #ifdef DEBUG
634 634 int tsb_forceheap = 0;
635 635 #endif /* DEBUG */
636 636
637 637 /*
638 638 * Decide whether to use per-lgroup arenas, or one global set of
639 639 * TSB arenas. The default is not to break up per-lgroup, since
640 640 * most platforms don't recognize any tangible benefit from it.
641 641 */
642 642 int tsb_lgrp_affinity = 0;
643 643
644 644 /*
645 645 * Used for growing the TSB based on the process RSS.
646 646 * tsb_rss_factor is based on the smallest TSB, and is
647 647 * shifted by the TSB size to determine if we need to grow.
648 648 * The default will grow the TSB if the number of TTEs for
649 649 * this page size exceeds 75% of the number of TSB entries,
650 650 * which should _almost_ eliminate all conflict misses
651 651 * (at the expense of using up lots and lots of memory).
652 652 */
653 653 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75)
654 654 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc)
655 655 #define SELECT_TSB_SIZECODE(pgcnt) ( \
656 656 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \
657 657 default_tsb_size)
658 658 #define TSB_OK_SHRINK() \
659 659 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree)
660 660 #define TSB_OK_GROW() \
661 661 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree)
662 662
663 663 int enable_tsb_rss_sizing = 1;
664 664 int tsb_rss_factor = (int)TSB_RSS_FACTOR;
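
A worked instance of the arithmetic above, as a sketch that assumes the minimum TSB (TSB_MIN_SZCODE) holds 512 entries:

	/*
	 * Sketch (assumes TSB_ENTRIES(TSB_MIN_SZCODE) == 512):
	 *	tsb_rss_factor        = 512 * 0.75 = 384
	 *	SFMMU_RSS_TSBSIZE(0)  = 384 << 0   = 384
	 *	SFMMU_RSS_TSBSIZE(1)  = 384 << 1   = 768
	 * so a TSB of size code N is a candidate for growth once the TTE
	 * count for its page size exceeds 384 << N, i.e. 75% of its entries.
	 */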
665 665
666 666 /* which TSB size code to use for new address spaces or if rss sizing off */
667 667 int default_tsb_size = TSB_8K_SZCODE;
668 668
669 669 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */
670 670 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */
671 671 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32
672 672
673 673 #ifdef DEBUG
674 674 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */
675 675 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */
676 676 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */
677 677 static int tsb_alloc_fail_mtbf = 0;
678 678 static int tsb_alloc_count = 0;
679 679 #endif /* DEBUG */
680 680
681 681 /* if set to 1, will remap valid TTEs when growing TSB. */
682 682 int tsb_remap_ttes = 1;
683 683
684 684 /*
685 685 * If we have more than this many mappings, allocate a second TSB.
686 686 * This default is chosen because the I/D fully associative TLBs are
687 687 * assumed to have at least 8 available entries. Platforms with a
688 688 * larger fully-associative TLB could probably override the default.
689 689 */
690 690
691 691 #ifdef sun4v
692 692 int tsb_sectsb_threshold = 0;
693 693 #else
694 694 int tsb_sectsb_threshold = 8;
695 695 #endif
696 696
697 697 /*
698 698 * kstat data
699 699 */
700 700 struct sfmmu_global_stat sfmmu_global_stat;
701 701 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
702 702
703 703 /*
704 704 * Global data
705 705 */
706 706 sfmmu_t *ksfmmup; /* kernel's hat id */
707 707
708 708 #ifdef DEBUG
709 709 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
710 710 #endif
711 711
712 712 /* sfmmu locking operations */
713 713 static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
714 714 static int sfmmu_mlspl_held(struct page *, int);
715 715
716 716 kmutex_t *sfmmu_page_enter(page_t *);
717 717 void sfmmu_page_exit(kmutex_t *);
718 718 int sfmmu_page_spl_held(struct page *);
719 719
720 720 /* sfmmu internal locking operations - accessed directly */
721 721 static void sfmmu_mlist_reloc_enter(page_t *, page_t *,
722 722 kmutex_t **, kmutex_t **);
723 723 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *);
724 724 static hatlock_t *
725 725 sfmmu_hat_enter(sfmmu_t *);
726 726 static hatlock_t *
727 727 sfmmu_hat_tryenter(sfmmu_t *);
728 728 static void sfmmu_hat_exit(hatlock_t *);
729 729 static void sfmmu_hat_lock_all(void);
730 730 static void sfmmu_hat_unlock_all(void);
731 731 static void sfmmu_ismhat_enter(sfmmu_t *, int);
732 732 static void sfmmu_ismhat_exit(sfmmu_t *, int);
733 733
734 734 kpm_hlk_t *kpmp_table;
735 735 uint_t kpmp_table_sz; /* must be a power of 2 */
736 736 uchar_t kpmp_shift;
737 737
738 738 kpm_shlk_t *kpmp_stable;
739 739 uint_t kpmp_stable_sz; /* must be a power of 2 */
740 740
741 741 /*
742 742 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128.
743 743 * SPL_SHIFT is log2(SPL_TABLE_SIZE).
744 744 */
745 745 #if ((2*NCPU_P2) > 128)
746 746 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1))
747 747 #else
748 748 #define SPL_SHIFT 7U
749 749 #endif
750 750 #define SPL_TABLE_SIZE (1U << SPL_SHIFT)
751 751 #define SPL_MASK (SPL_TABLE_SIZE - 1)
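
For example (a sketch; it assumes NCPU_P2 is NCPU rounded up to a power of two and NCPU_LOG2 is its log2):

	/*
	 * Sketch: with NCPU_P2 == 256, 2 * 256 > 128, so SPL_SHIFT is
	 * NCPU_LOG2 + 1 == 9 and SPL_TABLE_SIZE == 512 locks.  With
	 * NCPU_P2 <= 64, 2 * NCPU_P2 <= 128 and the table stays at the
	 * 128-entry minimum (SPL_SHIFT == 7).
	 */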
752 752
753 753 /*
754 754 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t
755 755 * and by multiples of SPL_SHIFT to get as many varied bits as we can.
756 756 */
757 757 #define SPL_INDEX(pp) \
758 758 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \
759 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \
760 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \
761 761 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \
762 762 SPL_MASK)
763 763
764 764 #define SPL_HASH(pp) \
765 765 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex)
766 766
767 767 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE];
768 768
769 769 /* Array of mutexes protecting a page's mapping list and p_nrm field. */
770 770
771 771 #define MML_TABLE_SIZE SPL_TABLE_SIZE
772 772 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex)
773 773
774 774 static pad_mutex_t mml_table[MML_TABLE_SIZE];
775 775
776 776 /*
777 777 * hat_unload_callback() will group together callbacks in order
778 778 * to avoid xt_sync() calls. This is the maximum size of the group.
779 779 */
780 780 #define MAX_CB_ADDR 32
781 781
782 782 tte_t hw_tte;
783 783 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT;
784 784
785 785 static char *mmu_ctx_kstat_names[] = {
786 786 "mmu_ctx_tsb_exceptions",
787 787 "mmu_ctx_tsb_raise_exception",
788 788 "mmu_ctx_wrap_around",
789 789 };
790 790
791 791 /*
792 792 * Wrapper for vmem_xalloc since vmem_create only allows limited
793 793 * parameters for vm_source_alloc functions. This function allows us
794 794 * to specify alignment consistent with the size of the object being
795 795 * allocated.
796 796 */
797 797 static void *
798 798 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag)
799 799 {
800 800 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag));
801 801 }
802 802
803 803 /* Common code for setting tsb_alloc_hiwater. */
804 804 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \
805 805 ptob(pages) / tsb_alloc_hiwater_factor
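
A worked example of the cap, as a sketch using the default factor of 32 (TSB_ALLOC_HIWATER_FACTOR_DEFAULT):

	/*
	 * Sketch: on a machine with 8 GB of physical memory,
	 *	tsb_alloc_hiwater = ptob(physmem) / 32 = 8 GB / 32 = 256 MB
	 * so at most ~256 MB of TSB slabs are allocated before further TSB
	 * allocations fall back to the kernel heap (see hat_init() below).
	 */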
806 806
807 807 /*
808 808 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by
809 809 * a single TSB. physmem is the number of physical pages so we need physmem 8K
810 810 * TTEs to represent all those physical pages. We round this up by using
811 811 * 1<<highbit(). To figure out which size code to use, remember that the size
812 812 * code is just an amount to shift the smallest TSB size to get the size of
813 813 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or
814 814 * highbit() - 1) to get the size code for the smallest TSB that can represent
815 815 * all of physical memory, while erring on the side of too much.
816 816 *
817 817 * Restrict tsb_max_growsize to make sure that:
818 818 * 1) TSBs can't grow larger than the TSB slab size
819 819 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE.
820 820 */
821 821 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \
822 822 int _i, _szc, _slabszc, _tsbszc; \
823 823 \
824 824 _i = highbit(pages); \
825 825 if ((1 << (_i - 1)) == (pages)) \
826 826 _i--; /* 2^n case, round down */ \
827 827 _szc = _i - TSB_START_SIZE; \
828 828 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \
829 829 _tsbszc = MIN(_szc, _slabszc); \
830 830 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \
831 831 }
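
A worked instance of the size-code computation, as a sketch (TSB_START_SIZE and the slab constants are platform dependent and left symbolic):

	/*
	 * Sketch: with 4 GB of memory there are 2^19 8K pages.  highbit()
	 * returns 20; since 2^19 is an exact power of two, _i is rounded
	 * down to 19 and _szc = 19 - TSB_START_SIZE.  That value is then
	 * clamped to the slab size code and to UTSB_MAX_SZCODE, whichever
	 * is smaller, before being stored in tsb_max_growsize.
	 */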
832 832
833 833 /*
834 834 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the
835 835 * tsb_info which handles that TTE size.
836 836 */
837 837 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \
838 838 (tsbinfop) = (sfmmup)->sfmmu_tsb; \
839 839 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \
840 840 sfmmu_hat_lock_held(sfmmup)); \
841 841 if ((tte_szc) >= TTE4M) { \
842 842 ASSERT((tsbinfop) != NULL); \
843 843 (tsbinfop) = (tsbinfop)->tsb_next; \
844 844 } \
845 845 }
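
A minimal usage sketch of the macro; sfmmup, vaddr and tte are assumed to be in scope and the hat lock held:

	/* sketch: find the tsb_info backing 4M mappings for this hat */
	struct tsb_info *tsbinfop;

	SFMMU_GET_TSBINFO(tsbinfop, sfmmup, TTE4M);
	if (tsbinfop != NULL)
		sfmmu_load_tsb(sfmmup, vaddr, &tte, TTE4M);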
846 846
847 847 /*
848 848 * Macro to use to unload entries from the TSB.
849 849 * It has knowledge of which page sizes get replicated in the TSB
850 850 * and will call the appropriate unload routine for the appropriate size.
851 851 */
852 852 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \
853 853 { \
854 854 int ttesz = get_hblk_ttesz(hmeblkp); \
855 855 if (ttesz == TTE8K || ttesz == TTE4M) { \
856 856 sfmmu_unload_tsb(sfmmup, addr, ttesz); \
857 857 } else { \
858 858 caddr_t sva = ismhat ? addr : \
859 859 (caddr_t)get_hblk_base(hmeblkp); \
860 860 caddr_t eva = sva + get_hblk_span(hmeblkp); \
861 861 ASSERT(addr >= sva && addr < eva); \
862 862 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \
863 863 } \
864 864 }
865 865
866 866
867 867 /* Update tsb_alloc_hiwater after memory is configured. */
868 868 /*ARGSUSED*/
869 869 static void
870 870 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages)
871 871 {
872 872 /* Assumes physmem has already been updated. */
873 873 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
874 874 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
875 875 }
876 876
877 877 /*
878 878 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here
879 879 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is
880 880 * deleted.
881 881 */
882 882 /*ARGSUSED*/
883 883 static int
884 884 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages)
885 885 {
886 886 return (0);
887 887 }
888 888
889 889 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */
890 890 /*ARGSUSED*/
891 891 static void
892 892 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
893 893 {
894 894 /*
895 895 * Whether the delete was cancelled or not, just go ahead and update
896 896 * tsb_alloc_hiwater and tsb_max_growsize.
897 897 */
898 898 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
899 899 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
900 900 }
901 901
902 902 static kphysm_setup_vector_t sfmmu_update_vec = {
903 903 KPHYSM_SETUP_VECTOR_VERSION, /* version */
904 904 sfmmu_update_post_add, /* post_add */
905 905 sfmmu_update_pre_del, /* pre_del */
906 906 sfmmu_update_post_del /* post_del */
907 907 };
908 908
909 909
910 910 /*
911 911 * HME_BLK HASH PRIMITIVES
912 912 */
913 913
914 914 /*
915 915 * Enter a hme on the mapping list for page pp.
916 916 * When large pages are more prevalent in the system we might want to
917 917 * keep the mapping list in ascending order by the hment size. For now,
918 918 * small pages are more frequent, so don't slow it down.
919 919 */
920 920 #define HME_ADD(hme, pp) \
921 921 { \
922 922 ASSERT(sfmmu_mlist_held(pp)); \
923 923 \
924 924 hme->hme_prev = NULL; \
925 925 hme->hme_next = pp->p_mapping; \
926 926 hme->hme_page = pp; \
927 927 if (pp->p_mapping) { \
928 928 ((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
929 929 ASSERT(pp->p_share > 0); \
930 930 } else { \
931 931 /* EMPTY */ \
932 932 ASSERT(pp->p_share == 0); \
933 933 } \
934 934 pp->p_mapping = hme; \
935 935 pp->p_share++; \
936 936 }
937 937
938 938 /*
939 939  * Remove a hme from the mapping list for page pp.
940 940  * If we are unmapping a large translation, we need to make sure that the
941 941  * change is reflected in the corresponding bit of the p_index field.
942 942 */
943 943 #define HME_SUB(hme, pp) \
944 944 { \
945 945 ASSERT(sfmmu_mlist_held(pp)); \
946 946 ASSERT(hme->hme_page == pp || IS_PAHME(hme)); \
947 947 \
948 948 if (pp->p_mapping == NULL) { \
949 949 panic("hme_remove - no mappings"); \
950 950 } \
951 951 \
952 952 membar_stst(); /* ensure previous stores finish */ \
953 953 \
954 954 ASSERT(pp->p_share > 0); \
955 955 pp->p_share--; \
956 956 \
957 957 if (hme->hme_prev) { \
958 958 ASSERT(pp->p_mapping != hme); \
959 959 ASSERT(hme->hme_prev->hme_page == pp || \
960 960 IS_PAHME(hme->hme_prev)); \
961 961 hme->hme_prev->hme_next = hme->hme_next; \
962 962 } else { \
963 963 ASSERT(pp->p_mapping == hme); \
964 964 pp->p_mapping = hme->hme_next; \
965 965 ASSERT((pp->p_mapping == NULL) ? \
966 966 (pp->p_share == 0) : 1); \
967 967 } \
968 968 \
969 969 if (hme->hme_next) { \
970 970 ASSERT(hme->hme_next->hme_page == pp || \
971 971 IS_PAHME(hme->hme_next)); \
972 972 hme->hme_next->hme_prev = hme->hme_prev; \
973 973 } \
974 974 \
975 975 /* zero out the entry */ \
976 976 hme->hme_next = NULL; \
977 977 hme->hme_prev = NULL; \
978 978 hme->hme_page = NULL; \
979 979 \
980 980 if (hme_size(hme) > TTE8K) { \
981 981 /* remove mappings for remainder of large pg */ \
982 982 sfmmu_rm_large_mappings(pp, hme_size(hme)); \
983 983 } \
984 984 }
985 985
986 986 /*
987 987  * This macro returns the hment given the hme_blk and a vaddr.
988 988 * It assumes addr has already been checked to belong to hme_blk's
989 989 * range.
990 990 */
991 991 #define HBLKTOHME(hment, hmeblkp, addr) \
992 992 { \
993 993 int index; \
994 994 HBLKTOHME_IDX(hment, hmeblkp, addr, index) \
995 995 }
996 996
997 997 /*
998 998 * Version of HBLKTOHME that also returns the index in hmeblkp
999 999 * of the hment.
1000 1000 */
1001 1001 #define HBLKTOHME_IDX(hment, hmeblkp, addr, idx) \
1002 1002 { \
1003 1003 ASSERT(in_hblk_range((hmeblkp), (addr))); \
1004 1004 \
1005 1005 if (get_hblk_ttesz(hmeblkp) == TTE8K) { \
1006 1006 idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
1007 1007 } else \
1008 1008 idx = 0; \
1009 1009 \
1010 1010 (hment) = &(hmeblkp)->hblk_hme[idx]; \
1011 1011 }
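
Usage sketch for the two macros above; hmeblkp and addr are assumed to be in scope, with addr already validated by in_hblk_range():

	/* sketch: fetch the sf_hment that maps addr within this hme_blk */
	struct sf_hment *sfhme;

	HBLKTOHME(sfhme, hmeblkp, addr);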
1012 1012
1013 1013 /*
1014 1014 * Disable any page sizes not supported by the CPU
1015 1015 */
1016 1016 void
1017 1017 hat_init_pagesizes()
1018 1018 {
1019 1019 int i;
1020 1020
1021 1021 mmu_exported_page_sizes = 0;
1022 1022 for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 1023
1024 1024 szc_2_userszc[i] = (uint_t)-1;
1025 1025 userszc_2_szc[i] = (uint_t)-1;
1026 1026
1027 1027 if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 1028 disable_large_pages |= (1 << i);
1029 1029 } else {
1030 1030 szc_2_userszc[i] = mmu_exported_page_sizes;
1031 1031 userszc_2_szc[mmu_exported_page_sizes] = i;
1032 1032 mmu_exported_page_sizes++;
1033 1033 }
1034 1034 }
1035 1035
1036 1036 disable_ism_large_pages |= disable_large_pages;
1037 1037 disable_auto_data_large_pages = disable_large_pages;
1038 1038 disable_auto_text_large_pages = disable_large_pages;
1039 1039
1040 1040 /*
1041 1041 * Initialize mmu-specific large page sizes.
1042 1042 */
1043 1043 if (&mmu_large_pages_disabled) {
1044 1044 disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
1045 1045 disable_ism_large_pages |=
1046 1046 mmu_large_pages_disabled(HAT_LOAD_SHARE);
1047 1047 disable_auto_data_large_pages |=
1048 1048 mmu_large_pages_disabled(HAT_AUTO_DATA);
1049 1049 disable_auto_text_large_pages |=
1050 1050 mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 1051 }
1052 1052 }
1053 1053
1054 1054 /*
1055 1055 * Initialize the hardware address translation structures.
1056 1056 */
1057 1057 void
1058 1058 hat_init(void)
1059 1059 {
1060 1060 int i;
1061 1061 uint_t sz;
1062 1062 size_t size;
1063 1063
1064 1064 hat_lock_init();
1065 1065 hat_kstat_init();
1066 1066
1067 1067 /*
1068 1068 * Hardware-only bits in a TTE
1069 1069 */
1070 1070 MAKE_TTE_MASK(&hw_tte);
1071 1071
1072 1072 hat_init_pagesizes();
1073 1073
1074 1074 /* Initialize the hash locks */
1075 1075 for (i = 0; i < khmehash_num; i++) {
1076 1076 mutex_init(&khme_hash[i].hmehash_mutex, NULL,
1077 1077 MUTEX_DEFAULT, NULL);
1078 1078 khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1079 1079 }
1080 1080 for (i = 0; i < uhmehash_num; i++) {
1081 1081 mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
1082 1082 MUTEX_DEFAULT, NULL);
1083 1083 uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
1084 1084 }
1085 1085 khmehash_num--; /* make sure counter starts from 0 */
1086 1086 uhmehash_num--; /* make sure counter starts from 0 */
1087 1087
1088 1088 /*
1089 1089 * Allocate context domain structures.
1090 1090 *
1091 1091 * A platform may choose to modify max_mmu_ctxdoms in
1092 1092 * set_platform_defaults(). If a platform does not define
1093 1093 * a set_platform_defaults() or does not choose to modify
1094 1094 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
1095 1095 *
1096 1096 * For all platforms that have CPUs sharing MMUs, this
1097 1097 * value must be defined.
1098 1098 */
1099 1099 if (max_mmu_ctxdoms == 0)
1100 1100 max_mmu_ctxdoms = max_ncpus;
1101 1101
1102 1102 size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
1103 1103 mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);
1104 1104
1105 1105 /* mmu_ctx_t is 64 bytes aligned */
1106 1106 mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
1107 1107 sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
1108 1108 /*
1109 1109 * MMU context domain initialization for the Boot CPU.
1110 1110 * This needs the context domains array allocated above.
1111 1111 */
1112 1112 mutex_enter(&cpu_lock);
1113 1113 sfmmu_cpu_init(CPU);
1114 1114 mutex_exit(&cpu_lock);
1115 1115
1116 1116 /*
1117 1117 * Intialize ism mapping list lock.
1118 1118 */
1119 1119  * Initialize ism mapping list lock.
1120 1120 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL);
1121 1121
1122 1122 /*
1123 1123 * Each sfmmu structure carries an array of MMU context info
1124 1124 * structures, one per context domain. The size of this array depends
1125 1125 * on the maximum number of context domains. So, the size of the
1126 1126 * sfmmu structure varies per platform.
1127 1127 *
1128 1128 * sfmmu is allocated from static arena, because trap
1129 1129 * handler at TL > 0 is not allowed to touch kernel relocatable
1130 1130 * memory. sfmmu's alignment is changed to 64 bytes from
1131 1131 * default 8 bytes, as the lower 6 bits will be used to pass
1132 1132 * pgcnt to vtag_flush_pgcnt_tl1.
1133 1133 */
1134 1134 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1);
1135 1135
1136 1136 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size,
1137 1137 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor,
1138 1138 NULL, NULL, static_arena, 0);
1139 1139
1140 1140 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache",
1141 1141 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
1142 1142
1143 1143 /*
1144 1144 * Since we only use the tsb8k cache to "borrow" pages for TSBs
1145 1145 * from the heap when low on memory or when TSB_FORCEALLOC is
1146 1146 * specified, don't use magazines to cache them--we want to return
1147 1147 * them to the system as quickly as possible.
1148 1148 */
1149 1149 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache",
1150 1150 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL,
1151 1151 static_arena, KMC_NOMAGAZINE);
1152 1152
1153 1153 /*
1154 1154 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical
1155 1155 * memory, which corresponds to the old static reserve for TSBs.
1156 1156 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of
1157 1157 * memory we'll allocate for TSB slabs; beyond this point TSB
1158 1158 * allocations will be taken from the kernel heap (via
1159 1159 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem
1160 1160 * consumer.
1161 1161 */
1162 1162 if (tsb_alloc_hiwater_factor == 0) {
1163 1163 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT;
1164 1164 }
1165 1165 SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
1166 1166
1167 1167 for (sz = tsb_slab_ttesz; sz > 0; sz--) {
1168 1168 if (!(disable_large_pages & (1 << sz)))
1169 1169 break;
1170 1170 }
1171 1171
1172 1172 if (sz < tsb_slab_ttesz) {
1173 1173 tsb_slab_ttesz = sz;
1174 1174 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz;
1175 1175 tsb_slab_size = 1 << tsb_slab_shift;
1176 1176 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1;
1177 1177 use_bigtsb_arena = 0;
1178 1178 } else if (use_bigtsb_arena &&
1179 1179 (disable_large_pages & (1 << bigtsb_slab_ttesz))) {
1180 1180 use_bigtsb_arena = 0;
1181 1181 }
1182 1182
1183 1183 if (!use_bigtsb_arena) {
1184 1184 bigtsb_slab_shift = tsb_slab_shift;
1185 1185 }
1186 1186 SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
1187 1187
1188 1188 /*
1189 1189 * On smaller memory systems, allocate TSB memory in smaller chunks
1190 1190 * than the default 4M slab size. We also honor disable_large_pages
1191 1191 * here.
1192 1192 *
1193 1193 * The trap handlers need to be patched with the final slab shift,
1194 1194 * since they need to be able to construct the TSB pointer at runtime.
1195 1195 */
1196 1196 if ((tsb_max_growsize <= TSB_512K_SZCODE) &&
1197 1197 !(disable_large_pages & (1 << TTE512K))) {
1198 1198 tsb_slab_ttesz = TTE512K;
1199 1199 tsb_slab_shift = MMU_PAGESHIFT512K;
1200 1200 tsb_slab_size = MMU_PAGESIZE512K;
1201 1201 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT;
1202 1202 use_bigtsb_arena = 0;
1203 1203 }
1204 1204
1205 1205 if (!use_bigtsb_arena) {
1206 1206 bigtsb_slab_ttesz = tsb_slab_ttesz;
1207 1207 bigtsb_slab_shift = tsb_slab_shift;
1208 1208 bigtsb_slab_size = tsb_slab_size;
1209 1209 bigtsb_slab_mask = tsb_slab_mask;
1210 1210 }
1211 1211
1212 1212
1213 1213 /*
1214 1214 * Set up memory callback to update tsb_alloc_hiwater and
1215 1215 * tsb_max_growsize.
1216 1216 */
1217 1217 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0);
1218 1218 ASSERT(i == 0);
1219 1219
1220 1220 /*
1221 1221 * kmem_tsb_arena is the source from which large TSB slabs are
1222 1222 * drawn. The quantum of this arena corresponds to the largest
1223 1223 * TSB size we can dynamically allocate for user processes.
1224 1224 * Currently it must also be a supported page size since we
1225 1225 * use exactly one translation entry to map each slab page.
1226 1226 *
1227 1227 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from
1228 1228 * which most TSBs are allocated. Since most TSB allocations are
1229 1229 * typically 8K we have a kmem cache we stack on top of each
1230 1230 * kmem_tsb_default_arena to speed up those allocations.
1231 1231 *
1232 1232 * Note the two-level scheme of arenas is required only
1233 1233 * because vmem_create doesn't allow us to specify alignment
1234 1234 * requirements. If this ever changes the code could be
1235 1235 * simplified to use only one level of arenas.
1236 1236 *
1237 1237 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena
1238 1238 * will be provided in addition to the 4M kmem_tsb_arena.
1239 1239 */
1240 1240 if (use_bigtsb_arena) {
1241 1241 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0,
1242 1242 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper,
1243 1243 vmem_xfree, heap_arena, 0, VM_SLEEP);
1244 1244 }
1245 1245
1246 1246 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size,
1247 1247 sfmmu_vmem_xalloc_aligned_wrapper,
1248 1248 vmem_xfree, heap_arena, 0, VM_SLEEP);
1249 1249
1250 1250 if (tsb_lgrp_affinity) {
1251 1251 char s[50];
1252 1252 for (i = 0; i < NLGRPS_MAX; i++) {
1253 1253 if (use_bigtsb_arena) {
1254 1254 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i);
1255 1255 kmem_bigtsb_default_arena[i] = vmem_create(s,
1256 1256 NULL, 0, 2 * tsb_slab_size,
1257 1257 sfmmu_tsb_segkmem_alloc,
1258 1258 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena,
1259 1259 0, VM_SLEEP | VM_BESTFIT);
1260 1260 }
1261 1261
1262 1262 (void) sprintf(s, "kmem_tsb_lgrp%d", i);
1263 1263 kmem_tsb_default_arena[i] = vmem_create(s,
1264 1264 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1265 1265 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1266 1266 VM_SLEEP | VM_BESTFIT);
1267 1267
1268 1268 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i);
1269 1269 sfmmu_tsb_cache[i] = kmem_cache_create(s,
1270 1270 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1271 1271 kmem_tsb_default_arena[i], 0);
1272 1272 }
1273 1273 } else {
1274 1274 if (use_bigtsb_arena) {
1275 1275 kmem_bigtsb_default_arena[0] =
1276 1276 vmem_create("kmem_bigtsb_default", NULL, 0,
1277 1277 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc,
1278 1278 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0,
1279 1279 VM_SLEEP | VM_BESTFIT);
1280 1280 }
1281 1281
1282 1282 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default",
1283 1283 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc,
1284 1284 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0,
1285 1285 VM_SLEEP | VM_BESTFIT);
1286 1286 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache",
1287 1287 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL,
1288 1288 kmem_tsb_default_arena[0], 0);
1289 1289 }
1290 1290
1291 1291 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ,
1292 1292 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1293 1293 sfmmu_hblkcache_destructor,
1294 1294 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ,
1295 1295 hat_memload_arena, KMC_NOHASH);
1296 1296
1297 1297 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE,
1298 1298 segkmem_alloc_permanent, segkmem_free, heap_arena, 0,
1299 1299 VMC_DUMPSAFE | VM_SLEEP);
1300 1300
1301 1301 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ,
1302 1302 HMEBLK_ALIGN, sfmmu_hblkcache_constructor,
1303 1303 sfmmu_hblkcache_destructor,
1304 1304 NULL, (void *)HME1BLK_SZ,
1305 1305 hat_memload1_arena, KMC_NOHASH);
1306 1306
1307 1307 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ,
1308 1308 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH);
1309 1309
1310 1310 ism_blk_cache = kmem_cache_create("ism_blk_cache",
1311 1311 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL,
1312 1312 NULL, NULL, static_arena, KMC_NOHASH);
1313 1313
1314 1314 ism_ment_cache = kmem_cache_create("ism_ment_cache",
1315 1315 sizeof (ism_ment_t), 0, NULL, NULL,
1316 1316 NULL, NULL, NULL, 0);
1317 1317
1318 1318 /*
1319 1319 * We grab the first hat for the kernel,
1320 1320  * We grab the first hat for the kernel.
1321 - AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
1321 + AS_LOCK_ENTER(&kas, RW_WRITER);
1322 1322 kas.a_hat = hat_alloc(&kas);
1323 - AS_LOCK_EXIT(&kas, &kas.a_lock);
1323 + AS_LOCK_EXIT(&kas);
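
This hunk is the change the webrev is named for: the AS_LOCK_* macros now derive the rwlock from the as_t instead of taking it as a separate argument. A sketch of what the simplified macros in <vm/as.h> presumably look like (an assumption for context, not part of this diff); the same simplification shows up again in hat_alloc() below:

	#define	AS_LOCK_ENTER(as, type)	rw_enter(&(as)->a_lock, (type))
	#define	AS_LOCK_EXIT(as)	rw_exit(&(as)->a_lock)
	#define	AS_WRITE_HELD(as)	RW_WRITE_HELD(&(as)->a_lock)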
1324 1324
1325 1325 /*
1326 1326 * Initialize hblk_reserve.
1327 1327 */
1328 1328 ((struct hme_blk *)hblk_reserve)->hblk_nextpa =
1329 1329 va_to_pa((caddr_t)hblk_reserve);
1330 1330
1331 1331 #ifndef UTSB_PHYS
1332 1332 /*
1333 1333 * Reserve some kernel virtual address space for the locked TTEs
1334 1334 * that allow us to probe the TSB from TL>0.
1335 1335 */
1336 1336 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1337 1337 0, 0, NULL, NULL, VM_SLEEP);
1338 1338 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size,
1339 1339 0, 0, NULL, NULL, VM_SLEEP);
1340 1340 #endif
1341 1341
1342 1342 #ifdef VAC
1343 1343 /*
1344 1344 * The big page VAC handling code assumes VAC
1345 1345 * will not be bigger than the smallest big
1346 1346  * page, which is 64K.
1347 1347 */
1348 1348 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1349 cmn_err(CE_PANIC, "VAC too big!");
1350 1350 }
1351 1351 #endif
1352 1352
1353 1353 (void) xhat_init();
1354 1354
1355 1355 uhme_hash_pa = va_to_pa(uhme_hash);
1356 1356 khme_hash_pa = va_to_pa(khme_hash);
1357 1357
1358 1358 /*
1359 1359 * Initialize relocation locks. kpr_suspendlock is held
1360 1360 * at PIL_MAX to prevent interrupts from pinning the holder
1361 1361 * of a suspended TTE which may access it leading to a
1362 1362 * deadlock condition.
1363 1363 */
1364 1364 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
1365 1365 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX);
1366 1366
1367 1367 /*
1368 1368 * If Shared context support is disabled via /etc/system
1369 1369  * If shared context support is disabled via /etc/system,
1370 1370  * set shctx_on to 0 here in case it was set to 1 earlier in the
1371 1371  * boot sequence by cpu module initialization code.
1372 1372 if (shctx_on && disable_shctx) {
1373 1373 shctx_on = 0;
1374 1374 }
1375 1375
1376 1376 if (shctx_on) {
1377 1377 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS *
1378 1378 sizeof (srd_buckets[0]), KM_SLEEP);
1379 1379 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) {
1380 1380 mutex_init(&srd_buckets[i].srdb_lock, NULL,
1381 1381 MUTEX_DEFAULT, NULL);
1382 1382 }
1383 1383
1384 1384 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t),
1385 1385 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor,
1386 1386 NULL, NULL, NULL, 0);
1387 1387 region_cache = kmem_cache_create("region_cache",
1388 1388 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor,
1389 1389 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0);
1390 1390 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t),
1391 1391 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor,
1392 1392 NULL, NULL, NULL, 0);
1393 1393 }
1394 1394
1395 1395 /*
1396 1396 * Pre-allocate hrm_hashtab before enabling the collection of
1397 1397  * refmod statistics. Allocating on the fly would mean
1398 1398 * running the risk of suffering recursive mutex enters or
1399 1399 * deadlocks.
1400 1400 */
1401 1401 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1402 1402 KM_SLEEP);
1403 1403
1404 1404 /* Allocate per-cpu pending freelist of hmeblks */
1405 1405 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64,
1406 1406 KM_SLEEP);
1407 1407 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP(
1408 1408 (uintptr_t)cpu_hme_pend, 64);
1409 1409
1410 1410 for (i = 0; i < NCPU; i++) {
1411 1411 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT,
1412 1412 NULL);
1413 1413 }
1414 1414
1415 1415 if (cpu_hme_pend_thresh == 0) {
1416 1416 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH;
1417 1417 }
1418 1418 }
1419 1419
1420 1420 /*
1421 1421 * Initialize locking for the hat layer, called early during boot.
1422 1422 */
1423 1423 static void
1424 1424 hat_lock_init()
1425 1425 {
1426 1426 int i;
1427 1427
1428 1428 /*
1429 1429 * initialize the array of mutexes protecting a page's mapping
1430 1430 * list and p_nrm field.
1431 1431 */
1432 1432 for (i = 0; i < MML_TABLE_SIZE; i++)
1433 1433 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL);
1434 1434
1435 1435 if (kpm_enable) {
1436 1436 for (i = 0; i < kpmp_table_sz; i++) {
1437 1437 mutex_init(&kpmp_table[i].khl_mutex, NULL,
1438 1438 MUTEX_DEFAULT, NULL);
1439 1439 }
1440 1440 }
1441 1441
1442 1442 /*
1443 1443 * Initialize array of mutex locks that protects sfmmu fields and
1444 1444 * TSB lists.
1445 1445 */
1446 1446 for (i = 0; i < SFMMU_NUM_LOCK; i++)
1447 1447 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT,
1448 1448 NULL);
1449 1449 }
1450 1450
1451 1451 #define SFMMU_KERNEL_MAXVA \
1452 1452 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT))
1453 1453
1454 1454 /*
1455 1455 * Allocate a hat structure.
1456 1456 * Called when an address space first uses a hat.
1457 1457 */
1458 1458 struct hat *
1459 1459 hat_alloc(struct as *as)
1460 1460 {
1461 1461 sfmmu_t *sfmmup;
1462 1462 int i;
1463 1463 uint64_t cnum;
1464 1464 extern uint_t get_color_start(struct as *);
1465 1465
1466 - ASSERT(AS_WRITE_HELD(as, &as->a_lock));
1466 + ASSERT(AS_WRITE_HELD(as));
1467 1467 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
1468 1468 sfmmup->sfmmu_as = as;
1469 1469 sfmmup->sfmmu_flags = 0;
1470 1470 sfmmup->sfmmu_tteflags = 0;
1471 1471 sfmmup->sfmmu_rtteflags = 0;
1472 1472 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock);
1473 1473
1474 1474 if (as == &kas) {
1475 1475 ksfmmup = sfmmup;
1476 1476 sfmmup->sfmmu_cext = 0;
1477 1477 cnum = KCONTEXT;
1478 1478
1479 1479 sfmmup->sfmmu_clrstart = 0;
1480 1480 sfmmup->sfmmu_tsb = NULL;
1481 1481 /*
1482 1482 * hat_kern_setup() will call sfmmu_init_ktsbinfo()
1483 1483		 * to set up tsb_info for ksfmmup.
1484 1484 */
1485 1485 } else {
1486 1486
1487 1487 /*
1488 1488 * Just set to invalid ctx. When it faults, it will
1489 1489		 * get a valid ctx. This avoids the situation where
1490 1490		 * we get a ctx, but it gets stolen, and then we
1491 1491		 * fault when we try to run and so have to get
1492 1492		 * another ctx.
1493 1493 */
1494 1494 sfmmup->sfmmu_cext = 0;
1495 1495 cnum = INVALID_CONTEXT;
1496 1496
1497 1497 /* initialize original physical page coloring bin */
1498 1498 sfmmup->sfmmu_clrstart = get_color_start(as);
1499 1499 #ifdef DEBUG
1500 1500 if (tsb_random_size) {
1501 1501 uint32_t randval = (uint32_t)gettick() >> 4;
1502 1502 int size = randval % (tsb_max_growsize + 1);
1503 1503
1504 1504			/* choose a random tsb size for stress testing */
1505 1505 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size,
1506 1506 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1507 1507 } else
1508 1508 #endif /* DEBUG */
1509 1509 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb,
1510 1510 default_tsb_size,
1511 1511 TSB8K|TSB64K|TSB512K, 0, sfmmup);
1512 1512 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID;
1513 1513 ASSERT(sfmmup->sfmmu_tsb != NULL);
1514 1514 }
1515 1515
1516 1516 ASSERT(max_mmu_ctxdoms > 0);
1517 1517 for (i = 0; i < max_mmu_ctxdoms; i++) {
1518 1518 sfmmup->sfmmu_ctxs[i].cnum = cnum;
1519 1519 sfmmup->sfmmu_ctxs[i].gnum = 0;
1520 1520 }
1521 1521
1522 1522 for (i = 0; i < max_mmu_page_sizes; i++) {
1523 1523 sfmmup->sfmmu_ttecnt[i] = 0;
1524 1524 sfmmup->sfmmu_scdrttecnt[i] = 0;
1525 1525 sfmmup->sfmmu_ismttecnt[i] = 0;
1526 1526 sfmmup->sfmmu_scdismttecnt[i] = 0;
1527 1527 sfmmup->sfmmu_pgsz[i] = TTE8K;
1528 1528 }
1529 1529 sfmmup->sfmmu_tsb0_4minflcnt = 0;
1530 1530 sfmmup->sfmmu_iblk = NULL;
1531 1531 sfmmup->sfmmu_ismhat = 0;
1532 1532 sfmmup->sfmmu_scdhat = 0;
1533 1533 sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1534 1534 if (sfmmup == ksfmmup) {
1535 1535 CPUSET_ALL(sfmmup->sfmmu_cpusran);
1536 1536 } else {
1537 1537 CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1538 1538 }
1539 1539 sfmmup->sfmmu_free = 0;
1540 1540 sfmmup->sfmmu_rmstat = 0;
1541 1541 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1542 1542 sfmmup->sfmmu_xhat_provider = NULL;
1543 1543 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1544 1544 sfmmup->sfmmu_srdp = NULL;
1545 1545 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1546 1546 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1547 1547 sfmmup->sfmmu_scdp = NULL;
1548 1548 sfmmup->sfmmu_scd_link.next = NULL;
1549 1549 sfmmup->sfmmu_scd_link.prev = NULL;
1550 1550 return (sfmmup);
1551 1551 }
1552 1552
1553 1553 /*
1554 1554 * Create per-MMU context domain kstats for a given MMU ctx.
1555 1555 */
1556 1556 static void
1557 1557 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp)
1558 1558 {
1559 1559 mmu_ctx_stat_t stat;
1560 1560 kstat_t *mmu_kstat;
1561 1561
1562 1562 ASSERT(MUTEX_HELD(&cpu_lock));
1563 1563 ASSERT(mmu_ctxp->mmu_kstat == NULL);
1564 1564
1565 1565 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx",
1566 1566 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL);
1567 1567
1568 1568 if (mmu_kstat == NULL) {
1569 1569 cmn_err(CE_WARN, "kstat_create for MMU %d failed",
1570 1570 mmu_ctxp->mmu_idx);
1571 1571 } else {
1572 1572 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data;
1573 1573 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++)
1574 1574 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat],
1575 1575 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64);
1576 1576 mmu_ctxp->mmu_kstat = mmu_kstat;
1577 1577 kstat_install(mmu_kstat);
1578 1578 }
1579 1579 }
1580 1580
1581 1581 /*
1582 1582 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU
1583 1583 * context domain information for a given CPU. If a platform does not
1584 1584 * specify that interface, then the function below is used instead to return
1585 1585 * default information. The defaults are as follows:
1586 1586 *
1587 1587 * - The number of MMU context IDs supported on any CPU in the
1588 1588 * system is 8K.
1589 1589 * - There is one MMU context domain per CPU.
1590 1590 */
1591 1591 /*ARGSUSED*/
1592 1592 static void
1593 1593 sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
1594 1594 {
1595 1595 infop->mmu_nctxs = nctxs;
1596 1596 infop->mmu_idx = cpu[cpuid]->cpu_seqid;
1597 1597 }
1598 1598
1599 1599 /*
1600 1600 * Called during CPU initialization to set the MMU context-related information
1601 1601 * for a CPU.
1602 1602 *
1603 1603 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
1604 1604 */
1605 1605 void
1606 1606 sfmmu_cpu_init(cpu_t *cp)
1607 1607 {
1608 1608 mmu_ctx_info_t info;
1609 1609 mmu_ctx_t *mmu_ctxp;
1610 1610
1611 1611 ASSERT(MUTEX_HELD(&cpu_lock));
1612 1612
1613 1613 if (&plat_cpuid_to_mmu_ctx_info == NULL)
1614 1614 sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1615 1615 else
1616 1616 plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
1617 1617
1618 1618 ASSERT(info.mmu_idx < max_mmu_ctxdoms);
1619 1619
1620 1620 if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
1621 1621 /* Each mmu_ctx is cacheline aligned. */
1622 1622 mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
1623 1623 bzero(mmu_ctxp, sizeof (mmu_ctx_t));
1624 1624
1625 1625 mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
1626 1626 (void *)ipltospl(DISP_LEVEL));
1627 1627 mmu_ctxp->mmu_idx = info.mmu_idx;
1628 1628 mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
1629 1629 /*
1630 1630		 * Globally, for the lifetime of a system,
1631 1631 * gnum must always increase.
1632 1632 * mmu_saved_gnum is protected by the cpu_lock.
1633 1633 */
1634 1634 mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
1635 1635 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
1636 1636
1637 1637 sfmmu_mmu_kstat_create(mmu_ctxp);
1638 1638
1639 1639 mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
1640 1640 } else {
1641 1641 ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
1642 1642 ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
1643 1643 }
1644 1644
1645 1645 /*
1646 1646 * The mmu_lock is acquired here to prevent races with
1647 1647 * the wrap-around code.
1648 1648 */
1649 1649 mutex_enter(&mmu_ctxp->mmu_lock);
1650 1650
1651 1651
1652 1652 mmu_ctxp->mmu_ncpus++;
1653 1653 CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1654 1654 CPU_MMU_IDX(cp) = info.mmu_idx;
1655 1655 CPU_MMU_CTXP(cp) = mmu_ctxp;
1656 1656
1657 1657 mutex_exit(&mmu_ctxp->mmu_lock);
1658 1658 }
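sfmmu_cpu_init() picks between the platform-supplied hook and the built-in default by testing the address of plat_cpuid_to_mmu_ctx_info(), which is non-NULL only when a platform module actually defines the function. A minimal sketch of that optional-interface idiom; my_plat_hook(), my_default_hook() and get_info() are hypothetical names, and #pragma weak is assumed here to be what leaves an undefined reference at NULL:

	/* Weak reference: resolves to NULL unless some module defines the symbol. */
	#pragma weak my_plat_hook
	extern void my_plat_hook(int cpuid, int *out);

	static void
	my_default_hook(int cpuid, int *out)
	{
		*out = cpuid;			/* built-in fallback behavior */
	}

	static void
	get_info(int cpuid, int *out)
	{
		if (&my_plat_hook == NULL)	/* platform did not provide the hook */
			my_default_hook(cpuid, out);
		else
			my_plat_hook(cpuid, out);
	}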
1659 1659
1660 1660 static void
1661 1661 sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
1662 1662 {
1663 1663 ASSERT(MUTEX_HELD(&cpu_lock));
1664 1664 ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));
1665 1665
1666 1666 mutex_destroy(&mmu_ctxp->mmu_lock);
1667 1667
1668 1668 if (mmu_ctxp->mmu_kstat)
1669 1669 kstat_delete(mmu_ctxp->mmu_kstat);
1670 1670
1671 1671 /* mmu_saved_gnum is protected by the cpu_lock. */
1672 1672 if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
1673 1673 mmu_saved_gnum = mmu_ctxp->mmu_gnum;
1674 1674
1675 1675 kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
1676 1676 }
1677 1677
1678 1678 /*
1679 1679 * Called to perform MMU context-related cleanup for a CPU.
1680 1680 */
1681 1681 void
1682 1682 sfmmu_cpu_cleanup(cpu_t *cp)
1683 1683 {
1684 1684 mmu_ctx_t *mmu_ctxp;
1685 1685
1686 1686 ASSERT(MUTEX_HELD(&cpu_lock));
1687 1687
1688 1688 mmu_ctxp = CPU_MMU_CTXP(cp);
1689 1689 ASSERT(mmu_ctxp != NULL);
1690 1690
1691 1691 /*
1692 1692 * The mmu_lock is acquired here to prevent races with
1693 1693 * the wrap-around code.
1694 1694 */
1695 1695 mutex_enter(&mmu_ctxp->mmu_lock);
1696 1696
1697 1697 CPU_MMU_CTXP(cp) = NULL;
1698 1698
1699 1699 CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
1700 1700 if (--mmu_ctxp->mmu_ncpus == 0) {
1701 1701 mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
1702 1702 mutex_exit(&mmu_ctxp->mmu_lock);
1703 1703 sfmmu_ctxdom_free(mmu_ctxp);
1704 1704 return;
1705 1705 }
1706 1706
1707 1707 mutex_exit(&mmu_ctxp->mmu_lock);
1708 1708 }
1709 1709
1710 1710 uint_t
1711 1711 sfmmu_ctxdom_nctxs(int idx)
1712 1712 {
1713 1713 return (mmu_ctxs_tbl[idx]->mmu_nctxs);
1714 1714 }
1715 1715
1716 1716 #ifdef sun4v
1717 1717 /*
1718 1718 * sfmmu_ctxdoms_* is an interface provided to help keep context domains
1719 1719 * consistent after suspend/resume on systems that can resume on different
1720 1720 * hardware than that on which they were suspended.
1721 1721 *
1722 1722 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts
1723 1723 * from being allocated. It acquires all hat_locks, which blocks most access to
1724 1724 * context data, except for a few cases that are handled separately or are
1725 1725 * harmless. It wraps each domain to increment gnum and invalidate on-CPU
1726 1726 * contexts, and forces cnum to its max. As a result of this call all user
1727 1727 * threads that are running on CPUs trap and try to perform wrap around but
1728 1728 * can't because hat_locks are taken. Threads that were not on CPUs but started
1729 1729 * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking
1730 1730 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block
1731 1731 * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs
1732 1732 * are paused, else it could deadlock acquiring locks held by paused CPUs.
1733 1733 *
1734 1734 * sfmmu_ctxdoms_remove() removes context domains from every CPU and records
1735 1735 * the CPUs that had them. It must be called after CPUs have been paused. This
1736 1736 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data,
1737 1737 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx
1738 1738 * runs with interrupts disabled. When CPUs are later resumed, they may enter
1739 1739 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP = NULL and immediately
1740 1740 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus
1741 1741 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is
1742 1742 * accessing the old context domains.
1743 1743 *
1744 1744 * sfmmu_ctxdoms_update(void) frees space used by old context domains and
1745 1745 * allocates new context domains based on hardware layout. It initializes
1746 1746 * every CPU that had a context domain before migration to have one again.
1747 1747 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it
1748 1748 * could deadlock acquiring locks held by paused CPUs.
1749 1749 *
1750 1750 * sfmmu_ctxdoms_unlock(void) releases all hat_locks after which user threads
1751 1751 * acquire new context ids and continue execution.
1752 1752 *
1753 1753 * Therefore these functions should be called in the following order:
1754 1754 *	suspend_routine()
1755 1755 *		sfmmu_ctxdoms_lock()
1756 1756 *		pause_cpus()
1757 1757 *		suspend()
1758 1758 *			if (suspend failed)
1759 1759 *				sfmmu_ctxdoms_unlock()
1760 1760 *		...
1761 1761 *		sfmmu_ctxdoms_remove()
1762 1762 *		resume_cpus()
1763 1763 *		sfmmu_ctxdoms_update()
1764 1764 *		sfmmu_ctxdoms_unlock()
1765 1765 */
1766 1766 static cpuset_t sfmmu_ctxdoms_pset;
1767 1767
1768 1768 void
1769 1769 sfmmu_ctxdoms_remove()
1770 1770 {
1771 1771 processorid_t id;
1772 1772 cpu_t *cp;
1773 1773
1774 1774 /*
1775 1775 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can
1776 1776 * be restored post-migration. A CPU may be powered off and not have a
1777 1777 * domain, for example.
1778 1778 */
1779 1779 CPUSET_ZERO(sfmmu_ctxdoms_pset);
1780 1780
1781 1781 for (id = 0; id < NCPU; id++) {
1782 1782 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) {
1783 1783 CPUSET_ADD(sfmmu_ctxdoms_pset, id);
1784 1784 CPU_MMU_CTXP(cp) = NULL;
1785 1785 }
1786 1786 }
1787 1787 }
1788 1788
1789 1789 void
1790 1790 sfmmu_ctxdoms_lock(void)
1791 1791 {
1792 1792 int idx;
1793 1793 mmu_ctx_t *mmu_ctxp;
1794 1794
1795 1795 sfmmu_hat_lock_all();
1796 1796
1797 1797 /*
1798 1798 * At this point, no thread can be in sfmmu_ctx_wrap_around, because
1799 1799 * hat_lock is always taken before calling it.
1800 1800 *
1801 1801 * For each domain, set mmu_cnum to max so no more contexts can be
1802 1802 * allocated, and wrap to flush on-CPU contexts and force threads to
1803 1803 * acquire a new context when we later drop hat_lock after migration.
1804 1804 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum,
1805 1805 * but the latter uses CAS and will miscompare and not overwrite it.
1806 1806 */
1807 1807 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */
1808 1808 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1809 1809 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) {
1810 1810 mutex_enter(&mmu_ctxp->mmu_lock);
1811 1811 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs;
1812 1812 /* make sure updated cnum visible */
1813 1813 membar_enter();
1814 1814 mutex_exit(&mmu_ctxp->mmu_lock);
1815 1815 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE);
1816 1816 }
1817 1817 }
1818 1818 kpreempt_enable();
1819 1819 }
1820 1820
1821 1821 void
1822 1822 sfmmu_ctxdoms_unlock(void)
1823 1823 {
1824 1824 sfmmu_hat_unlock_all();
1825 1825 }
1826 1826
1827 1827 void
1828 1828 sfmmu_ctxdoms_update(void)
1829 1829 {
1830 1830 processorid_t id;
1831 1831 cpu_t *cp;
1832 1832 uint_t idx;
1833 1833 mmu_ctx_t *mmu_ctxp;
1834 1834
1835 1835 /*
1836 1836	 * Free all context domains. As a side effect, this increases
1837 1837 * mmu_saved_gnum to the maximum gnum over all domains, which is used to
1838 1838 * init gnum in the new domains, which therefore will be larger than the
1839 1839 * sfmmu gnum for any process, guaranteeing that every process will see
1840 1840 * a new generation and allocate a new context regardless of what new
1841 1841 * domain it runs in.
1842 1842 */
1843 1843 mutex_enter(&cpu_lock);
1844 1844
1845 1845 for (idx = 0; idx < max_mmu_ctxdoms; idx++) {
1846 1846 if (mmu_ctxs_tbl[idx] != NULL) {
1847 1847 mmu_ctxp = mmu_ctxs_tbl[idx];
1848 1848 mmu_ctxs_tbl[idx] = NULL;
1849 1849 sfmmu_ctxdom_free(mmu_ctxp);
1850 1850 }
1851 1851 }
1852 1852
1853 1853 for (id = 0; id < NCPU; id++) {
1854 1854 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) &&
1855 1855 (cp = cpu[id]) != NULL)
1856 1856 sfmmu_cpu_init(cp);
1857 1857 }
1858 1858 mutex_exit(&cpu_lock);
1859 1859 }
1860 1860 #endif
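The block comment at the top of the #ifdef sun4v section above prescribes a strict call order around CPU pause/resume. A condensed sketch of a migration routine following that order; pause_all_cpus(), resume_all_cpus() and platform_suspend() are hypothetical stand-ins for the pause, resume and suspend steps named in the comment:

	extern void pause_all_cpus(void);	/* hypothetical wrapper for the pause step */
	extern void resume_all_cpus(void);	/* hypothetical wrapper for the resume step */
	extern int platform_suspend(void);	/* hypothetical suspend step */

	static int
	migrate_sketch(void)
	{
		sfmmu_ctxdoms_lock();		/* must run before CPUs are paused */
		pause_all_cpus();
		if (platform_suspend() != 0) {
			resume_all_cpus();
			sfmmu_ctxdoms_unlock();	/* abort: threads keep their old domains */
			return (-1);
		}
		sfmmu_ctxdoms_remove();		/* CPUs are still paused here */
		resume_all_cpus();
		sfmmu_ctxdoms_update();		/* must run after CPUs are resumed */
		sfmmu_ctxdoms_unlock();		/* threads now allocate fresh contexts */
		return (0);
	}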
1861 1861
1862 1862 /*
1863 1863 * hat_setup() makes an address space context the current active one.
1864 1864 * In sfmmu this translates to setting the secondary context with the
1865 1865 * corresponding context.
1866 1866 */
1867 1867 void
1868 1868 hat_setup(struct hat *sfmmup, int allocflag)
1869 1869 {
1870 1870 hatlock_t *hatlockp;
1871 1871
1872 1872 /* Init needs some special treatment. */
1873 1873 if (allocflag == HAT_INIT) {
1874 1874 /*
1875 1875 * Make sure that we have
1876 1876 * 1. a TSB
1877 1877 * 2. a valid ctx that doesn't get stolen after this point.
1878 1878 */
1879 1879 hatlockp = sfmmu_hat_enter(sfmmup);
1880 1880
1881 1881 /*
1882 1882 * Swap in the TSB. hat_init() allocates tsbinfos without
1883 1883 * TSBs, but we need one for init, since the kernel does some
1884 1884 * special things to set up its stack and needs the TSB to
1885 1885 * resolve page faults.
1886 1886 */
1887 1887 sfmmu_tsb_swapin(sfmmup, hatlockp);
1888 1888
1889 1889 sfmmu_get_ctx(sfmmup);
1890 1890
1891 1891 sfmmu_hat_exit(hatlockp);
1892 1892 } else {
1893 1893 ASSERT(allocflag == HAT_ALLOC);
1894 1894
1895 1895 hatlockp = sfmmu_hat_enter(sfmmup);
1896 1896 kpreempt_disable();
1897 1897
1898 1898 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id);
1899 1899 /*
1900 1900		 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter;
1901 1901		 * the pagesize bits don't matter in this case since we are passing
1902 1902 * INVALID_CONTEXT to it.
1903 1903 * Compatibility Note: hw takes care of MMU_SCONTEXT1
1904 1904 */
1905 1905 sfmmu_setctx_sec(INVALID_CONTEXT);
1906 1906 sfmmu_clear_utsbinfo();
1907 1907
1908 1908 kpreempt_enable();
1909 1909 sfmmu_hat_exit(hatlockp);
1910 1910 }
1911 1911 }
1912 1912
1913 1913 /*
1914 1914 * Free all the translation resources for the specified address space.
1915 1915 * Called from as_free when an address space is being destroyed.
1916 1916 */
1917 1917 void
1918 1918 hat_free_start(struct hat *sfmmup)
1919 1919 {
1920 - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1920 + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
1921 1921 ASSERT(sfmmup != ksfmmup);
1922 1922 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 1923
1924 1924 sfmmup->sfmmu_free = 1;
1925 1925 if (sfmmup->sfmmu_scdp != NULL) {
1926 1926 sfmmu_leave_scd(sfmmup, 0);
1927 1927 }
1928 1928
1929 1929 ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 1930 }
1931 1931
1932 1932 void
1933 1933 hat_free_end(struct hat *sfmmup)
1934 1934 {
1935 1935 int i;
1936 1936
1937 1937 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1938 1938 ASSERT(sfmmup->sfmmu_free == 1);
1939 1939 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1940 1940 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1941 1941 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1942 1942 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1943 1943 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1944 1944 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1945 1945
1946 1946 if (sfmmup->sfmmu_rmstat) {
1947 1947 hat_freestat(sfmmup->sfmmu_as, NULL);
1948 1948 }
1949 1949
1950 1950 while (sfmmup->sfmmu_tsb != NULL) {
1951 1951 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next;
1952 1952 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb);
1953 1953 sfmmup->sfmmu_tsb = next;
1954 1954 }
1955 1955
1956 1956 if (sfmmup->sfmmu_srdp != NULL) {
1957 1957 sfmmu_leave_srd(sfmmup);
1958 1958 ASSERT(sfmmup->sfmmu_srdp == NULL);
1959 1959 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1960 1960 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) {
1961 1961 kmem_free(sfmmup->sfmmu_hmeregion_links[i],
1962 1962 SFMMU_L2_HMERLINKS_SIZE);
1963 1963 sfmmup->sfmmu_hmeregion_links[i] = NULL;
1964 1964 }
1965 1965 }
1966 1966 }
1967 1967 sfmmu_free_sfmmu(sfmmup);
1968 1968
1969 1969 #ifdef DEBUG
1970 1970 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971 1971 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972 1972 }
1973 1973 #endif
1974 1974
1975 1975 kmem_cache_free(sfmmuid_cache, sfmmup);
1976 1976 }
1977 1977
1978 1978 /*
1979 1979 * Set up any translation structures, for the specified address space,
1980 1980 * that are needed or preferred when the process is being swapped in.
1981 1981 */
1982 1982 /* ARGSUSED */
1983 1983 void
1984 1984 hat_swapin(struct hat *hat)
1985 1985 {
1986 1986 ASSERT(hat->sfmmu_xhat_provider == NULL);
1987 1987 }
1988 1988
1989 1989 /*
1990 1990 * Free all of the translation resources, for the specified address space,
1991 1991 * that can be freed while the process is swapped out. Called from as_swapout.
1992 1992 * Also, free up the ctx that this process was using.
1993 1993 */
1994 1994 void
1995 1995 hat_swapout(struct hat *sfmmup)
1996 1996 {
1997 1997 struct hmehash_bucket *hmebp;
1998 1998 struct hme_blk *hmeblkp;
1999 1999 struct hme_blk *pr_hblk = NULL;
2000 2000 struct hme_blk *nx_hblk;
2001 2001 int i;
2002 2002 struct hme_blk *list = NULL;
2003 2003 hatlock_t *hatlockp;
2004 2004 struct tsb_info *tsbinfop;
2005 2005 struct free_tsb {
2006 2006 struct free_tsb *next;
2007 2007 struct tsb_info *tsbinfop;
2008 2008 }; /* free list of TSBs */
2009 2009 struct free_tsb *freelist, *last, *next;
2010 2010
2011 2011 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012 2012 SFMMU_STAT(sf_swapout);
2013 2013
2014 2014 /*
2015 2015 * There is no way to go from an as to all its translations in sfmmu.
2016 2016 * Here is one of the times when we take the big hit and traverse
2017 2017 * the hash looking for hme_blks to free up. Not only do we free up
2018 2018	 * this as's hme_blks but also all those that are free. We are obviously
2019 2019 * swapping because we need memory so let's free up as much
2020 2020 * as we can.
2021 2021 *
2022 2022 * Note that we don't flush TLB/TSB here -- it's not necessary
2023 2023 * because:
2024 2024 * 1) we free the ctx we're using and throw away the TSB(s);
2025 2025 * 2) processes aren't runnable while being swapped out.
2026 2026 */
2027 2027 ASSERT(sfmmup != KHATID);
2028 2028 for (i = 0; i <= UHMEHASH_SZ; i++) {
2029 2029 hmebp = &uhme_hash[i];
2030 2030 SFMMU_HASH_LOCK(hmebp);
2031 2031 hmeblkp = hmebp->hmeblkp;
2032 2032 pr_hblk = NULL;
2033 2033 while (hmeblkp) {
2034 2034
2035 2035 ASSERT(!hmeblkp->hblk_xhat_bit);
2036 2036
2037 2037 if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038 2038 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039 2039 ASSERT(!hmeblkp->hblk_shared);
2040 2040 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041 2041 (caddr_t)get_hblk_base(hmeblkp),
2042 2042 get_hblk_endaddr(hmeblkp),
2043 2043 NULL, HAT_UNLOAD);
2044 2044 }
2045 2045 nx_hblk = hmeblkp->hblk_next;
2046 2046 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047 2047 ASSERT(!hmeblkp->hblk_lckcnt);
2048 2048 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049 2049 &list, 0);
2050 2050 } else {
2051 2051 pr_hblk = hmeblkp;
2052 2052 }
2053 2053 hmeblkp = nx_hblk;
2054 2054 }
2055 2055 SFMMU_HASH_UNLOCK(hmebp);
2056 2056 }
2057 2057
2058 2058 sfmmu_hblks_list_purge(&list, 0);
2059 2059
2060 2060 /*
2061 2061 * Now free up the ctx so that others can reuse it.
2062 2062 */
2063 2063 hatlockp = sfmmu_hat_enter(sfmmup);
2064 2064
2065 2065 sfmmu_invalidate_ctx(sfmmup);
2066 2066
2067 2067 /*
2068 2068 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069 2069 * If TSBs were never swapped in, just return.
2070 2070 * This implies that we don't support partial swapping
2071 2071 * of TSBs -- either all are swapped out, or none are.
2072 2072 *
2073 2073 * We must hold the HAT lock here to prevent racing with another
2074 2074 * thread trying to unmap TTEs from the TSB or running the post-
2075 2075 * relocator after relocating the TSB's memory. Unfortunately, we
2076 2076 * can't free memory while holding the HAT lock or we could
2077 2077 * deadlock, so we build a list of TSBs to be freed after marking
2078 2078 * the tsbinfos as swapped out and free them after dropping the
2079 2079 * lock.
2080 2080 */
2081 2081 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082 2082 sfmmu_hat_exit(hatlockp);
2083 2083 return;
2084 2084 }
2085 2085
2086 2086 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087 2087 last = freelist = NULL;
2088 2088 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089 2089 tsbinfop = tsbinfop->tsb_next) {
2090 2090 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091 2091
2092 2092 /*
2093 2093 * Cast the TSB into a struct free_tsb and put it on the free
2094 2094 * list.
2095 2095 */
2096 2096 if (freelist == NULL) {
2097 2097 last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098 2098 } else {
2099 2099 last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100 2100 last = last->next;
2101 2101 }
2102 2102 last->next = NULL;
2103 2103 last->tsbinfop = tsbinfop;
2104 2104 tsbinfop->tsb_flags |= TSB_SWAPPED;
2105 2105 /*
2106 2106 * Zero out the TTE to clear the valid bit.
2107 2107 * Note we can't use a value like 0xbad because we want to
2108 2108 * ensure diagnostic bits are NEVER set on TTEs that might
2109 2109 * be loaded. The intent is to catch any invalid access
2110 2110 * to the swapped TSB, such as a thread running with a valid
2111 2111 * context without first calling sfmmu_tsb_swapin() to
2112 2112 * allocate TSB memory.
2113 2113 */
2114 2114 tsbinfop->tsb_tte.ll = 0;
2115 2115 }
2116 2116
2117 2117 /* Now we can drop the lock and free the TSB memory. */
2118 2118 sfmmu_hat_exit(hatlockp);
2119 2119 for (; freelist != NULL; freelist = next) {
2120 2120 next = freelist->next;
2121 2121 sfmmu_tsb_free(freelist->tsbinfop);
2122 2122 }
2123 2123 }
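hat_swapout() avoids allocating memory while holding the HAT lock by threading its deferred-free list through the TSB buffers themselves and calling sfmmu_tsb_free() only after the lock is dropped. A self-contained sketch of the same idiom with hypothetical names (struct dfree, drain(), release()):

	/* Hypothetical deferred-free node, overlaid on the buffer being freed. */
	struct dfree {
		struct dfree	*next;
		void		*cookie;	/* what to hand to release() later */
	};

	extern void release(void *);		/* hypothetical free routine */

	static void
	drain(kmutex_t *lock, void **bufs, int n)
	{
		struct dfree *head = NULL, *node;
		int i;

		mutex_enter(lock);
		for (i = 0; i < n; i++) {
			/* Reuse the buffer's own memory as the list node. */
			node = (struct dfree *)bufs[i];
			node->next = head;
			node->cookie = bufs[i];
			head = node;
		}
		mutex_exit(lock);		/* can't free while the lock is held */

		while (head != NULL) {
			struct dfree *next = head->next;
			release(head->cookie);	/* frees the node's own memory too */
			head = next;
		}
	}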
2124 2124
2125 2125 /*
2126 2126 * Duplicate the translations of an as into another as (newas)
2127 2127 */
2128 2128 /* ARGSUSED */
2129 2129 int
2130 2130 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 2131 uint_t flag)
2132 2132 {
2133 2133 sf_srd_t *srdp;
2134 2134 sf_scd_t *scdp;
2135 2135 int i;
2136 2136 extern uint_t get_color_start(struct as *);
2137 2137
2138 2138 ASSERT(hat->sfmmu_xhat_provider == NULL);
2139 2139 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140 2140 (flag == HAT_DUP_SRD));
2141 2141 ASSERT(hat != ksfmmup);
2142 2142 ASSERT(newhat != ksfmmup);
2143 2143 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 2144
2145 2145 if (flag == HAT_DUP_COW) {
2146 2146 panic("hat_dup: HAT_DUP_COW not supported");
2147 2147 }
2148 2148
2149 2149 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) {
2150 2150 ASSERT(srdp->srd_evp != NULL);
2151 2151 VN_HOLD(srdp->srd_evp);
2152 2152 ASSERT(srdp->srd_refcnt > 0);
2153 2153 newhat->sfmmu_srdp = srdp;
2154 2154 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
2155 2155 }
2156 2156
2157 2157 /*
2158 2158 * HAT_DUP_ALL flag is used after as duplication is done.
2159 2159 */
2160 2160 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) {
2161 2161 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2);
2162 2162 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags;
2163 2163 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) {
2164 2164 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG;
2165 2165 }
2166 2166
2167 2167 /* check if need to join scd */
2168 2168 if ((scdp = hat->sfmmu_scdp) != NULL &&
2169 2169 newhat->sfmmu_scdp != scdp) {
2170 2170 int ret;
2171 2171 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map,
2172 2172 &scdp->scd_region_map, ret);
2173 2173 ASSERT(ret);
2174 2174 sfmmu_join_scd(scdp, newhat);
2175 2175 ASSERT(newhat->sfmmu_scdp == scdp &&
2176 2176 scdp->scd_refcnt >= 2);
2177 2177 for (i = 0; i < max_mmu_page_sizes; i++) {
2178 2178 newhat->sfmmu_ismttecnt[i] =
2179 2179 hat->sfmmu_ismttecnt[i];
2180 2180 newhat->sfmmu_scdismttecnt[i] =
2181 2181 hat->sfmmu_scdismttecnt[i];
2182 2182 }
2183 2183 }
2184 2184
2185 2185 sfmmu_check_page_sizes(newhat, 1);
2186 2186 }
2187 2187
2188 2188 if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2189 2189 update_proc_pgcolorbase_after_fork != 0) {
2190 2190 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2191 2191 }
2192 2192 return (0);
2193 2193 }
2194 2194
2195 2195 void
2196 2196 hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2197 2197 uint_t attr, uint_t flags)
2198 2198 {
2199 2199 hat_do_memload(hat, addr, pp, attr, flags,
2200 2200 SFMMU_INVALID_SHMERID);
2201 2201 }
2202 2202
2203 2203 void
2204 2204 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2205 2205 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2206 2206 {
2207 2207 uint_t rid;
2208 2208 if (rcookie == HAT_INVALID_REGION_COOKIE ||
2209 2209 hat->sfmmu_xhat_provider != NULL) {
2210 2210 hat_do_memload(hat, addr, pp, attr, flags,
2211 2211 SFMMU_INVALID_SHMERID);
2212 2212 return;
2213 2213 }
2214 2214 rid = (uint_t)((uint64_t)rcookie);
2215 2215 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2216 2216 hat_do_memload(hat, addr, pp, attr, flags, rid);
2217 2217 }
2218 2218
2219 2219 /*
2220 2220 * Set up addr to map to page pp with protection prot.
2221 2221 * As an optimization we also load the TSB with the
2222 2222 * corresponding tte but it is no big deal if the tte gets kicked out.
2223 2223 */
2224 2224 static void
2225 2225 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2226 2226 uint_t attr, uint_t flags, uint_t rid)
2227 2227 {
2228 2228 tte_t tte;
2229 2229
2230 2230
2231 2231 ASSERT(hat != NULL);
2232 2232 ASSERT(PAGE_LOCKED(pp));
2233 2233 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2234 2234 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2235 2235 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 2236 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2237 2237
2238 2238 if (PP_ISFREE(pp)) {
2239 2239 panic("hat_memload: loading a mapping to free page %p",
2240 2240 (void *)pp);
2241 2241 }
2242 2242
2243 2243 if (hat->sfmmu_xhat_provider) {
2244 2244 /* no regions for xhats */
2245 2245 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246 2246 XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247 2247 return;
2248 2248 }
2249 2249
2250 - ASSERT((hat == ksfmmup) ||
2251 - AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2250 + ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2252 2251
2253 2252 if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 2253 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 2254 flags & ~SFMMU_LOAD_ALLFLAG);
2256 2255
2257 2256 if (hat->sfmmu_rmstat)
2258 2257 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 2258
2260 2259 #if defined(SF_ERRATA_57)
2261 2260 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2262 2261 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2263 2262 !(flags & HAT_LOAD_SHARE)) {
2264 2263 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user "
2265 2264 " page executable");
2266 2265 attr &= ~PROT_EXEC;
2267 2266 }
2268 2267 #endif
2269 2268
2270 2269 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2271 2270 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid);
2272 2271
2273 2272 /*
2274 2273 * Check TSB and TLB page sizes.
2275 2274 */
2276 2275 if ((flags & HAT_LOAD_SHARE) == 0) {
2277 2276 sfmmu_check_page_sizes(hat, 1);
2278 2277 }
2279 2278 }
2280 2279
2281 2280 /*
2282 2281 * hat_devload can be called to map real memory (e.g.
2283 2282 * /dev/kmem) and even though hat_devload will determine pf is
2284 2283 * for memory, it will be unable to get a shared lock on the
2285 2284 * page (because someone else has it exclusively) and will
2286 2285 * pass dp = NULL. If tteload doesn't get a non-NULL
2287 2286 * page pointer it can't cache memory.
2288 2287 */
2289 2288 void
2290 2289 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2291 2290 uint_t attr, int flags)
2292 2291 {
2293 2292 tte_t tte;
2294 2293 struct page *pp = NULL;
2295 2294 int use_lgpg = 0;
2296 2295
2297 2296 ASSERT(hat != NULL);
2298 2297
2299 2298 if (hat->sfmmu_xhat_provider) {
2300 2299 XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301 2300 return;
2302 2301 }
2303 2302
2304 2303 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 2304 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306 - ASSERT((hat == ksfmmup) ||
2307 - AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2305 + ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
2308 2306 if (len == 0)
2309 2307 panic("hat_devload: zero len");
2310 2308 if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 2309 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 2310 flags & ~SFMMU_LOAD_ALLFLAG);
2313 2311
2314 2312 #if defined(SF_ERRATA_57)
2315 2313 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2316 2314 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2317 2315 !(flags & HAT_LOAD_SHARE)) {
2318 2316 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user "
2319 2317 " page executable");
2320 2318 attr &= ~PROT_EXEC;
2321 2319 }
2322 2320 #endif
2323 2321
2324 2322 /*
2325 2323 * If it's a memory page find its pp
2326 2324 */
2327 2325 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) {
2328 2326 pp = page_numtopp_nolock(pfn);
2329 2327 if (pp == NULL) {
2330 2328 flags |= HAT_LOAD_NOCONSIST;
2331 2329 } else {
2332 2330 if (PP_ISFREE(pp)) {
2333 2331 panic("hat_memload: loading "
2334 2332 "a mapping to free page %p",
2335 2333 (void *)pp);
2336 2334 }
2337 2335 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2338 2336 panic("hat_memload: loading a mapping "
2339 2337 "to unlocked relocatable page %p",
2340 2338 (void *)pp);
2341 2339 }
2342 2340 ASSERT(len == MMU_PAGESIZE);
2343 2341 }
2344 2342 }
2345 2343
2346 2344 if (hat->sfmmu_rmstat)
2347 2345 hat_resvstat(len, hat->sfmmu_as, addr);
2348 2346
2349 2347 if (flags & HAT_LOAD_NOCONSIST) {
2350 2348 attr |= SFMMU_UNCACHEVTTE;
2351 2349 use_lgpg = 1;
2352 2350 }
2353 2351 if (!pf_is_memory(pfn)) {
2354 2352 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC;
2355 2353 use_lgpg = 1;
2356 2354 switch (attr & HAT_ORDER_MASK) {
2357 2355 case HAT_STRICTORDER:
2358 2356 case HAT_UNORDERED_OK:
2359 2357 /*
2360 2358			 * we set the side effect bit for all non-
2361 2359			 * memory mappings unless merging is ok
2362 2360 */
2363 2361 attr |= SFMMU_SIDEFFECT;
2364 2362 break;
2365 2363 case HAT_MERGING_OK:
2366 2364 case HAT_LOADCACHING_OK:
2367 2365 case HAT_STORECACHING_OK:
2368 2366 break;
2369 2367 default:
2370 2368 panic("hat_devload: bad attr");
2371 2369 break;
2372 2370 }
2373 2371 }
2374 2372 while (len) {
2375 2373 if (!use_lgpg) {
2376 2374 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2377 2375 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2378 2376 flags, SFMMU_INVALID_SHMERID);
2379 2377 len -= MMU_PAGESIZE;
2380 2378 addr += MMU_PAGESIZE;
2381 2379 pfn++;
2382 2380 continue;
2383 2381 }
2384 2382 /*
2385 2383 * try to use large pages, check va/pa alignments
2386 2384 * Note that 32M/256M page sizes are not (yet) supported.
2387 2385 */
2388 2386 if ((len >= MMU_PAGESIZE4M) &&
2389 2387 !((uintptr_t)addr & MMU_PAGEOFFSET4M) &&
2390 2388 !(disable_large_pages & (1 << TTE4M)) &&
2391 2389 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) {
2392 2390 sfmmu_memtte(&tte, pfn, attr, TTE4M);
2393 2391 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2394 2392 flags, SFMMU_INVALID_SHMERID);
2395 2393 len -= MMU_PAGESIZE4M;
2396 2394 addr += MMU_PAGESIZE4M;
2397 2395 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE;
2398 2396 } else if ((len >= MMU_PAGESIZE512K) &&
2399 2397 !((uintptr_t)addr & MMU_PAGEOFFSET512K) &&
2400 2398 !(disable_large_pages & (1 << TTE512K)) &&
2401 2399 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) {
2402 2400 sfmmu_memtte(&tte, pfn, attr, TTE512K);
2403 2401 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2404 2402 flags, SFMMU_INVALID_SHMERID);
2405 2403 len -= MMU_PAGESIZE512K;
2406 2404 addr += MMU_PAGESIZE512K;
2407 2405 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE;
2408 2406 } else if ((len >= MMU_PAGESIZE64K) &&
2409 2407 !((uintptr_t)addr & MMU_PAGEOFFSET64K) &&
2410 2408 !(disable_large_pages & (1 << TTE64K)) &&
2411 2409 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) {
2412 2410 sfmmu_memtte(&tte, pfn, attr, TTE64K);
2413 2411 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2414 2412 flags, SFMMU_INVALID_SHMERID);
2415 2413 len -= MMU_PAGESIZE64K;
2416 2414 addr += MMU_PAGESIZE64K;
2417 2415 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE;
2418 2416 } else {
2419 2417 sfmmu_memtte(&tte, pfn, attr, TTE8K);
2420 2418 (void) sfmmu_tteload_array(hat, &tte, addr, &pp,
2421 2419 flags, SFMMU_INVALID_SHMERID);
2422 2420 len -= MMU_PAGESIZE;
2423 2421 addr += MMU_PAGESIZE;
2424 2422 pfn++;
2425 2423 }
2426 2424 }
2427 2425
2428 2426 /*
2429 2427 * Check TSB and TLB page sizes.
2430 2428 */
2431 2429 if ((flags & HAT_LOAD_SHARE) == 0) {
2432 2430 sfmmu_check_page_sizes(hat, 1);
2433 2431 }
2434 2432 }
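The loop in hat_devload() above picks the largest page size (4M, then 512K, then 64K, then 8K) for which enough length remains, the virtual and physical addresses are suitably aligned, and the size has not been disabled. A small sketch of that per-size eligibility test; can_map_with_pgsz() is an illustrative helper, not a function from this file:

	/* Illustrative only: mirrors the per-size test used in the loop above. */
	static int
	can_map_with_pgsz(size_t remaining, uintptr_t va, uint64_t pa,
	    size_t pgsz, int size_disabled)
	{
		return (!size_disabled &&
		    remaining >= pgsz &&
		    (va & (pgsz - 1)) == 0 &&		/* virtual alignment */
		    (pa & (pgsz - 1)) == 0);		/* physical alignment */
	}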
2435 2433
2436 2434 void
2437 2435 hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2438 2436 struct page **pps, uint_t attr, uint_t flags)
2439 2437 {
2440 2438 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2441 2439 SFMMU_INVALID_SHMERID);
2442 2440 }
2443 2441
2444 2442 void
2445 2443 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2446 2444 struct page **pps, uint_t attr, uint_t flags,
2447 2445 hat_region_cookie_t rcookie)
2448 2446 {
2449 2447 uint_t rid;
2450 2448 if (rcookie == HAT_INVALID_REGION_COOKIE ||
2451 2449 hat->sfmmu_xhat_provider != NULL) {
2452 2450 hat_do_memload_array(hat, addr, len, pps, attr, flags,
2453 2451 SFMMU_INVALID_SHMERID);
2454 2452 return;
2455 2453 }
2456 2454 rid = (uint_t)((uint64_t)rcookie);
2457 2455 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2458 2456 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2459 2457 }
2460 2458
2461 2459 /*
2462 2460 * Map the largest extent possible out of the page array. The array may NOT
2463 2461 * be in order. The largest possible mapping a page can have
2464 2462 * is specified in the p_szc field. The p_szc field
2465 2463 * cannot change as long as there are any mappings (large or small)
2466 2464 * to any of the pages that make up the large page. (i.e. any
2467 2465 * promotion/demotion of page size is not up to the hat but up to
2468 2466 * the page free list manager). The array
2469 2467 * should consist of properly aligned contiguous pages that are
2470 2468 * part of a big page for a large mapping to be created.
2471 2469 */
2472 2470 static void
2473 2471 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2474 2472 struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2475 2473 {
2476 2474 int ttesz;
2477 2475 size_t mapsz;
2478 2476 pgcnt_t numpg, npgs;
2479 2477 tte_t tte;
2480 2478 page_t *pp;
2481 2479 uint_t large_pages_disable;
2482 2480
2483 2481 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2484 2482 SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2485 2483
2486 2484 if (hat->sfmmu_xhat_provider) {
2487 2485 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2488 2486 XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2489 2487 return;
2490 2488 }
2491 2489
2492 2490 if (hat->sfmmu_rmstat)
2493 2491 hat_resvstat(len, hat->sfmmu_as, addr);
2494 2492
2495 2493 #if defined(SF_ERRATA_57)
2496 2494 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2497 2495 (addr < errata57_limit) && (attr & PROT_EXEC) &&
2498 2496 !(flags & HAT_LOAD_SHARE)) {
2499 2497 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2500 2498 "user page executable");
2501 2499 attr &= ~PROT_EXEC;
2502 2500 }
2503 2501 #endif
2504 2502
2505 2503 /* Get number of pages */
2506 2504 npgs = len >> MMU_PAGESHIFT;
2507 2505
2508 2506 if (flags & HAT_LOAD_SHARE) {
2509 2507 large_pages_disable = disable_ism_large_pages;
2510 2508 } else {
2511 2509 large_pages_disable = disable_large_pages;
2512 2510 }
2513 2511
2514 2512 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) {
2515 2513 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2516 2514 rid);
2517 2515 return;
2518 2516 }
2519 2517
2520 2518 while (npgs >= NHMENTS) {
2521 2519 pp = *pps;
2522 2520 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) {
2523 2521 /*
2524 2522 * Check if this page size is disabled.
2525 2523 */
2526 2524 if (large_pages_disable & (1 << ttesz))
2527 2525 continue;
2528 2526
2529 2527 numpg = TTEPAGES(ttesz);
2530 2528 mapsz = numpg << MMU_PAGESHIFT;
2531 2529 if ((npgs >= numpg) &&
2532 2530 IS_P2ALIGNED(addr, mapsz) &&
2533 2531 IS_P2ALIGNED(pp->p_pagenum, numpg)) {
2534 2532 /*
2535 2533 * At this point we have enough pages and
2536 2534 * we know the virtual address and the pfn
2537 2535 * are properly aligned. We still need
2538 2536 * to check for physical contiguity but since
2539 2537 * it is very likely that this is the case
2540 2538 * we will assume they are so and undo
2541 2539 * the request if necessary. It would
2542 2540 * be great if we could get a hint flag
2543 2541 * like HAT_CONTIG which would tell us
2544 2542				 * the pages are contiguous for sure.
2545 2543 */
2546 2544 sfmmu_memtte(&tte, (*pps)->p_pagenum,
2547 2545 attr, ttesz);
2548 2546 if (!sfmmu_tteload_array(hat, &tte, addr,
2549 2547 pps, flags, rid)) {
2550 2548 break;
2551 2549 }
2552 2550 }
2553 2551 }
2554 2552 if (ttesz == TTE8K) {
2555 2553 /*
2556 2554			 * We were not able to map the array using a large page;
2557 2555			 * batch an hmeblk or fraction at a time.
2558 2556 */
2559 2557 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT)
2560 2558 & (NHMENTS-1);
2561 2559 numpg = NHMENTS - numpg;
2562 2560 ASSERT(numpg <= npgs);
2563 2561 mapsz = numpg * MMU_PAGESIZE;
2564 2562 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags,
2565 2563 numpg, rid);
2566 2564 }
2567 2565 addr += mapsz;
2568 2566 npgs -= numpg;
2569 2567 pps += numpg;
2570 2568 }
2571 2569
2572 2570 if (npgs) {
2573 2571 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs,
2574 2572 rid);
2575 2573 }
2576 2574
2577 2575 /*
2578 2576 * Check TSB and TLB page sizes.
2579 2577 */
2580 2578 if ((flags & HAT_LOAD_SHARE) == 0) {
2581 2579 sfmmu_check_page_sizes(hat, 1);
2582 2580 }
2583 2581 }
2584 2582
2585 2583 /*
2586 2584 * Function tries to batch 8K pages into the same hme blk.
2587 2585 */
2588 2586 static void
2589 2587 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2590 2588 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2591 2589 {
2592 2590 tte_t tte;
2593 2591 page_t *pp;
2594 2592 struct hmehash_bucket *hmebp;
2595 2593 struct hme_blk *hmeblkp;
2596 2594 int index;
2597 2595
2598 2596 while (npgs) {
2599 2597 /*
2600 2598 * Acquire the hash bucket.
2601 2599 */
2602 2600 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K,
2603 2601 rid);
2604 2602 ASSERT(hmebp);
2605 2603
2606 2604 /*
2607 2605 * Find the hment block.
2608 2606 */
2609 2607 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr,
2610 2608 TTE8K, flags, rid);
2611 2609 ASSERT(hmeblkp);
2612 2610
2613 2611 do {
2614 2612 /*
2615 2613 * Make the tte.
2616 2614 */
2617 2615 pp = *pps;
2618 2616 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K);
2619 2617
2620 2618 /*
2621 2619 * Add the translation.
2622 2620 */
2623 2621 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte,
2624 2622 vaddr, pps, flags, rid);
2625 2623
2626 2624 /*
2627 2625			 * Go to the next page.
2628 2626 */
2629 2627 pps++;
2630 2628 npgs--;
2631 2629
2632 2630 /*
2633 2631 * Goto next address.
2634 2632			 * Go to the next address.
2635 2633 vaddr += MMU_PAGESIZE;
2636 2634
2637 2635 /*
2638 2636			 * Don't cross over into a different hmentblk.
2639 2637 */
2640 2638 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) &
2641 2639 (NHMENTS-1));
2642 2640
2643 2641 } while (index != 0 && npgs != 0);
2644 2642
2645 2643 /*
2646 2644 * Release the hash bucket.
2647 2645 */
2648 2646
2649 2647 sfmmu_tteload_release_hashbucket(hmebp);
2650 2648 }
2651 2649 }
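sfmmu_memload_batchsmall() loads 8K translations one hme_blk at a time: a block covers NHMENTS consecutive 8K pages, so each inner loop runs until the page index within the block wraps to zero or the pages run out. A small sketch of the boundary computation it relies on; pages_left_in_hmeblk() is an illustrative helper, not a function from this file:

	/* How many 8K pages fit before vaddr crosses into the next hme_blk. */
	static pgcnt_t
	pages_left_in_hmeblk(uintptr_t vaddr, pgcnt_t npgs)
	{
		pgcnt_t index = (vaddr >> MMU_PAGESHIFT) & (NHMENTS - 1);
		pgcnt_t left = NHMENTS - index;

		return (left < npgs ? left : npgs);
	}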
2652 2650
2653 2651 /*
2654 2652 * Construct a tte for a page:
2655 2653 *
2656 2654 * tte_valid = 1
2657 2655 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only)
2658 2656 * tte_size = size
2659 2657 * tte_nfo = attr & HAT_NOFAULT
2660 2658 * tte_ie = attr & HAT_STRUCTURE_LE
2661 2659 * tte_hmenum = hmenum
2662 2660 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT;
2663 2661 * tte_palo = pp->p_pagenum & TTE_PALOMASK;
2664 2662 * tte_ref = 1 (optimization)
2665 2663 * tte_wr_perm = attr & PROT_WRITE;
2666 2664 * tte_no_sync = attr & HAT_NOSYNC
2667 2665 * tte_lock = attr & SFMMU_LOCKTTE
2668 2666 * tte_cp = !(attr & SFMMU_UNCACHEPTTE)
2669 2667 * tte_cv = !(attr & SFMMU_UNCACHEVTTE)
2670 2668 * tte_e = attr & SFMMU_SIDEFFECT
2671 2669 * tte_priv = !(attr & PROT_USER)
2672 2670 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt)
2673 2671 * tte_glb = 0
2674 2672 */
2675 2673 void
2676 2674 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz)
2677 2675 {
2678 2676 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2679 2677
2680 2678 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */);
2681 2679 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */);
2682 2680
2683 2681 if (TTE_IS_NOSYNC(ttep)) {
2684 2682 TTE_SET_REF(ttep);
2685 2683 if (TTE_IS_WRITABLE(ttep)) {
2686 2684 TTE_SET_MOD(ttep);
2687 2685 }
2688 2686 }
2689 2687 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) {
2690 2688 panic("sfmmu_memtte: can't set both NFO and EXEC bits");
2691 2689 }
2692 2690 }
2693 2691
2694 2692 /*
2695 2693 * This function will add a translation to the hme_blk and allocate the
2696 2694 * hme_blk if one does not exist.
2697 2695 * If a page structure is specified then it will add the
2698 2696 * corresponding hment to the mapping list.
2699 2697 * It will also update the hmenum field for the tte.
2700 2698 *
2701 2699 * Currently this function is only used for kernel mappings.
2702 2700 * So pass invalid region to sfmmu_tteload_array().
2703 2701 */
2704 2702 void
2705 2703 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2706 2704 uint_t flags)
2707 2705 {
2708 2706 ASSERT(sfmmup == ksfmmup);
2709 2707 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2710 2708 SFMMU_INVALID_SHMERID);
2711 2709 }
2712 2710
2713 2711 /*
2714 2712 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2715 2713 * Assumes that a particular page size may only be resident in one TSB.
2716 2714 */
2717 2715 static void
2718 2716 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz)
2719 2717 {
2720 2718 struct tsb_info *tsbinfop = NULL;
2721 2719 uint64_t tag;
2722 2720 struct tsbe *tsbe_addr;
2723 2721 uint64_t tsb_base;
2724 2722 uint_t tsb_size;
2725 2723 int vpshift = MMU_PAGESHIFT;
2726 2724 int phys = 0;
2727 2725
2728 2726 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */
2729 2727 phys = ktsb_phys;
2730 2728 if (ttesz >= TTE4M) {
2731 2729 #ifndef sun4v
2732 2730 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2733 2731 #endif
2734 2732 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2735 2733 tsb_size = ktsb4m_szcode;
2736 2734 } else {
2737 2735 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2738 2736 tsb_size = ktsb_szcode;
2739 2737 }
2740 2738 } else {
2741 2739 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2742 2740
2743 2741 /*
2744 2742 * If there isn't a TSB for this page size, or the TSB is
2745 2743 * swapped out, there is nothing to do. Note that the latter
2746 2744 * case seems impossible but can occur if hat_pageunload()
2747 2745 * is called on an ISM mapping while the process is swapped
2748 2746 * out.
2749 2747 */
2750 2748 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2751 2749 return;
2752 2750
2753 2751 /*
2754 2752 * If another thread is in the middle of relocating a TSB
2755 2753 * we can't unload the entry so set a flag so that the
2756 2754 * TSB will be flushed before it can be accessed by the
2757 2755 * process.
2758 2756 */
2759 2757 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2760 2758 if (ttep == NULL)
2761 2759 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2762 2760 return;
2763 2761 }
2764 2762 #if defined(UTSB_PHYS)
2765 2763 phys = 1;
2766 2764 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2767 2765 #else
2768 2766 tsb_base = (uint64_t)tsbinfop->tsb_va;
2769 2767 #endif
2770 2768 tsb_size = tsbinfop->tsb_szc;
2771 2769 }
2772 2770 if (ttesz >= TTE4M)
2773 2771 vpshift = MMU_PAGESHIFT4M;
2774 2772
2775 2773 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2776 2774 tag = sfmmu_make_tsbtag(vaddr);
2777 2775
2778 2776 if (ttep == NULL) {
2779 2777 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2780 2778 } else {
2781 2779 if (ttesz >= TTE4M) {
2782 2780 SFMMU_STAT(sf_tsb_load4m);
2783 2781 } else {
2784 2782 SFMMU_STAT(sf_tsb_load8k);
2785 2783 }
2786 2784
2787 2785 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys);
2788 2786 }
2789 2787 }
2790 2788
2791 2789 /*
2792 2790 * Unmap all entries from [start, end) matching the given page size.
2793 2791 *
2794 2792 * This function is used primarily to unmap replicated 64K or 512K entries
2795 2793 * from the TSB that are inserted using the base page size TSB pointer, but
2796 2794 * it may also be called to unmap a range of addresses from the TSB.
2797 2795 */
2798 2796 void
2799 2797 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz)
2800 2798 {
2801 2799 struct tsb_info *tsbinfop;
2802 2800 uint64_t tag;
2803 2801 struct tsbe *tsbe_addr;
2804 2802 caddr_t vaddr;
2805 2803 uint64_t tsb_base;
2806 2804 int vpshift, vpgsz;
2807 2805 uint_t tsb_size;
2808 2806 int phys = 0;
2809 2807
2810 2808 /*
2811 2809 * Assumptions:
2812 2810 * If ttesz == 8K, 64K or 512K, we walk through the range 8K
2813 2811 * at a time shooting down any valid entries we encounter.
2814 2812 *
2815 2813 * If ttesz >= 4M we walk the range 4M at a time shooting
2816 2814 * down any valid mappings we find.
2817 2815 */
2818 2816 if (sfmmup == ksfmmup) {
2819 2817 phys = ktsb_phys;
2820 2818 if (ttesz >= TTE4M) {
2821 2819 #ifndef sun4v
2822 2820 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M));
2823 2821 #endif
2824 2822 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base;
2825 2823 tsb_size = ktsb4m_szcode;
2826 2824 } else {
2827 2825 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base;
2828 2826 tsb_size = ktsb_szcode;
2829 2827 }
2830 2828 } else {
2831 2829 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz);
2832 2830
2833 2831 /*
2834 2832 * If there isn't a TSB for this page size, or the TSB is
2835 2833 * swapped out, there is nothing to do. Note that the latter
2836 2834 * case seems impossible but can occur if hat_pageunload()
2837 2835 * is called on an ISM mapping while the process is swapped
2838 2836 * out.
2839 2837 */
2840 2838 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED))
2841 2839 return;
2842 2840
2843 2841 /*
2844 2842 * If another thread is in the middle of relocating a TSB
2845 2843 * we can't unload the entry so set a flag so that the
2846 2844 * TSB will be flushed before it can be accessed by the
2847 2845 * process.
2848 2846 */
2849 2847 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) {
2850 2848 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED;
2851 2849 return;
2852 2850 }
2853 2851 #if defined(UTSB_PHYS)
2854 2852 phys = 1;
2855 2853 tsb_base = (uint64_t)tsbinfop->tsb_pa;
2856 2854 #else
2857 2855 tsb_base = (uint64_t)tsbinfop->tsb_va;
2858 2856 #endif
2859 2857 tsb_size = tsbinfop->tsb_szc;
2860 2858 }
2861 2859 if (ttesz >= TTE4M) {
2862 2860 vpshift = MMU_PAGESHIFT4M;
2863 2861 vpgsz = MMU_PAGESIZE4M;
2864 2862 } else {
2865 2863 vpshift = MMU_PAGESHIFT;
2866 2864 vpgsz = MMU_PAGESIZE;
2867 2865 }
2868 2866
2869 2867 for (vaddr = start; vaddr < end; vaddr += vpgsz) {
2870 2868 tag = sfmmu_make_tsbtag(vaddr);
2871 2869 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size);
2872 2870 sfmmu_unload_tsbe(tsbe_addr, tag, phys);
2873 2871 }
2874 2872 }
2875 2873
2876 2874 /*
2877 2875 * Select the optimum TSB size given the number of mappings
2878 2876 * that need to be cached.
2879 2877 */
2880 2878 static int
2881 2879 sfmmu_select_tsb_szc(pgcnt_t pgcnt)
2882 2880 {
2883 2881 int szc = 0;
2884 2882
2885 2883 #ifdef DEBUG
2886 2884 if (tsb_grow_stress) {
2887 2885 uint32_t randval = (uint32_t)gettick() >> 4;
2888 2886 return (randval % (tsb_max_growsize + 1));
2889 2887 }
2890 2888 #endif /* DEBUG */
2891 2889
2892 2890 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc)))
2893 2891 szc++;
2894 2892 return (szc);
2895 2893 }
2896 2894
2897 2895 /*
2898 2896 * This function will add a translation to the hme_blk and allocate the
2899 2897 * hme_blk if one does not exist.
2900 2898 * If a page structure is specified then it will add the
2901 2899 * corresponding hment to the mapping list.
2902 2900 * It will also update the hmenum field for the tte.
2903 2901 * Furthermore, it attempts to create a large page translation
2904 2902 * for <addr,hat> at page array pps. It assumes addr and the first
2905 2903 * pp are correctly aligned. It returns 0 if successful and 1 otherwise.
2906 2904 */
2907 2905 static int
2908 2906 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2909 2907 page_t **pps, uint_t flags, uint_t rid)
2910 2908 {
2911 2909 struct hmehash_bucket *hmebp;
2912 2910 struct hme_blk *hmeblkp;
2913 2911 int ret;
2914 2912 uint_t size;
2915 2913
2916 2914 /*
2917 2915 * Get mapping size.
2918 2916 */
2919 2917 size = TTE_CSZ(ttep);
2920 2918 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2921 2919
2922 2920 /*
2923 2921 * Acquire the hash bucket.
2924 2922 */
2925 2923 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid);
2926 2924 ASSERT(hmebp);
2927 2925
2928 2926 /*
2929 2927 * Find the hment block.
2930 2928 */
2931 2929 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags,
2932 2930 rid);
2933 2931 ASSERT(hmeblkp);
2934 2932
2935 2933 /*
2936 2934 * Add the translation.
2937 2935 */
2938 2936 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags,
2939 2937 rid);
2940 2938
2941 2939 /*
2942 2940 * Release the hash bucket.
2943 2941 */
2944 2942 sfmmu_tteload_release_hashbucket(hmebp);
2945 2943
2946 2944 return (ret);
2947 2945 }
2948 2946
2949 2947 /*
2950 2948 * Function locks and returns a pointer to the hash bucket for vaddr and size.
2951 2949 */
2952 2950 static struct hmehash_bucket *
2953 2951 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size,
2954 2952 uint_t rid)
2955 2953 {
2956 2954 struct hmehash_bucket *hmebp;
2957 2955 int hmeshift;
2958 2956 void *htagid = sfmmutohtagid(sfmmup, rid);
2959 2957
2960 2958 ASSERT(htagid != NULL);
2961 2959
2962 2960 hmeshift = HME_HASH_SHIFT(size);
2963 2961
2964 2962 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift);
2965 2963
2966 2964 SFMMU_HASH_LOCK(hmebp);
2967 2965
2968 2966 return (hmebp);
2969 2967 }
2970 2968
2971 2969 /*
2972 2970 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2973 2971 * hmeblk doesn't exist for the [sfmmup, vaddr & size] signature, a hmeblk is
2974 2972 * allocated.
2975 2973 */
2976 2974 static struct hme_blk *
2977 2975 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2978 2976 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2979 2977 {
2980 2978 hmeblk_tag hblktag;
2981 2979 int hmeshift;
2982 2980 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2983 2981
2984 2982 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2985 2983
2986 2984 hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2987 2985 ASSERT(hblktag.htag_id != NULL);
2988 2986 hmeshift = HME_HASH_SHIFT(size);
2989 2987 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
2990 2988 hblktag.htag_rehash = HME_HASH_REHASH(size);
2991 2989 hblktag.htag_rid = rid;
2992 2990
2993 2991 ttearray_realloc:
2994 2992
2995 2993 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
2996 2994
2997 2995 /*
2998 2996 * We block until hblk_reserve_lock is released; it's held by
2999 2997	 * the thread temporarily using hblk_reserve, until hblk_reserve is
3000 2998 * replaced by a hblk from sfmmu8_cache.
3001 2999 */
3002 3000 if (hmeblkp == (struct hme_blk *)hblk_reserve &&
3003 3001 hblk_reserve_thread != curthread) {
3004 3002 SFMMU_HASH_UNLOCK(hmebp);
3005 3003 mutex_enter(&hblk_reserve_lock);
3006 3004 mutex_exit(&hblk_reserve_lock);
3007 3005 SFMMU_STAT(sf_hblk_reserve_hit);
3008 3006 SFMMU_HASH_LOCK(hmebp);
3009 3007 goto ttearray_realloc;
3010 3008 }
3011 3009
3012 3010 if (hmeblkp == NULL) {
3013 3011 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3014 3012 hblktag, flags, rid);
3015 3013 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3016 3014 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3017 3015 } else {
3018 3016 /*
3019 3017 * It is possible for 8k and 64k hblks to collide since they
3020 3018 * have the same rehash value. This is because we
3021 3019 * lazily free hblks and 8K/64K blks could be lingering.
3022 3020  * If we find a size mismatch we free the block and try again.
3023 3021 */
3024 3022 if (get_hblk_ttesz(hmeblkp) != size) {
3025 3023 ASSERT(!hmeblkp->hblk_vcnt);
3026 3024 ASSERT(!hmeblkp->hblk_hmecnt);
3027 3025 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3028 3026 &list, 0);
3029 3027 goto ttearray_realloc;
3030 3028 }
3031 3029 if (hmeblkp->hblk_shw_bit) {
3032 3030 /*
3033 3031 * if the hblk was previously used as a shadow hblk then
3034 3032 * we will change it to a normal hblk
3035 3033 */
3036 3034 ASSERT(!hmeblkp->hblk_shared);
3037 3035 if (hmeblkp->hblk_shw_mask) {
3038 3036 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp);
3039 3037 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3040 3038 goto ttearray_realloc;
3041 3039 } else {
3042 3040 hmeblkp->hblk_shw_bit = 0;
3043 3041 }
3044 3042 }
3045 3043 SFMMU_STAT(sf_hblk_hit);
3046 3044 }
3047 3045
3048 3046 /*
3049 3047 * hat_memload() should never call kmem_cache_free() for kernel hmeblks;
3050 3048 * see block comment showing the stacktrace in sfmmu_hblk_alloc();
3051 3049 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will
3052 3050 * just add these hmeblks to the per-cpu pending queue.
3053 3051 */
3054 3052 sfmmu_hblks_list_purge(&list, 1);
3055 3053
3056 3054 ASSERT(get_hblk_ttesz(hmeblkp) == size);
3057 3055 ASSERT(!hmeblkp->hblk_shw_bit);
3058 3056 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3059 3057 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3060 3058 ASSERT(hmeblkp->hblk_tag.htag_rid == rid);
3061 3059
3062 3060 return (hmeblkp);
3063 3061 }
3064 3062
3065 3063 /*
3066 3064 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3067 3065 * otherwise.
3068 3066 */
3069 3067 static int
3070 3068 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3071 3069 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3072 3070 {
3073 3071 page_t *pp = *pps;
3074 3072 int hmenum, size, remap;
3075 3073 tte_t tteold, flush_tte;
3076 3074 #ifdef DEBUG
3077 3075 tte_t orig_old;
3078 3076 #endif /* DEBUG */
3079 3077 struct sf_hment *sfhme;
3080 3078 kmutex_t *pml, *pmtx;
3081 3079 hatlock_t *hatlockp;
3082 3080 int myflt;
3083 3081
3084 3082 /*
3085 3083 * remove this panic when we decide to let user virtual address
3086 3084 * space be >= USERLIMIT.
3087 3085 */
3088 3086 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT)
3089 3087 panic("user addr %p in kernel space", (void *)vaddr);
3090 3088 #if defined(TTE_IS_GLOBAL)
3091 3089 if (TTE_IS_GLOBAL(ttep))
3092 3090 panic("sfmmu_tteload: creating global tte");
3093 3091 #endif
3094 3092
3095 3093 #ifdef DEBUG
3096 3094 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) &&
3097 3095 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans)
3098 3096 panic("sfmmu_tteload: non cacheable memory tte");
3099 3097 #endif /* DEBUG */
3100 3098
3101 3099 /* don't simulate dirty bit for writeable ISM/DISM mappings */
3102 3100 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) {
3103 3101 TTE_SET_REF(ttep);
3104 3102 TTE_SET_MOD(ttep);
3105 3103 }
3106 3104
3107 3105 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) ||
3108 3106 !TTE_IS_MOD(ttep)) {
3109 3107 /*
3110 3108 * Don't load TSB for dummy as in ISM. Also don't preload
3111 3109 * the TSB if the TTE isn't writable since we're likely to
3112 3110 * fault on it again -- preloading can be fairly expensive.
3113 3111 */
3114 3112 flags |= SFMMU_NO_TSBLOAD;
3115 3113 }
3116 3114
3117 3115 size = TTE_CSZ(ttep);
3118 3116 switch (size) {
3119 3117 case TTE8K:
3120 3118 SFMMU_STAT(sf_tteload8k);
3121 3119 break;
3122 3120 case TTE64K:
3123 3121 SFMMU_STAT(sf_tteload64k);
3124 3122 break;
3125 3123 case TTE512K:
3126 3124 SFMMU_STAT(sf_tteload512k);
3127 3125 break;
3128 3126 case TTE4M:
3129 3127 SFMMU_STAT(sf_tteload4m);
3130 3128 break;
3131 3129 case (TTE32M):
3132 3130 SFMMU_STAT(sf_tteload32m);
3133 3131 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3134 3132 break;
3135 3133 case (TTE256M):
3136 3134 SFMMU_STAT(sf_tteload256m);
3137 3135 ASSERT(mmu_page_sizes == max_mmu_page_sizes);
3138 3136 break;
3139 3137 }
3140 3138
3141 3139 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
3142 3140 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
3143 3141 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared);
3144 3142 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared);
3145 3143
3146 3144 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum);
3147 3145
3148 3146 /*
3149 3147 * Need to grab mlist lock here so that pageunload
3150 3148 * will not change tte behind us.
3151 3149 */
3152 3150 if (pp) {
3153 3151 pml = sfmmu_mlist_enter(pp);
3154 3152 }
3155 3153
3156 3154 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3157 3155 /*
3158 3156 * Look for corresponding hment and if valid verify
3159 3157 * pfns are equal.
3160 3158 */
3161 3159 remap = TTE_IS_VALID(&tteold);
3162 3160 if (remap) {
3163 3161 pfn_t new_pfn, old_pfn;
3164 3162
3165 3163 old_pfn = TTE_TO_PFN(vaddr, &tteold);
3166 3164 new_pfn = TTE_TO_PFN(vaddr, ttep);
3167 3165
3168 3166 if (flags & HAT_LOAD_REMAP) {
3169 3167 /* make sure we are remapping same type of pages */
3170 3168 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) {
3171 3169 panic("sfmmu_tteload - tte remap io<->memory");
3172 3170 }
3173 3171 if (old_pfn != new_pfn &&
3174 3172 (pp != NULL || sfhme->hme_page != NULL)) {
3175 3173 panic("sfmmu_tteload - tte remap pp != NULL");
3176 3174 }
3177 3175 } else if (old_pfn != new_pfn) {
3178 3176 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p",
3179 3177 (void *)hmeblkp);
3180 3178 }
3181 3179 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep));
3182 3180 }
3183 3181
3184 3182 if (pp) {
3185 3183 if (size == TTE8K) {
3186 3184 #ifdef VAC
3187 3185 /*
3188 3186 * Handle VAC consistency
3189 3187 */
3190 3188 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) {
3191 3189 sfmmu_vac_conflict(sfmmup, vaddr, pp);
3192 3190 }
3193 3191 #endif
3194 3192
3195 3193 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3196 3194 pmtx = sfmmu_page_enter(pp);
3197 3195 PP_CLRRO(pp);
3198 3196 sfmmu_page_exit(pmtx);
3199 3197 } else if (!PP_ISMAPPED(pp) &&
3200 3198 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) {
3201 3199 pmtx = sfmmu_page_enter(pp);
3202 3200 if (!(PP_ISMOD(pp))) {
3203 3201 PP_SETRO(pp);
3204 3202 }
3205 3203 sfmmu_page_exit(pmtx);
3206 3204 }
3207 3205
3208 3206 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) {
3209 3207 /*
3210 3208 * sfmmu_pagearray_setup failed so return
3211 3209 */
3212 3210 sfmmu_mlist_exit(pml);
3213 3211 return (1);
3214 3212 }
3215 3213 }
3216 3214
3217 3215 /*
3218 3216 * Make sure hment is not on a mapping list.
3219 3217 */
3220 3218 ASSERT(remap || (sfhme->hme_page == NULL));
3221 3219
3222 3220 /* if it is not a remap then hme->next better be NULL */
3223 3221 ASSERT((!remap) ? sfhme->hme_next == NULL : 1);
3224 3222
3225 3223 if (flags & HAT_LOAD_LOCK) {
3226 3224 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) {
3227 3225 panic("too high lckcnt-hmeblk %p",
3228 3226 (void *)hmeblkp);
3229 3227 }
3230 3228 atomic_inc_32(&hmeblkp->hblk_lckcnt);
3231 3229
3232 3230 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK);
3233 3231 }
3234 3232
3235 3233 #ifdef VAC
3236 3234 if (pp && PP_ISNC(pp)) {
3237 3235 /*
3238 3236 * If the physical page is marked to be uncacheable, like
3239 3237 * by a vac conflict, make sure the new mapping is also
3240 3238 * uncacheable.
3241 3239 */
3242 3240 TTE_CLR_VCACHEABLE(ttep);
3243 3241 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
3244 3242 }
3245 3243 #endif
3246 3244 ttep->tte_hmenum = hmenum;
3247 3245
3248 3246 #ifdef DEBUG
3249 3247 orig_old = tteold;
3250 3248 #endif /* DEBUG */
3251 3249
3252 3250 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) {
3253 3251 if ((sfmmup == KHATID) &&
3254 3252 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) {
3255 3253 sfmmu_copytte(&sfhme->hme_tte, &tteold);
3256 3254 }
3257 3255 #ifdef DEBUG
3258 3256 chk_tte(&orig_old, &tteold, ttep, hmeblkp);
3259 3257 #endif /* DEBUG */
3260 3258 }
3261 3259 ASSERT(TTE_IS_VALID(&sfhme->hme_tte));
3262 3260
3263 3261 if (!TTE_IS_VALID(&tteold)) {
3264 3262
3265 3263 atomic_inc_16(&hmeblkp->hblk_vcnt);
3266 3264 if (rid == SFMMU_INVALID_SHMERID) {
3267 3265 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]);
3268 3266 } else {
3269 3267 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
3270 3268 sf_region_t *rgnp = srdp->srd_hmergnp[rid];
3271 3269 /*
3272 3270 * We already accounted for region ttecnt's in sfmmu
3273 3271 * during hat_join_region() processing. Here we
3274 3272  * only update ttecnt's in the region structure.
3275 3273 */
3276 3274 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]);
3277 3275 }
3278 3276 }
3279 3277
3280 3278 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup);
3281 3279 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 &&
3282 3280 sfmmup != ksfmmup) {
3283 3281 uchar_t tteflag = 1 << size;
3284 3282 if (rid == SFMMU_INVALID_SHMERID) {
3285 3283 if (!(sfmmup->sfmmu_tteflags & tteflag)) {
3286 3284 hatlockp = sfmmu_hat_enter(sfmmup);
3287 3285 sfmmup->sfmmu_tteflags |= tteflag;
3288 3286 sfmmu_hat_exit(hatlockp);
3289 3287 }
3290 3288 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) {
3291 3289 hatlockp = sfmmu_hat_enter(sfmmup);
3292 3290 sfmmup->sfmmu_rtteflags |= tteflag;
3293 3291 sfmmu_hat_exit(hatlockp);
3294 3292 }
3295 3293 /*
3296 3294 * Update the current CPU tsbmiss area, so the current thread
3297 3295 * won't need to take the tsbmiss for the new pagesize.
3298 3296 * The other threads in the process will update their tsb
3299 3297 * miss area lazily in sfmmu_tsbmiss_exception() when they
3300 3298 * fail to find the translation for a newly added pagesize.
3301 3299 */
3302 3300 if (size > TTE64K && myflt) {
3303 3301 struct tsbmiss *tsbmp;
3304 3302 kpreempt_disable();
3305 3303 tsbmp = &tsbmiss_area[CPU->cpu_id];
3306 3304 if (rid == SFMMU_INVALID_SHMERID) {
3307 3305 if (!(tsbmp->uhat_tteflags & tteflag)) {
3308 3306 tsbmp->uhat_tteflags |= tteflag;
3309 3307 }
3310 3308 } else {
3311 3309 if (!(tsbmp->uhat_rtteflags & tteflag)) {
3312 3310 tsbmp->uhat_rtteflags |= tteflag;
3313 3311 }
3314 3312 }
3315 3313 kpreempt_enable();
3316 3314 }
3317 3315 }
3318 3316
3319 3317 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) &&
3320 3318 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
3321 3319 hatlockp = sfmmu_hat_enter(sfmmup);
3322 3320 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
3323 3321 sfmmu_hat_exit(hatlockp);
3324 3322 }
3325 3323
3326 3324 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) &
3327 3325 hw_tte.tte_intlo;
3328 3326 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) &
3329 3327 hw_tte.tte_inthi;
3330 3328
3331 3329 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) {
3332 3330 /*
3333 3331  * If this is a remap and the new tte differs from the old tte we need
3334 3332 * to sync the mod bit and flush TLB/TSB. We don't
3335 3333 * need to sync ref bit because we currently always set
3336 3334 * ref bit in tteload.
3337 3335 */
3338 3336 ASSERT(TTE_IS_REF(ttep));
3339 3337 if (TTE_IS_MOD(&tteold)) {
3340 3338 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp);
3341 3339 }
3342 3340 /*
3343 3341 * hwtte bits shouldn't change for SRD hmeblks as long as SRD
3344 3342 * hmes are only used for read only text. Adding this code for
3345 3343 * completeness and future use of shared hmeblks with writable
3346 3344 * mappings of VMODSORT vnodes.
3347 3345 */
3348 3346 if (hmeblkp->hblk_shared) {
3349 3347 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr,
3350 3348 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1);
3351 3349 xt_sync(cpuset);
3352 3350 SFMMU_STAT_ADD(sf_region_remap_demap, 1);
3353 3351 } else {
3354 3352 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0);
3355 3353 xt_sync(sfmmup->sfmmu_cpusran);
3356 3354 }
3357 3355 }
3358 3356
3359 3357 if ((flags & SFMMU_NO_TSBLOAD) == 0) {
3360 3358 /*
3361 3359 * We only preload 8K and 4M mappings into the TSB, since
3362 3360 * 64K and 512K mappings are replicated and hence don't
3363 3361 * have a single, unique TSB entry. Ditto for 32M/256M.
3364 3362 */
3365 3363 if (size == TTE8K || size == TTE4M) {
3366 3364 sf_scd_t *scdp;
3367 3365 hatlockp = sfmmu_hat_enter(sfmmup);
3368 3366 /*
3369 3367 * Don't preload private TSB if the mapping is used
3370 3368 * by the shctx in the SCD.
3371 3369 */
3372 3370 scdp = sfmmup->sfmmu_scdp;
3373 3371 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL ||
3374 3372 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
3375 3373 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte,
3376 3374 size);
3377 3375 }
3378 3376 sfmmu_hat_exit(hatlockp);
3379 3377 }
3380 3378 }
3381 3379 if (pp) {
3382 3380 if (!remap) {
3383 3381 HME_ADD(sfhme, pp);
3384 3382 atomic_inc_16(&hmeblkp->hblk_hmecnt);
3385 3383 ASSERT(hmeblkp->hblk_hmecnt > 0);
3386 3384
3387 3385 /*
3388 3386 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
3389 3387 * see pageunload() for comment.
3390 3388 */
3391 3389 }
3392 3390 sfmmu_mlist_exit(pml);
3393 3391 }
3394 3392
3395 3393 return (0);
3396 3394 }
3397 3395 /*
3398 3396 * Function unlocks hash bucket.
3399 3397 */
3400 3398 static void
3401 3399 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp)
3402 3400 {
3403 3401 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3404 3402 SFMMU_HASH_UNLOCK(hmebp);
3405 3403 }
3406 3404
3407 3405 /*
3408 3406 * function which checks and sets up page array for a large
3409 3407 * translation. Will set p_vcolor, p_index, p_ro fields.
3410 3408 * Assumes addr and pfnum of first page are properly aligned.
3411 3409  * Will check for physical contiguity. If the check fails it returns
3412 3410  * nonzero.
3413 3411 */
3414 3412 static int
3415 3413 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3416 3414 {
3417 3415 int i, index, ttesz;
3418 3416 pfn_t pfnum;
3419 3417 pgcnt_t npgs;
3420 3418 page_t *pp, *pp1;
3421 3419 kmutex_t *pmtx;
3422 3420 #ifdef VAC
3423 3421 int osz;
3424 3422 int cflags = 0;
3425 3423 int vac_err = 0;
3426 3424 #endif
3427 3425 int newidx = 0;
3428 3426
3429 3427 ttesz = TTE_CSZ(ttep);
3430 3428
3431 3429 ASSERT(ttesz > TTE8K);
3432 3430
3433 3431 npgs = TTEPAGES(ttesz);
3434 3432 index = PAGESZ_TO_INDEX(ttesz);
3435 3433
3436 3434 pfnum = (*pps)->p_pagenum;
3437 3435 ASSERT(IS_P2ALIGNED(pfnum, npgs));
3438 3436
3439 3437 /*
3440 3438 * Save the first pp so we can do HAT_TMPNC at the end.
3441 3439 */
3442 3440 pp1 = *pps;
3443 3441 #ifdef VAC
3444 3442 osz = fnd_mapping_sz(pp1);
3445 3443 #endif
3446 3444
3447 3445 for (i = 0; i < npgs; i++, pps++) {
3448 3446 pp = *pps;
3449 3447 ASSERT(PAGE_LOCKED(pp));
3450 3448 ASSERT(pp->p_szc >= ttesz);
3451 3449 ASSERT(pp->p_szc == pp1->p_szc);
3452 3450 ASSERT(sfmmu_mlist_held(pp));
3453 3451
3454 3452 /*
3455 3453 * XXX is it possible to maintain P_RO on the root only?
3456 3454 */
3457 3455 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) {
3458 3456 pmtx = sfmmu_page_enter(pp);
3459 3457 PP_CLRRO(pp);
3460 3458 sfmmu_page_exit(pmtx);
3461 3459 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) &&
3462 3460 !PP_ISMOD(pp)) {
3463 3461 pmtx = sfmmu_page_enter(pp);
3464 3462 if (!(PP_ISMOD(pp))) {
3465 3463 PP_SETRO(pp);
3466 3464 }
3467 3465 sfmmu_page_exit(pmtx);
3468 3466 }
3469 3467
3470 3468 /*
3471 3469 * If this is a remap we skip vac & contiguity checks.
3472 3470 */
3473 3471 if (remap)
3474 3472 continue;
3475 3473
3476 3474 /*
3477 3475 * set p_vcolor and detect any vac conflicts.
3478 3476 */
3479 3477 #ifdef VAC
3480 3478 if (vac_err == 0) {
3481 3479 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags);
3482 3480
3483 3481 }
3484 3482 #endif
3485 3483
3486 3484 /*
3487 3485 * Save current index in case we need to undo it.
3488 3486 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))"
3489 3487 * "SFMMU_INDEX_SHIFT 6"
3490 3488 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)"
3491 3489 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)"
3492 3490 *
3493 3491 * So: index = PAGESZ_TO_INDEX(ttesz);
3494 3492 * if ttesz == 1 then index = 0x2
3495 3493 * 2 then index = 0x4
3496 3494 * 3 then index = 0x8
3497 3495 * 4 then index = 0x10
3498 3496 * 5 then index = 0x20
3499 3497 * The code below checks if it's a new pagesize (ie, newidx)
3500 3498 * in case we need to take it back out of p_index,
3501 3499 * and then or's the new index into the existing index.
3502 3500 */
3503 3501 if ((PP_MAPINDEX(pp) & index) == 0)
3504 3502 newidx = 1;
3505 3503 pp->p_index = (PP_MAPINDEX(pp) | index);
3506 3504
3507 3505 /*
3508 3506 * contiguity check
3509 3507 */
3510 3508 if (pp->p_pagenum != pfnum) {
3511 3509 /*
3512 3510 * If we fail the contiguity test then
3513 3511 * the only thing we need to fix is the p_index field.
3514 3512 * We might get a few extra flushes but since this
3515 3513 * path is rare that is ok. The p_ro field will
3516 3514 * get automatically fixed on the next tteload to
3517 3515 * the page. NO TNC bit is set yet.
3518 3516 */
3519 3517 while (i >= 0) {
3520 3518 pp = *pps;
3521 3519 if (newidx)
3522 3520 pp->p_index = (PP_MAPINDEX(pp) &
3523 3521 ~index);
3524 3522 pps--;
3525 3523 i--;
3526 3524 }
3527 3525 return (1);
3528 3526 }
3529 3527 pfnum++;
3530 3528 addr += MMU_PAGESIZE;
3531 3529 }
3532 3530
3533 3531 #ifdef VAC
3534 3532 if (vac_err) {
3535 3533 if (ttesz > osz) {
3536 3534 /*
3537 3535  * There are some smaller mappings that cause vac
3538 3536 * conflicts. Convert all existing small mappings to
3539 3537 * TNC.
3540 3538 */
3541 3539 SFMMU_STAT_ADD(sf_uncache_conflict, npgs);
3542 3540 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH,
3543 3541 npgs);
3544 3542 } else {
3545 3543 /* EMPTY */
3546 3544 /*
3547 3545  * If there exists a big page mapping,
3548 3546  * that means the whole existing big page
3549 3547  * has the TNC setting already. No need to convert to
3550 3548  * TNC again.
3551 3549 */
3552 3550 ASSERT(PP_ISTNC(pp1));
3553 3551 }
3554 3552 }
3555 3553 #endif /* VAC */
3556 3554
3557 3555 return (0);
3558 3556 }
3559 3557
3560 3558 #ifdef VAC
3561 3559 /*
3562 3560  * Routine that checks vac consistency for a large page. It also
3563 3561 * sets virtual color for all pp's for this big mapping.
3564 3562 */
3565 3563 static int
3566 3564 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags)
3567 3565 {
3568 3566 int vcolor, ocolor;
3569 3567
3570 3568 ASSERT(sfmmu_mlist_held(pp));
3571 3569
3572 3570 if (PP_ISNC(pp)) {
3573 3571 return (HAT_TMPNC);
3574 3572 }
3575 3573
3576 3574 vcolor = addr_to_vcolor(addr);
3577 3575 if (PP_NEWPAGE(pp)) {
3578 3576 PP_SET_VCOLOR(pp, vcolor);
3579 3577 return (0);
3580 3578 }
3581 3579
3582 3580 ocolor = PP_GET_VCOLOR(pp);
3583 3581 if (ocolor == vcolor) {
3584 3582 return (0);
3585 3583 }
3586 3584
3587 3585 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
3588 3586 /*
3589 3587  * Previous user of page had a different color
3590 3588 * but since there are no current users
3591 3589 * we just flush the cache and change the color.
3592 3590 * As an optimization for large pages we flush the
3593 3591 * entire cache of that color and set a flag.
3594 3592 */
3595 3593 SFMMU_STAT(sf_pgcolor_conflict);
3596 3594 if (!CacheColor_IsFlushed(*cflags, ocolor)) {
3597 3595 CacheColor_SetFlushed(*cflags, ocolor);
3598 3596 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum);
3599 3597 }
3600 3598 PP_SET_VCOLOR(pp, vcolor);
3601 3599 return (0);
3602 3600 }
3603 3601
3604 3602 /*
3605 3603  * We got a real conflict with a current mapping.
3606 3604  * Set flags to start uncaching all mappings
3607 3605  * and return failure so we restart looping over
3608 3606  * the pp array from the beginning.
3609 3607 */
3610 3608 return (HAT_TMPNC);
3611 3609 }
3612 3610 #endif /* VAC */
3613 3611
3614 3612 /*
3615 3613 * creates a large page shadow hmeblk for a tte.
3616 3614 * The purpose of this routine is to allow us to do quick unloads because
3617 3615 * the vm layer can easily pass a very large but sparsely populated range.
3618 3616 */
3619 3617 static struct hme_blk *
3620 3618 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags)
3621 3619 {
3622 3620 struct hmehash_bucket *hmebp;
3623 3621 hmeblk_tag hblktag;
3624 3622 int hmeshift, size, vshift;
3625 3623 uint_t shw_mask, newshw_mask;
3626 3624 struct hme_blk *hmeblkp;
3627 3625
3628 3626 ASSERT(sfmmup != KHATID);
3629 3627 if (mmu_page_sizes == max_mmu_page_sizes) {
3630 3628 ASSERT(ttesz < TTE256M);
3631 3629 } else {
3632 3630 ASSERT(ttesz < TTE4M);
3633 3631 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
3634 3632 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
3635 3633 }
3636 3634
3637 3635 if (ttesz == TTE8K) {
3638 3636 size = TTE512K;
3639 3637 } else {
3640 3638 size = ++ttesz;
3641 3639 }
3642 3640
3643 3641 hblktag.htag_id = sfmmup;
3644 3642 hmeshift = HME_HASH_SHIFT(size);
3645 3643 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
3646 3644 hblktag.htag_rehash = HME_HASH_REHASH(size);
3647 3645 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3648 3646 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
3649 3647
3650 3648 SFMMU_HASH_LOCK(hmebp);
3651 3649
3652 3650 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
3653 3651 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
3654 3652 if (hmeblkp == NULL) {
3655 3653 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size,
3656 3654 hblktag, flags, SFMMU_INVALID_SHMERID);
3657 3655 }
3658 3656 ASSERT(hmeblkp);
3659 3657 if (!hmeblkp->hblk_shw_mask) {
3660 3658 /*
3661 3659  * if this is an unused hblk it was just allocated or could
3662 3660 * potentially be a previous large page hblk so we need to
3663 3661 * set the shadow bit.
3664 3662 */
3665 3663 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3666 3664 hmeblkp->hblk_shw_bit = 1;
3667 3665 } else if (hmeblkp->hblk_shw_bit == 0) {
3668 3666 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p",
3669 3667 (void *)hmeblkp);
3670 3668 }
3671 3669 ASSERT(hmeblkp->hblk_shw_bit == 1);
3672 3670 ASSERT(!hmeblkp->hblk_shared);
3673 3671 vshift = vaddr_to_vshift(hblktag, vaddr, size);
3674 3672 ASSERT(vshift < 8);
3675 3673 /*
3676 3674 * Atomically set shw mask bit
3677 3675 */
3678 3676 do {
3679 3677 shw_mask = hmeblkp->hblk_shw_mask;
3680 3678 newshw_mask = shw_mask | (1 << vshift);
3681 3679 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask,
3682 3680 newshw_mask);
3683 3681 } while (newshw_mask != shw_mask);
3684 3682
3685 3683 SFMMU_HASH_UNLOCK(hmebp);
3686 3684
3687 3685 return (hmeblkp);
3688 3686 }
3689 3687
3690 3688 /*
3691 3689  * This routine cleans up a previous shadow hmeblk and changes it to
3692 3690  * a regular hblk. This happens rarely but it is possible
3693 3691  * when a process wants to use large pages and there are hblks still
3694 3692  * lying around from the previous address space that used these hmeblks.
3695 3693  * The alternative was to clean up the shadow hblks at unload time
3696 3694  * but since so few user processes actually use large pages, it is
3697 3695  * better to be lazy and clean up at this time.
3698 3696 */
3699 3697 static void
3700 3698 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3701 3699 struct hmehash_bucket *hmebp)
3702 3700 {
3703 3701 caddr_t addr, endaddr;
3704 3702 int hashno, size;
3705 3703
3706 3704 ASSERT(hmeblkp->hblk_shw_bit);
3707 3705 ASSERT(!hmeblkp->hblk_shared);
3708 3706
3709 3707 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3710 3708
3711 3709 if (!hmeblkp->hblk_shw_mask) {
3712 3710 hmeblkp->hblk_shw_bit = 0;
3713 3711 return;
3714 3712 }
3715 3713 addr = (caddr_t)get_hblk_base(hmeblkp);
3716 3714 endaddr = get_hblk_endaddr(hmeblkp);
3717 3715 size = get_hblk_ttesz(hmeblkp);
3718 3716 hashno = size - 1;
3719 3717 ASSERT(hashno > 0);
3720 3718 SFMMU_HASH_UNLOCK(hmebp);
3721 3719
3722 3720 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3723 3721
3724 3722 SFMMU_HASH_LOCK(hmebp);
3725 3723 }
3726 3724
3727 3725 static void
3728 3726 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3729 3727 int hashno)
3730 3728 {
3731 3729 int hmeshift, shadow = 0;
3732 3730 hmeblk_tag hblktag;
3733 3731 struct hmehash_bucket *hmebp;
3734 3732 struct hme_blk *hmeblkp;
3735 3733 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3736 3734
3737 3735 ASSERT(hashno > 0);
3738 3736 hblktag.htag_id = sfmmup;
3739 3737 hblktag.htag_rehash = hashno;
3740 3738 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3741 3739
3742 3740 hmeshift = HME_HASH_SHIFT(hashno);
3743 3741
3744 3742 while (addr < endaddr) {
3745 3743 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3746 3744 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3747 3745 SFMMU_HASH_LOCK(hmebp);
3748 3746 /* inline HME_HASH_SEARCH */
3749 3747 hmeblkp = hmebp->hmeblkp;
3750 3748 pr_hblk = NULL;
3751 3749 while (hmeblkp) {
3752 3750 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) {
3753 3751 /* found hme_blk */
3754 3752 ASSERT(!hmeblkp->hblk_shared);
3755 3753 if (hmeblkp->hblk_shw_bit) {
3756 3754 if (hmeblkp->hblk_shw_mask) {
3757 3755 shadow = 1;
3758 3756 sfmmu_shadow_hcleanup(sfmmup,
3759 3757 hmeblkp, hmebp);
3760 3758 break;
3761 3759 } else {
3762 3760 hmeblkp->hblk_shw_bit = 0;
3763 3761 }
3764 3762 }
3765 3763
3766 3764 /*
3767 3765  * Hblk_hmecnt and hblk_vcnt could be nonzero
3768 3766  * since hblk_unload() does not guarantee that they are zero.
3769 3767 *
3770 3768 * XXX - this could cause tteload() to spin
3771 3769 * where sfmmu_shadow_hcleanup() is called.
3772 3770 */
3773 3771 }
3774 3772
3775 3773 nx_hblk = hmeblkp->hblk_next;
3776 3774 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
3777 3775 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3778 3776 &list, 0);
3779 3777 } else {
3780 3778 pr_hblk = hmeblkp;
3781 3779 }
3782 3780 hmeblkp = nx_hblk;
3783 3781 }
3784 3782
3785 3783 SFMMU_HASH_UNLOCK(hmebp);
3786 3784
3787 3785 if (shadow) {
3788 3786 /*
3789 3787  * We found another shadow hblk and cleaned up its
3790 3788  * children. We need to go back and clean up
3791 3789  * the original hblk so we don't change the
3792 3790  * addr.
3793 3791 */
3794 3792 shadow = 0;
3795 3793 } else {
3796 3794 addr = (caddr_t)roundup((uintptr_t)addr + 1,
3797 3795 (1 << hmeshift));
3798 3796 }
3799 3797 }
3800 3798 sfmmu_hblks_list_purge(&list, 0);
3801 3799 }
3802 3800
3803 3801 /*
3804 3802  * This routine's job is to delete stale invalid shared hmeregion hmeblks that
3805 3803 * may still linger on after pageunload.
3806 3804 */
3807 3805 static void
3808 3806 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz)
3809 3807 {
3810 3808 int hmeshift;
3811 3809 hmeblk_tag hblktag;
3812 3810 struct hmehash_bucket *hmebp;
3813 3811 struct hme_blk *hmeblkp;
3814 3812 struct hme_blk *pr_hblk;
3815 3813 struct hme_blk *list = NULL;
3816 3814
3817 3815 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3818 3816 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3819 3817
3820 3818 hmeshift = HME_HASH_SHIFT(ttesz);
3821 3819 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3822 3820 hblktag.htag_rehash = ttesz;
3823 3821 hblktag.htag_rid = rid;
3824 3822 hblktag.htag_id = srdp;
3825 3823 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3826 3824
3827 3825 SFMMU_HASH_LOCK(hmebp);
3828 3826 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3829 3827 if (hmeblkp != NULL) {
3830 3828 ASSERT(hmeblkp->hblk_shared);
3831 3829 ASSERT(!hmeblkp->hblk_shw_bit);
3832 3830 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3833 3831 panic("sfmmu_cleanup_rhblk: valid hmeblk");
3834 3832 }
3835 3833 ASSERT(!hmeblkp->hblk_lckcnt);
3836 3834 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3837 3835 &list, 0);
3838 3836 }
3839 3837 SFMMU_HASH_UNLOCK(hmebp);
3840 3838 sfmmu_hblks_list_purge(&list, 0);
3841 3839 }
3842 3840
3843 3841 /* ARGSUSED */
3844 3842 static void
3845 3843 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
3846 3844 size_t r_size, void *r_obj, u_offset_t r_objoff)
3847 3845 {
3848 3846 }
3849 3847
3850 3848 /*
3851 3849 * Searches for an hmeblk which maps addr, then unloads this mapping
3852 3850 * and updates *eaddrp, if the hmeblk is found.
3853 3851 */
3854 3852 static void
3855 3853 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr,
3856 3854 caddr_t eaddr, int ttesz, caddr_t *eaddrp)
3857 3855 {
3858 3856 int hmeshift;
3859 3857 hmeblk_tag hblktag;
3860 3858 struct hmehash_bucket *hmebp;
3861 3859 struct hme_blk *hmeblkp;
3862 3860 struct hme_blk *pr_hblk;
3863 3861 struct hme_blk *list = NULL;
3864 3862
3865 3863 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3866 3864 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3867 3865 ASSERT(ttesz >= HBLK_MIN_TTESZ);
3868 3866
3869 3867 hmeshift = HME_HASH_SHIFT(ttesz);
3870 3868 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3871 3869 hblktag.htag_rehash = ttesz;
3872 3870 hblktag.htag_rid = rid;
3873 3871 hblktag.htag_id = srdp;
3874 3872 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift);
3875 3873
3876 3874 SFMMU_HASH_LOCK(hmebp);
3877 3875 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
3878 3876 if (hmeblkp != NULL) {
3879 3877 ASSERT(hmeblkp->hblk_shared);
3880 3878 ASSERT(!hmeblkp->hblk_lckcnt);
3881 3879 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
3882 3880 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr,
3883 3881 eaddr, NULL, HAT_UNLOAD);
3884 3882 ASSERT(*eaddrp > addr);
3885 3883 }
3886 3884 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt);
3887 3885 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
3888 3886 &list, 0);
3889 3887 }
3890 3888 SFMMU_HASH_UNLOCK(hmebp);
3891 3889 sfmmu_hblks_list_purge(&list, 0);
3892 3890 }
3893 3891
3894 3892 static void
3895 3893 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp)
3896 3894 {
3897 3895 int ttesz = rgnp->rgn_pgszc;
3898 3896 size_t rsz = rgnp->rgn_size;
3899 3897 caddr_t rsaddr = rgnp->rgn_saddr;
3900 3898 caddr_t readdr = rsaddr + rsz;
3901 3899 caddr_t rhsaddr;
3902 3900 caddr_t va;
3903 3901 uint_t rid = rgnp->rgn_id;
3904 3902 caddr_t cbsaddr;
3905 3903 caddr_t cbeaddr;
3906 3904 hat_rgn_cb_func_t rcbfunc;
3907 3905 ulong_t cnt;
3908 3906
3909 3907 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
3910 3908 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
3911 3909
3912 3910 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz)));
3913 3911 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz)));
3914 3912 if (ttesz < HBLK_MIN_TTESZ) {
3915 3913 ttesz = HBLK_MIN_TTESZ;
3916 3914 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES);
3917 3915 } else {
3918 3916 rhsaddr = rsaddr;
3919 3917 }
3920 3918
3921 3919 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) {
3922 3920 rcbfunc = sfmmu_rgn_cb_noop;
3923 3921 }
3924 3922
3925 3923 while (ttesz >= HBLK_MIN_TTESZ) {
3926 3924 cbsaddr = rsaddr;
3927 3925 cbeaddr = rsaddr;
3928 3926 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
3929 3927 ttesz--;
3930 3928 continue;
3931 3929 }
3932 3930 cnt = 0;
3933 3931 va = rsaddr;
3934 3932 while (va < readdr) {
3935 3933 ASSERT(va >= rhsaddr);
3936 3934 if (va != cbeaddr) {
3937 3935 if (cbeaddr != cbsaddr) {
3938 3936 ASSERT(cbeaddr > cbsaddr);
3939 3937 (*rcbfunc)(cbsaddr, cbeaddr,
3940 3938 rsaddr, rsz, rgnp->rgn_obj,
3941 3939 rgnp->rgn_objoff);
3942 3940 }
3943 3941 cbsaddr = va;
3944 3942 cbeaddr = va;
3945 3943 }
3946 3944 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr,
3947 3945 ttesz, &cbeaddr);
3948 3946 cnt++;
3949 3947 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz));
3950 3948 }
3951 3949 if (cbeaddr != cbsaddr) {
3952 3950 ASSERT(cbeaddr > cbsaddr);
3953 3951 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr,
3954 3952 rsz, rgnp->rgn_obj,
3955 3953 rgnp->rgn_objoff);
3956 3954 }
3957 3955 ttesz--;
3958 3956 }
3959 3957 }
3960 3958
3961 3959 /*
3962 3960 * Release one hardware address translation lock on the given address range.
3963 3961 */
3964 3962 void
3965 3963 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3966 3964 {
3967 3965 struct hmehash_bucket *hmebp;
3968 3966 hmeblk_tag hblktag;
3969 3967 int hmeshift, hashno = 1;
3970 3968 struct hme_blk *hmeblkp, *list = NULL;
3971 3969 caddr_t endaddr;
3972 3970
3973 3971 ASSERT(sfmmup != NULL);
3974 3972 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 3973
3976 - ASSERT((sfmmup == ksfmmup) ||
3977 - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3974 + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
3978 3975 ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 3976 endaddr = addr + len;
3980 3977 hblktag.htag_id = sfmmup;
3981 3978 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 3979
3983 3980 /*
3984 3981 * Spitfire supports 4 page sizes.
3985 3982 * Most pages are expected to be of the smallest page size (8K) and
3986 3983 * these will not need to be rehashed. 64K pages also don't need to be
3987 3984 * rehashed because an hmeblk spans 64K of address space. 512K pages
3988 3985 * might need 1 rehash and and 4M pages might need 2 rehashes.
3989 3986  * might need 1 rehash and 4M pages might need 2 rehashes.
3990 3987 while (addr < endaddr) {
3991 3988 hmeshift = HME_HASH_SHIFT(hashno);
3992 3989 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
3993 3990 hblktag.htag_rehash = hashno;
3994 3991 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
3995 3992
3996 3993 SFMMU_HASH_LOCK(hmebp);
3997 3994
3998 3995 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
3999 3996 if (hmeblkp != NULL) {
4000 3997 ASSERT(!hmeblkp->hblk_shared);
4001 3998 /*
4002 3999 * If we encounter a shadow hmeblk then
4003 4000 * we know there are no valid hmeblks mapping
4004 4001 * this address at this size or larger.
4005 4002 * Just increment address by the smallest
4006 4003 * page size.
4007 4004 */
4008 4005 if (hmeblkp->hblk_shw_bit) {
4009 4006 addr += MMU_PAGESIZE;
4010 4007 } else {
4011 4008 addr = sfmmu_hblk_unlock(hmeblkp, addr,
4012 4009 endaddr);
4013 4010 }
4014 4011 SFMMU_HASH_UNLOCK(hmebp);
4015 4012 hashno = 1;
4016 4013 continue;
4017 4014 }
4018 4015 SFMMU_HASH_UNLOCK(hmebp);
4019 4016
4020 4017 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4021 4018 /*
4022 4019 * We have traversed the whole list and rehashed
4023 4020  * if necessary without finding the address to unlock,
4024 4021 * which should never happen.
4025 4022 */
4026 4023 panic("sfmmu_unlock: addr not found. "
4027 4024 "addr %p hat %p", (void *)addr, (void *)sfmmup);
4028 4025 } else {
4029 4026 hashno++;
4030 4027 }
4031 4028 }
4032 4029
4033 4030 sfmmu_hblks_list_purge(&list, 0);
4034 4031 }
4035 4032
4036 4033 void
4037 4034 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len,
4038 4035 hat_region_cookie_t rcookie)
4039 4036 {
4040 4037 sf_srd_t *srdp;
4041 4038 sf_region_t *rgnp;
4042 4039 int ttesz;
4043 4040 uint_t rid;
4044 4041 caddr_t eaddr;
4045 4042 caddr_t va;
4046 4043 int hmeshift;
4047 4044 hmeblk_tag hblktag;
4048 4045 struct hmehash_bucket *hmebp;
4049 4046 struct hme_blk *hmeblkp;
4050 4047 struct hme_blk *pr_hblk;
4051 4048 struct hme_blk *list;
4052 4049
4053 4050 if (rcookie == HAT_INVALID_REGION_COOKIE) {
4054 4051 hat_unlock(sfmmup, addr, len);
4055 4052 return;
4056 4053 }
4057 4054
4058 4055 ASSERT(sfmmup != NULL);
4059 4056 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4060 4057 ASSERT(sfmmup != ksfmmup);
4061 4058
4062 4059 srdp = sfmmup->sfmmu_srdp;
4063 4060 rid = (uint_t)((uint64_t)rcookie);
4064 4061 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4065 4062 eaddr = addr + len;
4066 4063 va = addr;
4067 4064 list = NULL;
4068 4065 rgnp = srdp->srd_hmergnp[rid];
4069 4066 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
4070 4067
4071 4068 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc)));
4072 4069 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc)));
4073 4070 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) {
4074 4071 ttesz = HBLK_MIN_TTESZ;
4075 4072 } else {
4076 4073 ttesz = rgnp->rgn_pgszc;
4077 4074 }
4078 4075 while (va < eaddr) {
4079 4076 while (ttesz < rgnp->rgn_pgszc &&
4080 4077 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) {
4081 4078 ttesz++;
4082 4079 }
4083 4080 while (ttesz >= HBLK_MIN_TTESZ) {
4084 4081 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) {
4085 4082 ttesz--;
4086 4083 continue;
4087 4084 }
4088 4085 hmeshift = HME_HASH_SHIFT(ttesz);
4089 4086 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift);
4090 4087 hblktag.htag_rehash = ttesz;
4091 4088 hblktag.htag_rid = rid;
4092 4089 hblktag.htag_id = srdp;
4093 4090 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift);
4094 4091 SFMMU_HASH_LOCK(hmebp);
4095 4092 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk,
4096 4093 &list);
4097 4094 if (hmeblkp == NULL) {
4098 4095 SFMMU_HASH_UNLOCK(hmebp);
4099 4096 ttesz--;
4100 4097 continue;
4101 4098 }
4102 4099 ASSERT(hmeblkp->hblk_shared);
4103 4100 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr);
4104 4101 ASSERT(va >= eaddr ||
4105 4102 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz)));
4106 4103 SFMMU_HASH_UNLOCK(hmebp);
4107 4104 break;
4108 4105 }
4109 4106 if (ttesz < HBLK_MIN_TTESZ) {
4110 4107 panic("hat_unlock_region: addr not found "
4111 4108 "addr %p hat %p", (void *)va, (void *)sfmmup);
4112 4109 }
4113 4110 }
4114 4111 sfmmu_hblks_list_purge(&list, 0);
4115 4112 }
4116 4113
4117 4114 /*
4118 4115 * Function to unlock a range of addresses in an hmeblk. It returns the
4119 4116 * next address that needs to be unlocked.
4120 4117 * Should be called with the hash lock held.
4121 4118 */
4122 4119 static caddr_t
4123 4120 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr)
4124 4121 {
4125 4122 struct sf_hment *sfhme;
4126 4123 tte_t tteold, ttemod;
4127 4124 int ttesz, ret;
4128 4125
4129 4126 ASSERT(in_hblk_range(hmeblkp, addr));
4130 4127 ASSERT(hmeblkp->hblk_shw_bit == 0);
4131 4128
4132 4129 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
4133 4130 ttesz = get_hblk_ttesz(hmeblkp);
4134 4131
4135 4132 HBLKTOHME(sfhme, hmeblkp, addr);
4136 4133 while (addr < endaddr) {
4137 4134 readtte:
4138 4135 sfmmu_copytte(&sfhme->hme_tte, &tteold);
4139 4136 if (TTE_IS_VALID(&tteold)) {
4140 4137
4141 4138 ttemod = tteold;
4142 4139
4143 4140 ret = sfmmu_modifytte_try(&tteold, &ttemod,
4144 4141 &sfhme->hme_tte);
4145 4142
4146 4143 if (ret < 0)
4147 4144 goto readtte;
4148 4145
4149 4146 if (hmeblkp->hblk_lckcnt == 0)
4150 4147 panic("zero hblk lckcnt");
4151 4148
4152 4149 if (((uintptr_t)addr + TTEBYTES(ttesz)) >
4153 4150 (uintptr_t)endaddr)
4154 4151 panic("can't unlock large tte");
4155 4152
4156 4153 ASSERT(hmeblkp->hblk_lckcnt > 0);
4157 4154 atomic_dec_32(&hmeblkp->hblk_lckcnt);
4158 4155 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
4159 4156 } else {
4160 4157 panic("sfmmu_hblk_unlock: invalid tte");
4161 4158 }
4162 4159 addr += TTEBYTES(ttesz);
4163 4160 sfhme++;
4164 4161 }
4165 4162 return (addr);
4166 4163 }
4167 4164
4168 4165 /*
4169 4166 * Physical Address Mapping Framework
4170 4167 *
4171 4168 * General rules:
4172 4169 *
4173 4170 * (1) Applies only to seg_kmem memory pages. To make things easier,
4174 4171 * seg_kpm addresses are also accepted by the routines, but nothing
4175 4172 * is done with them since by definition their PA mappings are static.
4176 4173 * (2) hat_add_callback() may only be called while holding the page lock
4177 4174 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()),
4178 4175  * or passing the HAC_PAGELOCK flag.
4179 4176 * (3) prehandler() and posthandler() may not call hat_add_callback() or
4180 4177 * hat_delete_callback(), nor should they allocate memory. Post quiesce
4181 4178 * callbacks may not sleep or acquire adaptive mutex locks.
4182 4179 * (4) Either prehandler() or posthandler() (but not both) may be specified
4183 4180 * as being NULL. Specifying an errhandler() is optional.
4184 4181 *
4185 4182 * Details of using the framework:
4186 4183 *
4187 4184 * registering a callback (hat_register_callback())
4188 4185 *
4189 4186 * Pass prehandler, posthandler, errhandler addresses
4190 4187  * as described below. If the capture_cpus argument is nonzero,
4191 4188  * the suspend callback to the prehandler will occur with CPUs
4192 4189 * captured and executing xc_loop() and CPUs will remain
4193 4190 * captured until after the posthandler suspend callback
4194 4191 * occurs.
4195 4192 *
4196 4193 * adding a callback (hat_add_callback())
4197 4194 *
4198 4195 * as_pagelock();
4199 4196 * hat_add_callback();
4200 4197 * save returned pfn in private data structures or program registers;
4201 4198 * as_pageunlock();
4202 4199 *
4203 4200 * prehandler()
4204 4201 *
4205 4202 * Stop all accesses by physical address to this memory page.
4206 4203 * Called twice: the first, PRESUSPEND, is a context safe to acquire
4207 4204  * Called twice: the first, PRESUSPEND, runs in a context where it is
4208 4205  * safe to acquire adaptive locks. The second, SUSPEND, is called at
4209 4206  * high PIL with CPUs captured, so adaptive locks may NOT be acquired
4210 4207  * (and all spin locks must be XCALL_PIL or higher locks).
4211 4208 * May return the following errors:
4212 4209 * EIO: A fatal error has occurred. This will result in panic.
4213 4210 * EAGAIN: The page cannot be suspended. This will fail the
4214 4211 * relocation.
4215 4212 * 0: Success.
4216 4213 *
4217 4214 * posthandler()
4218 4215 *
4219 4216 * Save new pfn in private data structures or program registers;
4220 4217 * not allowed to fail (non-zero return values will result in panic).
4221 4218 *
4222 4219 * errhandler()
4223 4220 *
4224 4221 * called when an error occurs related to the callback. Currently
4225 4222  * the only such error is HAT_CB_ERR_LEAKED, which indicates that
4226 4223 * a page is being freed, but there are still outstanding callback(s)
4227 4224 * registered on the page.
4228 4225 *
4229 4226 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory)
4230 4227 *
4231 4228 * stop using physical address
4232 4229 * hat_delete_callback();
4233 4230 *
4234 4231 */
4235 4232
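The block comment above outlines the callback lifecycle only in pseudo-code. The sketch below shows the same sequence as C against the signatures defined in this file (hat_register_callback(), hat_add_callback(), hat_delete_callback()); the xyz_* names, the key value, the handler bodies, and the argument names are hypothetical placeholders for a client module, not code from this file.

static id_t xyz_callback_id;	/* hypothetical module-scope callback class id */

/* PRESUSPEND/SUSPEND: stop physical-address accesses to [va, va + len) */
static int
xyz_prehandler(caddr_t va, uint_t len, uint_t flags, void *pvt)
{
	return (0);		/* or EAGAIN to fail the relocation */
}

/* record the post-relocation pfn; not allowed to fail */
static int
xyz_posthandler(caddr_t va, uint_t len, uint_t flags, void *pvt, pfn_t newpfn)
{
	return (0);
}

void
xyz_attach(void)
{
	/* register once per module; 0x787a79 is a hypothetical unique key */
	xyz_callback_id = hat_register_callback(0x787a79,
	    xyz_prehandler, xyz_posthandler, NULL, 0);
}

int
xyz_track_page(caddr_t vaddr, uint_t len, void *pvt, pfn_t *pfnp,
    void **cookiep)
{
	/* HAC_PAGELOCK: let the HAT lock the underlying page internally */
	return (hat_add_callback(xyz_callback_id, vaddr, len, HAC_PAGELOCK,
	    pvt, pfnp, cookiep));
}

void
xyz_untrack_page(caddr_t vaddr, uint_t len, void *pvt, void *cookie)
{
	/* caller must have stopped using the physical address by now */
	hat_delete_callback(vaddr, len, pvt, HAC_PAGELOCK, cookie);
}
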
4236 4233 /*
4237 4234 * Register a callback class. Each subsystem should do this once and
4238 4235 * cache the id_t returned for use in setting up and tearing down callbacks.
4239 4236 *
4240 4237 * There is no facility for removing callback IDs once they are created;
4241 4238 * the "key" should be unique for each module, so in case a module is unloaded
4242 4239 * and subsequently re-loaded, we can recycle the module's previous entry.
4243 4240 */
4244 4241 id_t
4245 4242 hat_register_callback(int key,
4246 4243 int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4247 4244 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4248 4245 int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4249 4246 int capture_cpus)
4250 4247 {
4251 4248 id_t id;
4252 4249
4253 4250 /*
4254 4251 * Search the table for a pre-existing callback associated with
4255 4252 * the identifier "key". If one exists, we re-use that entry in
4256 4253 * the table for this instance, otherwise we assign the next
4257 4254 * available table slot.
4258 4255 */
4259 4256 for (id = 0; id < sfmmu_max_cb_id; id++) {
4260 4257 if (sfmmu_cb_table[id].key == key)
4261 4258 break;
4262 4259 }
4263 4260
4264 4261 if (id == sfmmu_max_cb_id) {
4265 4262 id = sfmmu_cb_nextid++;
4266 4263 if (id >= sfmmu_max_cb_id)
4267 4264 panic("hat_register_callback: out of callback IDs");
4268 4265 }
4269 4266
4270 4267 ASSERT(prehandler != NULL || posthandler != NULL);
4271 4268
4272 4269 sfmmu_cb_table[id].key = key;
4273 4270 sfmmu_cb_table[id].prehandler = prehandler;
4274 4271 sfmmu_cb_table[id].posthandler = posthandler;
4275 4272 sfmmu_cb_table[id].errhandler = errhandler;
4276 4273 sfmmu_cb_table[id].capture_cpus = capture_cpus;
4277 4274
4278 4275 return (id);
4279 4276 }
4280 4277
4281 4278 #define HAC_COOKIE_NONE (void *)-1
4282 4279
4283 4280 /*
4284 4281 * Add relocation callbacks to the specified addr/len which will be called
4285 4282 * when relocating the associated page. See the description of pre and
4286 4283 * posthandler above for more details.
4287 4284 *
4288 4285 * If HAC_PAGELOCK is included in flags, the underlying memory page is
4289 4286 * locked internally so the caller must be able to deal with the callback
4290 4287 * running even before this function has returned. If HAC_PAGELOCK is not
4291 4288 * set, it is assumed that the underlying memory pages are locked.
4292 4289 *
4293 4290 * Since the caller must track the individual page boundaries anyway,
4294 4291 * we only allow a callback to be added to a single page (large
4295 4292 * or small). Thus [addr, addr + len) MUST be contained within a single
4296 4293 * page.
4297 4294 *
4298 4295 * Registering multiple callbacks on the same [addr, addr+len) is supported,
4299 4296 * _provided_that_ a unique parameter is specified for each callback.
4300 4297 * If multiple callbacks are registered on the same range the callback will
4301 4298 * be invoked with each unique parameter. Registering the same callback with
4302 4299 * the same argument more than once will result in corrupted kernel state.
4303 4300 *
4304 4301 * Returns the pfn of the underlying kernel page in *rpfn
4305 4302 * on success, or PFN_INVALID on failure.
4306 4303 *
4307 4304 * cookiep (if passed) provides storage space for an opaque cookie
4308 4305 * to return later to hat_delete_callback(). This cookie makes the callback
4309 4306 * deletion significantly quicker by avoiding a potentially lengthy hash
4310 4307 * search.
4311 4308 *
4312 4309  * Return values:
4313 4310 * 0: success
4314 4311 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4315 4312 * EINVAL: callback ID is not valid
4316 4313 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4317 4314 * space
4318 4315 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4319 4316 */
4320 4317 int
4321 4318 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4322 4319 void *pvt, pfn_t *rpfn, void **cookiep)
4323 4320 {
4324 4321 struct hmehash_bucket *hmebp;
4325 4322 hmeblk_tag hblktag;
4326 4323 struct hme_blk *hmeblkp;
4327 4324 int hmeshift, hashno;
4328 4325 caddr_t saddr, eaddr, baseaddr;
4329 4326 struct pa_hment *pahmep;
4330 4327 struct sf_hment *sfhmep, *osfhmep;
4331 4328 kmutex_t *pml;
4332 4329 tte_t tte;
4333 4330 page_t *pp;
4334 4331 vnode_t *vp;
4335 4332 u_offset_t off;
4336 4333 pfn_t pfn;
4337 4334 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4338 4335 int locked = 0;
4339 4336
4340 4337 /*
4341 4338 * For KPM mappings, just return the physical address since we
4342 4339 * don't need to register any callbacks.
4343 4340 */
4344 4341 if (IS_KPM_ADDR(vaddr)) {
4345 4342 uint64_t paddr;
4346 4343 SFMMU_KPM_VTOP(vaddr, paddr);
4347 4344 *rpfn = btop(paddr);
4348 4345 if (cookiep != NULL)
4349 4346 *cookiep = HAC_COOKIE_NONE;
4350 4347 return (0);
4351 4348 }
4352 4349
4353 4350 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) {
4354 4351 *rpfn = PFN_INVALID;
4355 4352 return (EINVAL);
4356 4353 }
4357 4354
4358 4355 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) {
4359 4356 *rpfn = PFN_INVALID;
4360 4357 return (ENOMEM);
4361 4358 }
4362 4359
4363 4360 sfhmep = &pahmep->sfment;
4364 4361
4365 4362 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4366 4363 eaddr = saddr + len;
4367 4364
4368 4365 rehash:
4369 4366 /* Find the mapping(s) for this page */
4370 4367 for (hashno = TTE64K, hmeblkp = NULL;
4371 4368 hmeblkp == NULL && hashno <= mmu_hashcnt;
4372 4369 hashno++) {
4373 4370 hmeshift = HME_HASH_SHIFT(hashno);
4374 4371 hblktag.htag_id = ksfmmup;
4375 4372 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4376 4373 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4377 4374 hblktag.htag_rehash = hashno;
4378 4375 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4379 4376
4380 4377 SFMMU_HASH_LOCK(hmebp);
4381 4378
4382 4379 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4383 4380
4384 4381 if (hmeblkp == NULL)
4385 4382 SFMMU_HASH_UNLOCK(hmebp);
4386 4383 }
4387 4384
4388 4385 if (hmeblkp == NULL) {
4389 4386 kmem_cache_free(pa_hment_cache, pahmep);
4390 4387 *rpfn = PFN_INVALID;
4391 4388 return (ENXIO);
4392 4389 }
4393 4390
4394 4391 ASSERT(!hmeblkp->hblk_shared);
4395 4392
4396 4393 HBLKTOHME(osfhmep, hmeblkp, saddr);
4397 4394 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4398 4395
4399 4396 if (!TTE_IS_VALID(&tte)) {
4400 4397 SFMMU_HASH_UNLOCK(hmebp);
4401 4398 kmem_cache_free(pa_hment_cache, pahmep);
4402 4399 *rpfn = PFN_INVALID;
4403 4400 return (ENXIO);
4404 4401 }
4405 4402
4406 4403 /*
4407 4404 * Make sure the boundaries for the callback fall within this
4408 4405 * single mapping.
4409 4406 */
4410 4407 baseaddr = (caddr_t)get_hblk_base(hmeblkp);
4411 4408 ASSERT(saddr >= baseaddr);
4412 4409 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) {
4413 4410 SFMMU_HASH_UNLOCK(hmebp);
4414 4411 kmem_cache_free(pa_hment_cache, pahmep);
4415 4412 *rpfn = PFN_INVALID;
4416 4413 return (ERANGE);
4417 4414 }
4418 4415
4419 4416 pfn = sfmmu_ttetopfn(&tte, vaddr);
4420 4417
4421 4418 /*
4422 4419 * The pfn may not have a page_t underneath in which case we
4423 4420 * just return it. This can happen if we are doing I/O to a
4424 4421 * static portion of the kernel's address space, for instance.
4425 4422 */
4426 4423 pp = osfhmep->hme_page;
4427 4424 if (pp == NULL) {
4428 4425 SFMMU_HASH_UNLOCK(hmebp);
4429 4426 kmem_cache_free(pa_hment_cache, pahmep);
4430 4427 *rpfn = pfn;
4431 4428 if (cookiep)
4432 4429 *cookiep = HAC_COOKIE_NONE;
4433 4430 return (0);
4434 4431 }
4435 4432 ASSERT(pp == PP_PAGEROOT(pp));
4436 4433
4437 4434 vp = pp->p_vnode;
4438 4435 off = pp->p_offset;
4439 4436
4440 4437 pml = sfmmu_mlist_enter(pp);
4441 4438
4442 4439 if (flags & HAC_PAGELOCK) {
4443 4440 if (!page_trylock(pp, SE_SHARED)) {
4444 4441 /*
4445 4442 * Somebody is holding SE_EXCL lock. Might
4446 4443 * even be hat_page_relocate(). Drop all
4447 4444 * our locks, lookup the page in &kvp, and
4448 4445 * retry. If it doesn't exist in &kvp and &zvp,
4449 4446 * then we must be dealing with a kernel mapped
4450 4447 * page which doesn't actually belong to
4451 4448 * segkmem so we punt.
4452 4449 */
4453 4450 sfmmu_mlist_exit(pml);
4454 4451 SFMMU_HASH_UNLOCK(hmebp);
4455 4452 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4456 4453
4457 4454 /* check zvp before giving up */
4458 4455 if (pp == NULL)
4459 4456 pp = page_lookup(&zvp, (u_offset_t)saddr,
4460 4457 SE_SHARED);
4461 4458
4462 4459 /* Okay, we didn't find it, give up */
4463 4460 if (pp == NULL) {
4464 4461 kmem_cache_free(pa_hment_cache, pahmep);
4465 4462 *rpfn = pfn;
4466 4463 if (cookiep)
4467 4464 *cookiep = HAC_COOKIE_NONE;
4468 4465 return (0);
4469 4466 }
4470 4467 page_unlock(pp);
4471 4468 goto rehash;
4472 4469 }
4473 4470 locked = 1;
4474 4471 }
4475 4472
4476 4473 if (!PAGE_LOCKED(pp) && !panicstr)
4477 4474 panic("hat_add_callback: page 0x%p not locked", (void *)pp);
4478 4475
4479 4476 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4480 4477 pp->p_offset != off) {
4481 4478 /*
4482 4479 * The page moved before we got our hands on it. Drop
4483 4480 * all the locks and try again.
4484 4481 */
4485 4482 ASSERT((flags & HAC_PAGELOCK) != 0);
4486 4483 sfmmu_mlist_exit(pml);
4487 4484 SFMMU_HASH_UNLOCK(hmebp);
4488 4485 page_unlock(pp);
4489 4486 locked = 0;
4490 4487 goto rehash;
4491 4488 }
4492 4489
4493 4490 if (!VN_ISKAS(vp)) {
4494 4491 /*
4495 4492 * This is not a segkmem page but another page which
4496 4493 * has been kernel mapped. It had better have at least
4497 4494 * a share lock on it. Return the pfn.
4498 4495 */
4499 4496 sfmmu_mlist_exit(pml);
4500 4497 SFMMU_HASH_UNLOCK(hmebp);
4501 4498 if (locked)
4502 4499 page_unlock(pp);
4503 4500 kmem_cache_free(pa_hment_cache, pahmep);
4504 4501 ASSERT(PAGE_LOCKED(pp));
4505 4502 *rpfn = pfn;
4506 4503 if (cookiep)
4507 4504 *cookiep = HAC_COOKIE_NONE;
4508 4505 return (0);
4509 4506 }
4510 4507
4511 4508 /*
4512 4509  * Set up this pa_hment and link its embedded dummy sf_hment into
4513 4510 * the mapping list.
4514 4511 */
4515 4512 pp->p_share++;
4516 4513 pahmep->cb_id = callback_id;
4517 4514 pahmep->addr = vaddr;
4518 4515 pahmep->len = len;
4519 4516 pahmep->refcnt = 1;
4520 4517 pahmep->flags = 0;
4521 4518 pahmep->pvt = pvt;
4522 4519
4523 4520 sfhmep->hme_tte.ll = 0;
4524 4521 sfhmep->hme_data = pahmep;
4525 4522 sfhmep->hme_prev = osfhmep;
4526 4523 sfhmep->hme_next = osfhmep->hme_next;
4527 4524
4528 4525 if (osfhmep->hme_next)
4529 4526 osfhmep->hme_next->hme_prev = sfhmep;
4530 4527
4531 4528 osfhmep->hme_next = sfhmep;
4532 4529
4533 4530 sfmmu_mlist_exit(pml);
4534 4531 SFMMU_HASH_UNLOCK(hmebp);
4535 4532
4536 4533 if (locked)
4537 4534 page_unlock(pp);
4538 4535
4539 4536 *rpfn = pfn;
4540 4537 if (cookiep)
4541 4538 *cookiep = (void *)pahmep;
4542 4539
4543 4540 return (0);
4544 4541 }
4545 4542
4546 4543 /*
4547 4544 * Remove the relocation callbacks from the specified addr/len.
4548 4545 */
4549 4546 void
4550 4547 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4551 4548 void *cookie)
4552 4549 {
4553 4550 struct hmehash_bucket *hmebp;
4554 4551 hmeblk_tag hblktag;
4555 4552 struct hme_blk *hmeblkp;
4556 4553 int hmeshift, hashno;
4557 4554 caddr_t saddr;
4558 4555 struct pa_hment *pahmep;
4559 4556 struct sf_hment *sfhmep, *osfhmep;
4560 4557 kmutex_t *pml;
4561 4558 tte_t tte;
4562 4559 page_t *pp;
4563 4560 vnode_t *vp;
4564 4561 u_offset_t off;
4565 4562 int locked = 0;
4566 4563
4567 4564 /*
4568 4565 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to
4569 4566 * remove so just return.
4570 4567 */
4571 4568 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr))
4572 4569 return;
4573 4570
4574 4571 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK);
4575 4572
4576 4573 rehash:
4577 4574 /* Find the mapping(s) for this page */
4578 4575 for (hashno = TTE64K, hmeblkp = NULL;
4579 4576 hmeblkp == NULL && hashno <= mmu_hashcnt;
4580 4577 hashno++) {
4581 4578 hmeshift = HME_HASH_SHIFT(hashno);
4582 4579 hblktag.htag_id = ksfmmup;
4583 4580 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4584 4581 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift);
4585 4582 hblktag.htag_rehash = hashno;
4586 4583 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift);
4587 4584
4588 4585 SFMMU_HASH_LOCK(hmebp);
4589 4586
4590 4587 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
4591 4588
4592 4589 if (hmeblkp == NULL)
4593 4590 SFMMU_HASH_UNLOCK(hmebp);
4594 4591 }
4595 4592
4596 4593 if (hmeblkp == NULL)
4597 4594 return;
4598 4595
4599 4596 ASSERT(!hmeblkp->hblk_shared);
4600 4597
4601 4598 HBLKTOHME(osfhmep, hmeblkp, saddr);
4602 4599
4603 4600 sfmmu_copytte(&osfhmep->hme_tte, &tte);
4604 4601 if (!TTE_IS_VALID(&tte)) {
4605 4602 SFMMU_HASH_UNLOCK(hmebp);
4606 4603 return;
4607 4604 }
4608 4605
4609 4606 pp = osfhmep->hme_page;
4610 4607 if (pp == NULL) {
4611 4608 SFMMU_HASH_UNLOCK(hmebp);
4612 4609 ASSERT(cookie == NULL);
4613 4610 return;
4614 4611 }
4615 4612
4616 4613 vp = pp->p_vnode;
4617 4614 off = pp->p_offset;
4618 4615
4619 4616 pml = sfmmu_mlist_enter(pp);
4620 4617
4621 4618 if (flags & HAC_PAGELOCK) {
4622 4619 if (!page_trylock(pp, SE_SHARED)) {
4623 4620 /*
4624 4621 * Somebody is holding SE_EXCL lock. Might
4625 4622 * even be hat_page_relocate(). Drop all
4626 4623 * our locks, lookup the page in &kvp, and
4627 4624 * retry. If it doesn't exist in &kvp and &zvp,
4628 4625 * then we must be dealing with a kernel mapped
4629 4626 * page which doesn't actually belong to
4630 4627 * segkmem so we punt.
4631 4628 */
4632 4629 sfmmu_mlist_exit(pml);
4633 4630 SFMMU_HASH_UNLOCK(hmebp);
4634 4631 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED);
4635 4632 /* check zvp before giving up */
4636 4633 if (pp == NULL)
4637 4634 pp = page_lookup(&zvp, (u_offset_t)saddr,
4638 4635 SE_SHARED);
4639 4636
4640 4637 if (pp == NULL) {
4641 4638 ASSERT(cookie == NULL);
4642 4639 return;
4643 4640 }
4644 4641 page_unlock(pp);
4645 4642 goto rehash;
4646 4643 }
4647 4644 locked = 1;
4648 4645 }
4649 4646
4650 4647 ASSERT(PAGE_LOCKED(pp));
4651 4648
4652 4649 if (osfhmep->hme_page != pp || pp->p_vnode != vp ||
4653 4650 pp->p_offset != off) {
4654 4651 /*
4655 4652 * The page moved before we got our hands on it. Drop
4656 4653 * all the locks and try again.
4657 4654 */
4658 4655 ASSERT((flags & HAC_PAGELOCK) != 0);
4659 4656 sfmmu_mlist_exit(pml);
4660 4657 SFMMU_HASH_UNLOCK(hmebp);
4661 4658 page_unlock(pp);
4662 4659 locked = 0;
4663 4660 goto rehash;
4664 4661 }
4665 4662
4666 4663 if (!VN_ISKAS(vp)) {
4667 4664 /*
4668 4665 * This is not a segkmem page but another page which
4669 4666 * has been kernel mapped.
4670 4667 */
4671 4668 sfmmu_mlist_exit(pml);
4672 4669 SFMMU_HASH_UNLOCK(hmebp);
4673 4670 if (locked)
4674 4671 page_unlock(pp);
4675 4672 ASSERT(cookie == NULL);
4676 4673 return;
4677 4674 }
4678 4675
4679 4676 if (cookie != NULL) {
4680 4677 pahmep = (struct pa_hment *)cookie;
4681 4678 sfhmep = &pahmep->sfment;
4682 4679 } else {
4683 4680 for (sfhmep = pp->p_mapping; sfhmep != NULL;
4684 4681 sfhmep = sfhmep->hme_next) {
4685 4682
4686 4683 /*
4687 4684 * skip va<->pa mappings
4688 4685 */
4689 4686 if (!IS_PAHME(sfhmep))
4690 4687 continue;
4691 4688
4692 4689 pahmep = sfhmep->hme_data;
4693 4690 ASSERT(pahmep != NULL);
4694 4691
4695 4692 /*
4696 4693 * if pa_hment matches, remove it
4697 4694 */
4698 4695 if ((pahmep->pvt == pvt) &&
4699 4696 (pahmep->addr == vaddr) &&
4700 4697 (pahmep->len == len)) {
4701 4698 break;
4702 4699 }
4703 4700 }
4704 4701 }
4705 4702
4706 4703 if (sfhmep == NULL) {
4707 4704 if (!panicstr) {
4708 4705 panic("hat_delete_callback: pa_hment not found, pp %p",
4709 4706 (void *)pp);
4710 4707 }
4711 4708 return;
4712 4709 }
4713 4710
4714 4711 /*
4715 4712 * Note: at this point a valid kernel mapping must still be
4716 4713 * present on this page.
4717 4714 */
4718 4715 pp->p_share--;
4719 4716 if (pp->p_share <= 0)
4720 4717 panic("hat_delete_callback: zero p_share");
4721 4718
4722 4719 if (--pahmep->refcnt == 0) {
4723 4720 if (pahmep->flags != 0)
4724 4721 panic("hat_delete_callback: pa_hment is busy");
4725 4722
4726 4723 /*
4727 4724 * Remove sfhmep from the mapping list for the page.
4728 4725 */
4729 4726 if (sfhmep->hme_prev) {
4730 4727 sfhmep->hme_prev->hme_next = sfhmep->hme_next;
4731 4728 } else {
4732 4729 pp->p_mapping = sfhmep->hme_next;
4733 4730 }
4734 4731
4735 4732 if (sfhmep->hme_next)
4736 4733 sfhmep->hme_next->hme_prev = sfhmep->hme_prev;
4737 4734
4738 4735 sfmmu_mlist_exit(pml);
4739 4736 SFMMU_HASH_UNLOCK(hmebp);
4740 4737
4741 4738 if (locked)
4742 4739 page_unlock(pp);
4743 4740
4744 4741 kmem_cache_free(pa_hment_cache, pahmep);
4745 4742 return;
4746 4743 }
4747 4744
4748 4745 sfmmu_mlist_exit(pml);
4749 4746 SFMMU_HASH_UNLOCK(hmebp);
4750 4747 if (locked)
4751 4748 page_unlock(pp);
4752 4749 }
4753 4750
4754 4751 /*
4755 4752 * hat_probe returns 1 if the translation for the address 'addr' is
4756 4753 * loaded, zero otherwise.
4757 4754 *
4758 4755  * hat_probe should be used only for advisory purposes because it may
4759 4756 * occasionally return the wrong value. The implementation must guarantee that
4760 4757 * returning the wrong value is a very rare event. hat_probe is used
4761 4758 * to implement optimizations in the segment drivers.
4762 4759 *
4763 4760 */
4764 4761 int
4765 4762 hat_probe(struct hat *sfmmup, caddr_t addr)
4766 4763 {
4767 4764 pfn_t pfn;
4768 4765 tte_t tte;
4769 4766
4770 4767 ASSERT(sfmmup != NULL);
4771 4768 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 4769
4773 - ASSERT((sfmmup == ksfmmup) ||
4774 - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4770 + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4775 4771
4776 4772 if (sfmmup == ksfmmup) {
4777 4773 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 4774 == PFN_SUSPENDED) {
4779 4775 sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 4776 }
4781 4777 } else {
4782 4778 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL);
4783 4779 }
4784 4780
4785 4781 if (pfn != PFN_INVALID)
4786 4782 return (1);
4787 4783 else
4788 4784 return (0);
4789 4785 }
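Since hat_probe() is advisory, callers must treat its answer as a hint rather than a guarantee. A minimal sketch of the intended usage pattern follows (illustrative only; the segment-driver context and the helper name are assumptions, not part of this file):

    #include <vm/as.h>
    #include <vm/hat.h>
    #include <vm/seg.h>

    /*
     * Hypothetical helper: nonzero means the caller should take the normal
     * fault path because the translation is probably not loaded.  Since
     * hat_probe() can occasionally be wrong, the result is only a hint.
     */
    static int
    seg_needs_fault(struct seg *seg, caddr_t addr)
    {
            return (hat_probe(seg->s_as->a_hat, addr) == 0);
    }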
4790 4786
4791 4787 ssize_t
4792 4788 hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4793 4789 {
4794 4790 tte_t tte;
4795 4791
4796 4792 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4797 4793
4798 4794 if (sfmmup == ksfmmup) {
4799 4795 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4800 4796 return (-1);
4801 4797 }
4802 4798 } else {
4803 4799 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4804 4800 return (-1);
4805 4801 }
4806 4802 }
4807 4803
4808 4804 ASSERT(TTE_IS_VALID(&tte));
4809 4805 return (TTEBYTES(TTE_CSZ(&tte)));
4810 4806 }
4811 4807
4812 4808 uint_t
4813 4809 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4814 4810 {
4815 4811 tte_t tte;
4816 4812
4817 4813 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4818 4814
4819 4815 if (sfmmup == ksfmmup) {
4820 4816 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4821 4817 tte.ll = 0;
4822 4818 }
4823 4819 } else {
4824 4820 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4825 4821 tte.ll = 0;
4826 4822 }
4827 4823 }
4828 4824 if (TTE_IS_VALID(&tte)) {
4829 4825 *attr = sfmmu_ptov_attr(&tte);
4830 4826 return (0);
4831 4827 }
4832 4828 *attr = 0;
4833 4829 return ((uint_t)0xffffffff);
4834 4830 }
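hat_getattr() fills *attr and returns 0 only when a valid translation exists; otherwise it returns (uint_t)0xffffffff. A minimal sketch of the calling convention (illustrative only; the helper name is an assumption):

    #include <sys/mman.h>
    #include <vm/as.h>
    #include <vm/hat.h>

    /* Hypothetical helper: is this kernel VA currently mapped writable? */
    static int
    kva_is_writable(caddr_t addr)
    {
            uint_t attr;

            if (hat_getattr(kas.a_hat, addr, &attr) != 0)
                    return (0);     /* no valid translation at addr */
            return ((attr & PROT_WRITE) != 0);
    }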
4835 4831
4836 4832 /*
4837 4833 * Enables more attributes on specified address range (ie. logical OR)
4838 4834 */
4839 4835 void
4840 4836 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4841 4837 {
4842 4838 if (hat->sfmmu_xhat_provider) {
4843 4839 XHAT_SETATTR(hat, addr, len, attr);
4844 4840 return;
4845 4841 } else {
4846 4842 /*
4847 4843 * This must be a CPU HAT. If the address space has
4848 4844 * XHATs attached, change attributes for all of them,
4849 4845 * just in case
4850 4846 */
4851 4847 ASSERT(hat->sfmmu_as != NULL);
4852 4848 if (hat->sfmmu_as->a_xhat != NULL)
4853 4849 xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4854 4850 }
4855 4851
4856 4852 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4857 4853 }
4858 4854
4859 4855 /*
4860 4856 * Assigns attributes to the specified address range. All the attributes
4861 4857 * are specified.
4862 4858 */
4863 4859 void
4864 4860 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4865 4861 {
4866 4862 if (hat->sfmmu_xhat_provider) {
4867 4863 XHAT_CHGATTR(hat, addr, len, attr);
4868 4864 return;
4869 4865 } else {
4870 4866 /*
4871 4867 * This must be a CPU HAT. If the address space has
4872 4868 * XHATs attached, change attributes for all of them,
4873 4869 * just in case
4874 4870 */
4875 4871 ASSERT(hat->sfmmu_as != NULL);
4876 4872 if (hat->sfmmu_as->a_xhat != NULL)
4877 4873 xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4878 4874 }
4879 4875
4880 4876 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4881 4877 }
4882 4878
4883 4879 /*
4884 4880  * Remove attributes on the specified address range (ie. logical NAND)
4885 4881 */
4886 4882 void
4887 4883 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4888 4884 {
4889 4885 if (hat->sfmmu_xhat_provider) {
4890 4886 XHAT_CLRATTR(hat, addr, len, attr);
4891 4887 return;
4892 4888 } else {
4893 4889 /*
4894 4890 * This must be a CPU HAT. If the address space has
4895 4891 * XHATs attached, change attributes for all of them,
4896 4892 * just in case
4897 4893 */
4898 4894 ASSERT(hat->sfmmu_as != NULL);
4899 4895 if (hat->sfmmu_as->a_xhat != NULL)
4900 4896 xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4901 4897 }
4902 4898
4903 4899 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4904 4900 }
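The three attribute entry points above differ only in how the supplied bits combine with a mapping's current attributes: hat_setattr() ORs them in, hat_chgattr() replaces the full set, and hat_clrattr() NANDs them out. A short sketch (illustrative only; the wrapper name and caller context are assumptions):

    #include <sys/mman.h>
    #include <vm/hat.h>

    /* Hypothetical illustration of how the supplied bits are combined. */
    static void
    attr_combining_example(struct hat *hat, caddr_t addr, size_t len)
    {
            hat_setattr(hat, addr, len, PROT_WRITE);            /* OR in write */
            hat_chgattr(hat, addr, len, PROT_READ | PROT_EXEC); /* replace all */
            hat_clrattr(hat, addr, len, PROT_WRITE);            /* NAND out write */
    }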
4905 4901
4906 4902 /*
4907 4903 * Change attributes on an address range to that specified by attr and mode.
4908 4904 */
4909 4905 static void
4910 4906 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4911 4907 int mode)
4912 4908 {
4913 4909 struct hmehash_bucket *hmebp;
4914 4910 hmeblk_tag hblktag;
4915 4911 int hmeshift, hashno = 1;
4916 4912 struct hme_blk *hmeblkp, *list = NULL;
4917 4913 caddr_t endaddr;
4918 4914 cpuset_t cpuset;
4919 4915 demap_range_t dmr;
4920 4916
4921 4917 CPUSET_ZERO(cpuset);
4922 4918
4923 - ASSERT((sfmmup == ksfmmup) ||
4924 - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4919 + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
4925 4920 ASSERT((len & MMU_PAGEOFFSET) == 0);
4926 4921 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
4927 4922
4928 4923 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) &&
4929 4924 ((addr + len) > (caddr_t)USERLIMIT)) {
4930 4925 panic("user addr %p in kernel space",
4931 4926 (void *)addr);
4932 4927 }
4933 4928
4934 4929 endaddr = addr + len;
4935 4930 hblktag.htag_id = sfmmup;
4936 4931 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
4937 4932 DEMAP_RANGE_INIT(sfmmup, &dmr);
4938 4933
4939 4934 while (addr < endaddr) {
4940 4935 hmeshift = HME_HASH_SHIFT(hashno);
4941 4936 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
4942 4937 hblktag.htag_rehash = hashno;
4943 4938 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
4944 4939
4945 4940 SFMMU_HASH_LOCK(hmebp);
4946 4941
4947 4942 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
4948 4943 if (hmeblkp != NULL) {
4949 4944 ASSERT(!hmeblkp->hblk_shared);
4950 4945 /*
4951 4946 * We've encountered a shadow hmeblk so skip the range
4952 4947 * of the next smaller mapping size.
4953 4948 */
4954 4949 if (hmeblkp->hblk_shw_bit) {
4955 4950 ASSERT(sfmmup != ksfmmup);
4956 4951 ASSERT(hashno > 1);
4957 4952 addr = (caddr_t)P2END((uintptr_t)addr,
4958 4953 TTEBYTES(hashno - 1));
4959 4954 } else {
4960 4955 addr = sfmmu_hblk_chgattr(sfmmup,
4961 4956 hmeblkp, addr, endaddr, &dmr, attr, mode);
4962 4957 }
4963 4958 SFMMU_HASH_UNLOCK(hmebp);
4964 4959 hashno = 1;
4965 4960 continue;
4966 4961 }
4967 4962 SFMMU_HASH_UNLOCK(hmebp);
4968 4963
4969 4964 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
4970 4965 /*
4971 4966 * We have traversed the whole list and rehashed
4972 4967 * if necessary without finding the address to chgattr.
4973 4968 * This is ok, so we increment the address by the
4974 4969 * smallest hmeblk range for kernel mappings or for
4975 4970 * user mappings with no large pages, and the largest
4976 4971 * hmeblk range, to account for shadow hmeblks, for
4977 4972 * user mappings with large pages and continue.
4978 4973 */
4979 4974 if (sfmmup == ksfmmup)
4980 4975 addr = (caddr_t)P2END((uintptr_t)addr,
4981 4976 TTEBYTES(1));
4982 4977 else
4983 4978 addr = (caddr_t)P2END((uintptr_t)addr,
4984 4979 TTEBYTES(hashno));
4985 4980 hashno = 1;
4986 4981 } else {
4987 4982 hashno++;
4988 4983 }
4989 4984 }
4990 4985
4991 4986 sfmmu_hblks_list_purge(&list, 0);
4992 4987 DEMAP_RANGE_FLUSH(&dmr);
4993 4988 cpuset = sfmmup->sfmmu_cpusran;
4994 4989 xt_sync(cpuset);
4995 4990 }
4996 4991
4997 4992 /*
4998 4993 * This function chgattr on a range of addresses in an hmeblk. It returns the
4999 4994  * next address that needs to be chgattr.
5000 4995 * It should be called with the hash lock held.
5001 4996 * XXX It should be possible to optimize chgattr by not flushing every time but
5002 4997 * on the other hand:
5003 4998 * 1. do one flush crosscall.
5004 4999 * 2. only flush if we are increasing permissions (make sure this will work)
5005 5000 */
5006 5001 static caddr_t
5007 5002 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5008 5003 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
5009 5004 {
5010 5005 tte_t tte, tteattr, tteflags, ttemod;
5011 5006 struct sf_hment *sfhmep;
5012 5007 int ttesz;
5013 5008 struct page *pp = NULL;
5014 5009 kmutex_t *pml, *pmtx;
5015 5010 int ret;
5016 5011 int use_demap_range;
5017 5012 #if defined(SF_ERRATA_57)
5018 5013 int check_exec;
5019 5014 #endif
5020 5015
5021 5016 ASSERT(in_hblk_range(hmeblkp, addr));
5022 5017 ASSERT(hmeblkp->hblk_shw_bit == 0);
5023 5018 ASSERT(!hmeblkp->hblk_shared);
5024 5019
5025 5020 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5026 5021 ttesz = get_hblk_ttesz(hmeblkp);
5027 5022
5028 5023 /*
5029 5024 * Flush the current demap region if addresses have been
5030 5025 * skipped or the page size doesn't match.
5031 5026 */
5032 5027 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp));
5033 5028 if (use_demap_range) {
5034 5029 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5035 5030 } else if (dmrp != NULL) {
5036 5031 DEMAP_RANGE_FLUSH(dmrp);
5037 5032 }
5038 5033
5039 5034 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags);
5040 5035 #if defined(SF_ERRATA_57)
5041 5036 check_exec = (sfmmup != ksfmmup) &&
5042 5037 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5043 5038 TTE_IS_EXECUTABLE(&tteattr);
5044 5039 #endif
5045 5040 HBLKTOHME(sfhmep, hmeblkp, addr);
5046 5041 while (addr < endaddr) {
5047 5042 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5048 5043 if (TTE_IS_VALID(&tte)) {
5049 5044 if ((tte.ll & tteflags.ll) == tteattr.ll) {
5050 5045 /*
5051 5046 * if the new attr is the same as old
5052 5047 * continue
5053 5048 */
5054 5049 goto next_addr;
5055 5050 }
5056 5051 if (!TTE_IS_WRITABLE(&tteattr)) {
5057 5052 /*
5058 5053				 * make sure we clear hw modify bit if we
5059 5054				 * are removing write protections
5060 5055 */
5061 5056 tteflags.tte_intlo |= TTE_HWWR_INT;
5062 5057 }
5063 5058
5064 5059 pml = NULL;
5065 5060 pp = sfhmep->hme_page;
5066 5061 if (pp) {
5067 5062 pml = sfmmu_mlist_enter(pp);
5068 5063 }
5069 5064
5070 5065 if (pp != sfhmep->hme_page) {
5071 5066 /*
5072 5067 * tte must have been unloaded.
5073 5068 */
5074 5069 ASSERT(pml);
5075 5070 sfmmu_mlist_exit(pml);
5076 5071 continue;
5077 5072 }
5078 5073
5079 5074 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5080 5075
5081 5076 ttemod = tte;
5082 5077 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll;
5083 5078 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte));
5084 5079
5085 5080 #if defined(SF_ERRATA_57)
5086 5081 if (check_exec && addr < errata57_limit)
5087 5082 ttemod.tte_exec_perm = 0;
5088 5083 #endif
5089 5084 ret = sfmmu_modifytte_try(&tte, &ttemod,
5090 5085 &sfhmep->hme_tte);
5091 5086
5092 5087 if (ret < 0) {
5093 5088 /* tte changed underneath us */
5094 5089 if (pml) {
5095 5090 sfmmu_mlist_exit(pml);
5096 5091 }
5097 5092 continue;
5098 5093 }
5099 5094
5100 5095 if (tteflags.tte_intlo & TTE_HWWR_INT) {
5101 5096 /*
5102 5097 * need to sync if we are clearing modify bit.
5103 5098 */
5104 5099 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5105 5100 }
5106 5101
5107 5102 if (pp && PP_ISRO(pp)) {
5108 5103 if (tteattr.tte_intlo & TTE_WRPRM_INT) {
5109 5104 pmtx = sfmmu_page_enter(pp);
5110 5105 PP_CLRRO(pp);
5111 5106 sfmmu_page_exit(pmtx);
5112 5107 }
5113 5108 }
5114 5109
5115 5110 if (ret > 0 && use_demap_range) {
5116 5111 DEMAP_RANGE_MARKPG(dmrp, addr);
5117 5112 } else if (ret > 0) {
5118 5113 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5119 5114 }
5120 5115
5121 5116 if (pml) {
5122 5117 sfmmu_mlist_exit(pml);
5123 5118 }
5124 5119 }
5125 5120 next_addr:
5126 5121 addr += TTEBYTES(ttesz);
5127 5122 sfhmep++;
5128 5123 DEMAP_RANGE_NEXTPG(dmrp);
5129 5124 }
5130 5125 return (addr);
5131 5126 }
5132 5127
5133 5128 /*
5134 5129 * This routine converts virtual attributes to physical ones. It will
5135 5130 * update the tteflags field with the tte mask corresponding to the attributes
5136 5131 * affected and it returns the new attributes. It will also clear the modify
5137 5132 * bit if we are taking away write permission. This is necessary since the
5138 5133 * modify bit is the hardware permission bit and we need to clear it in order
5139 5134 * to detect write faults.
5140 5135 */
5141 5136 static uint64_t
5142 5137 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp)
5143 5138 {
5144 5139 tte_t ttevalue;
5145 5140
5146 5141 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
5147 5142
5148 5143 switch (mode) {
5149 5144 case SFMMU_CHGATTR:
5150 5145 /* all attributes specified */
5151 5146 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr);
5152 5147 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr);
5153 5148 ttemaskp->tte_inthi = TTEINTHI_ATTR;
5154 5149 ttemaskp->tte_intlo = TTEINTLO_ATTR;
5155 5150 break;
5156 5151 case SFMMU_SETATTR:
5157 5152 ASSERT(!(attr & ~HAT_PROT_MASK));
5158 5153 ttemaskp->ll = 0;
5159 5154 ttevalue.ll = 0;
5160 5155 /*
5161 5156 * a valid tte implies exec and read for sfmmu
5162 5157 * so no need to do anything about them.
5163 5158		 * since privileged access implies user access
5164 5159 * PROT_USER doesn't make sense either.
5165 5160 */
5166 5161 if (attr & PROT_WRITE) {
5167 5162 ttemaskp->tte_intlo |= TTE_WRPRM_INT;
5168 5163 ttevalue.tte_intlo |= TTE_WRPRM_INT;
5169 5164 }
5170 5165 break;
5171 5166 case SFMMU_CLRATTR:
5172 5167 /* attributes will be nand with current ones */
5173 5168 if (attr & ~(PROT_WRITE | PROT_USER)) {
5174 5169 panic("sfmmu: attr %x not supported", attr);
5175 5170 }
5176 5171 ttemaskp->ll = 0;
5177 5172 ttevalue.ll = 0;
5178 5173 if (attr & PROT_WRITE) {
5179 5174 /* clear both writable and modify bit */
5180 5175 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT;
5181 5176 }
5182 5177 if (attr & PROT_USER) {
5183 5178 ttemaskp->tte_intlo |= TTE_PRIV_INT;
5184 5179 ttevalue.tte_intlo |= TTE_PRIV_INT;
5185 5180 }
5186 5181 break;
5187 5182 default:
5188 5183 panic("sfmmu_vtop_attr: bad mode %x", mode);
5189 5184 }
5190 5185 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0);
5191 5186 return (ttevalue.ll);
5192 5187 }
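As a worked example of the mask/value pair this routine produces (restating the logic above, not adding to it): SFMMU_SETATTR with PROT_WRITE sets both the returned value and *ttemaskp to TTE_WRPRM_INT, so the caller's update (tte & ~mask) | value turns on the writable bit while leaving every other TTE bit untouched; SFMMU_CLRATTR with PROT_WRITE widens the mask to TTE_WRPRM_INT | TTE_HWWR_INT while the value stays 0, clearing both the writable and hardware-modify bits in one step.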
5193 5188
5194 5189 static uint_t
5195 5190 sfmmu_ptov_attr(tte_t *ttep)
5196 5191 {
5197 5192 uint_t attr;
5198 5193
5199 5194 ASSERT(TTE_IS_VALID(ttep));
5200 5195
5201 5196 attr = PROT_READ;
5202 5197
5203 5198 if (TTE_IS_WRITABLE(ttep)) {
5204 5199 attr |= PROT_WRITE;
5205 5200 }
5206 5201 if (TTE_IS_EXECUTABLE(ttep)) {
5207 5202 attr |= PROT_EXEC;
5208 5203 }
5209 5204 if (!TTE_IS_PRIVILEGED(ttep)) {
5210 5205 attr |= PROT_USER;
5211 5206 }
5212 5207 if (TTE_IS_NFO(ttep)) {
5213 5208 attr |= HAT_NOFAULT;
5214 5209 }
5215 5210 if (TTE_IS_NOSYNC(ttep)) {
5216 5211 attr |= HAT_NOSYNC;
5217 5212 }
5218 5213 if (TTE_IS_SIDEFFECT(ttep)) {
5219 5214 attr |= SFMMU_SIDEFFECT;
5220 5215 }
5221 5216 if (!TTE_IS_VCACHEABLE(ttep)) {
5222 5217 attr |= SFMMU_UNCACHEVTTE;
5223 5218 }
5224 5219 if (!TTE_IS_PCACHEABLE(ttep)) {
5225 5220 attr |= SFMMU_UNCACHEPTTE;
5226 5221 }
5227 5222 return (attr);
5228 5223 }
5229 5224
5230 5225 /*
5231 5226 * hat_chgprot is a deprecated hat call. New segment drivers
5232 5227 * should store all attributes and use hat_*attr calls.
5233 5228 *
5234 5229 * Change the protections in the virtual address range
5235 5230 * given to the specified virtual protection. If vprot is ~PROT_WRITE,
5236 5231 * then remove write permission, leaving the other
5237 5232 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions.
5238 5233 *
5239 5234 */
5240 5235 void
5241 5236 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot)
5242 5237 {
5243 5238 struct hmehash_bucket *hmebp;
5244 5239 hmeblk_tag hblktag;
5245 5240 int hmeshift, hashno = 1;
5246 5241 struct hme_blk *hmeblkp, *list = NULL;
5247 5242 caddr_t endaddr;
5248 5243 cpuset_t cpuset;
5249 5244 demap_range_t dmr;
5250 5245
5251 5246 ASSERT((len & MMU_PAGEOFFSET) == 0);
5252 5247 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5253 5248
5254 5249 if (sfmmup->sfmmu_xhat_provider) {
5255 5250 XHAT_CHGPROT(sfmmup, addr, len, vprot);
5256 5251 return;
5257 5252 } else {
5258 5253 /*
5259 5254 * This must be a CPU HAT. If the address space has
5260 5255 * XHATs attached, change attributes for all of them,
5261 5256 * just in case
5262 5257 */
5263 5258 ASSERT(sfmmup->sfmmu_as != NULL);
5264 5259 if (sfmmup->sfmmu_as->a_xhat != NULL)
5265 5260 xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5266 5261 }
5267 5262
5268 5263 CPUSET_ZERO(cpuset);
5269 5264
5270 5265 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5271 5266 ((addr + len) > (caddr_t)USERLIMIT)) {
5272 5267 panic("user addr %p vprot %x in kernel space",
5273 5268 (void *)addr, vprot);
5274 5269 }
5275 5270 endaddr = addr + len;
5276 5271 hblktag.htag_id = sfmmup;
5277 5272 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5278 5273 DEMAP_RANGE_INIT(sfmmup, &dmr);
5279 5274
5280 5275 while (addr < endaddr) {
5281 5276 hmeshift = HME_HASH_SHIFT(hashno);
5282 5277 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5283 5278 hblktag.htag_rehash = hashno;
5284 5279 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5285 5280
5286 5281 SFMMU_HASH_LOCK(hmebp);
5287 5282
5288 5283 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
5289 5284 if (hmeblkp != NULL) {
5290 5285 ASSERT(!hmeblkp->hblk_shared);
5291 5286 /*
5292 5287 * We've encountered a shadow hmeblk so skip the range
5293 5288 * of the next smaller mapping size.
5294 5289 */
5295 5290 if (hmeblkp->hblk_shw_bit) {
5296 5291 ASSERT(sfmmup != ksfmmup);
5297 5292 ASSERT(hashno > 1);
5298 5293 addr = (caddr_t)P2END((uintptr_t)addr,
5299 5294 TTEBYTES(hashno - 1));
5300 5295 } else {
5301 5296 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp,
5302 5297 addr, endaddr, &dmr, vprot);
5303 5298 }
5304 5299 SFMMU_HASH_UNLOCK(hmebp);
5305 5300 hashno = 1;
5306 5301 continue;
5307 5302 }
5308 5303 SFMMU_HASH_UNLOCK(hmebp);
5309 5304
5310 5305 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
5311 5306 /*
5312 5307 * We have traversed the whole list and rehashed
5313 5308 * if necessary without finding the address to chgprot.
5314 5309 * This is ok so we increment the address by the
5315 5310 * smallest hmeblk range for kernel mappings and the
5316 5311 * largest hmeblk range, to account for shadow hmeblks,
5317 5312 * for user mappings and continue.
5318 5313 */
5319 5314 if (sfmmup == ksfmmup)
5320 5315 addr = (caddr_t)P2END((uintptr_t)addr,
5321 5316 TTEBYTES(1));
5322 5317 else
5323 5318 addr = (caddr_t)P2END((uintptr_t)addr,
5324 5319 TTEBYTES(hashno));
5325 5320 hashno = 1;
5326 5321 } else {
5327 5322 hashno++;
5328 5323 }
5329 5324 }
5330 5325
5331 5326 sfmmu_hblks_list_purge(&list, 0);
5332 5327 DEMAP_RANGE_FLUSH(&dmr);
5333 5328 cpuset = sfmmup->sfmmu_cpusran;
5334 5329 xt_sync(cpuset);
5335 5330 }
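A brief sketch of the deprecated interface's calling convention described above (illustrative only; the wrapper name and caller context are assumptions):

    #include <sys/mman.h>
    #include <vm/hat.h>

    /* Hypothetical examples of the vprot values hat_chgprot() accepts. */
    static void
    chgprot_examples(struct hat *hat, caddr_t addr, size_t len)
    {
            /* Remove write permission only; other protections unchanged. */
            hat_chgprot(hat, addr, len, (uint_t)~PROT_WRITE);
            /* Remove user permission only. */
            hat_chgprot(hat, addr, len, (uint_t)~PROT_USER);
            /* Otherwise the range gets exactly the protections given. */
            hat_chgprot(hat, addr, len, PROT_READ | PROT_WRITE);
    }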
5336 5331
5337 5332 /*
5338 5333 * This function chgprots a range of addresses in an hmeblk. It returns the
5339 5334  * next address that needs to be chgprot.
5340 5335 * It should be called with the hash lock held.
5341 5336  * XXX It should be possible to optimize chgprot by not flushing every time but
5342 5337 * on the other hand:
5343 5338 * 1. do one flush crosscall.
5344 5339 * 2. only flush if we are increasing permissions (make sure this will work)
5345 5340 */
5346 5341 static caddr_t
5347 5342 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5348 5343 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5349 5344 {
5350 5345 uint_t pprot;
5351 5346 tte_t tte, ttemod;
5352 5347 struct sf_hment *sfhmep;
5353 5348 uint_t tteflags;
5354 5349 int ttesz;
5355 5350 struct page *pp = NULL;
5356 5351 kmutex_t *pml, *pmtx;
5357 5352 int ret;
5358 5353 int use_demap_range;
5359 5354 #if defined(SF_ERRATA_57)
5360 5355 int check_exec;
5361 5356 #endif
5362 5357
5363 5358 ASSERT(in_hblk_range(hmeblkp, addr));
5364 5359 ASSERT(hmeblkp->hblk_shw_bit == 0);
5365 5360 ASSERT(!hmeblkp->hblk_shared);
5366 5361
5367 5362 #ifdef DEBUG
5368 5363 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
5369 5364 (endaddr < get_hblk_endaddr(hmeblkp))) {
5370 5365 panic("sfmmu_hblk_chgprot: partial chgprot of large page");
5371 5366 }
5372 5367 #endif /* DEBUG */
5373 5368
5374 5369 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
5375 5370 ttesz = get_hblk_ttesz(hmeblkp);
5376 5371
5377 5372 pprot = sfmmu_vtop_prot(vprot, &tteflags);
5378 5373 #if defined(SF_ERRATA_57)
5379 5374 check_exec = (sfmmup != ksfmmup) &&
5380 5375 AS_TYPE_64BIT(sfmmup->sfmmu_as) &&
5381 5376 ((vprot & PROT_EXEC) == PROT_EXEC);
5382 5377 #endif
5383 5378 HBLKTOHME(sfhmep, hmeblkp, addr);
5384 5379
5385 5380 /*
5386 5381 * Flush the current demap region if addresses have been
5387 5382 * skipped or the page size doesn't match.
5388 5383 */
5389 5384 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE);
5390 5385 if (use_demap_range) {
5391 5386 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
5392 5387 } else if (dmrp != NULL) {
5393 5388 DEMAP_RANGE_FLUSH(dmrp);
5394 5389 }
5395 5390
5396 5391 while (addr < endaddr) {
5397 5392 sfmmu_copytte(&sfhmep->hme_tte, &tte);
5398 5393 if (TTE_IS_VALID(&tte)) {
5399 5394 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) {
5400 5395 /*
5401 5396 * if the new protection is the same as old
5402 5397 * continue
5403 5398 */
5404 5399 goto next_addr;
5405 5400 }
5406 5401 pml = NULL;
5407 5402 pp = sfhmep->hme_page;
5408 5403 if (pp) {
5409 5404 pml = sfmmu_mlist_enter(pp);
5410 5405 }
5411 5406 if (pp != sfhmep->hme_page) {
5412 5407 /*
5413 5408				 * tte must have been unloaded
5414 5409 * underneath us. Recheck
5415 5410 */
5416 5411 ASSERT(pml);
5417 5412 sfmmu_mlist_exit(pml);
5418 5413 continue;
5419 5414 }
5420 5415
5421 5416 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
5422 5417
5423 5418 ttemod = tte;
5424 5419 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot);
5425 5420 #if defined(SF_ERRATA_57)
5426 5421 if (check_exec && addr < errata57_limit)
5427 5422 ttemod.tte_exec_perm = 0;
5428 5423 #endif
5429 5424 ret = sfmmu_modifytte_try(&tte, &ttemod,
5430 5425 &sfhmep->hme_tte);
5431 5426
5432 5427 if (ret < 0) {
5433 5428 /* tte changed underneath us */
5434 5429 if (pml) {
5435 5430 sfmmu_mlist_exit(pml);
5436 5431 }
5437 5432 continue;
5438 5433 }
5439 5434
5440 5435 if (tteflags & TTE_HWWR_INT) {
5441 5436 /*
5442 5437 * need to sync if we are clearing modify bit.
5443 5438 */
5444 5439 sfmmu_ttesync(sfmmup, addr, &tte, pp);
5445 5440 }
5446 5441
5447 5442 if (pp && PP_ISRO(pp)) {
5448 5443 if (pprot & TTE_WRPRM_INT) {
5449 5444 pmtx = sfmmu_page_enter(pp);
5450 5445 PP_CLRRO(pp);
5451 5446 sfmmu_page_exit(pmtx);
5452 5447 }
5453 5448 }
5454 5449
5455 5450 if (ret > 0 && use_demap_range) {
5456 5451 DEMAP_RANGE_MARKPG(dmrp, addr);
5457 5452 } else if (ret > 0) {
5458 5453 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
5459 5454 }
5460 5455
5461 5456 if (pml) {
5462 5457 sfmmu_mlist_exit(pml);
5463 5458 }
5464 5459 }
5465 5460 next_addr:
5466 5461 addr += TTEBYTES(ttesz);
5467 5462 sfhmep++;
5468 5463 DEMAP_RANGE_NEXTPG(dmrp);
5469 5464 }
5470 5465 return (addr);
5471 5466 }
5472 5467
5473 5468 /*
5474 5469 * This routine is deprecated and should only be used by hat_chgprot.
5475 5470 * The correct routine is sfmmu_vtop_attr.
5476 5471 * This routine converts virtual page protections to physical ones. It will
5477 5472 * update the tteflags field with the tte mask corresponding to the protections
5478 5473 * affected and it returns the new protections. It will also clear the modify
5479 5474 * bit if we are taking away write permission. This is necessary since the
5480 5475 * modify bit is the hardware permission bit and we need to clear it in order
5481 5476 * to detect write faults.
5482 5477 * It accepts the following special protections:
5483 5478 * ~PROT_WRITE = remove write permissions.
5484 5479 * ~PROT_USER = remove user permissions.
5485 5480 */
5486 5481 static uint_t
5487 5482 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp)
5488 5483 {
5489 5484 if (vprot == (uint_t)~PROT_WRITE) {
5490 5485 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT;
5491 5486 return (0); /* will cause wrprm to be cleared */
5492 5487 }
5493 5488 if (vprot == (uint_t)~PROT_USER) {
5494 5489 *tteflagsp = TTE_PRIV_INT;
5495 5490 return (0); /* will cause privprm to be cleared */
5496 5491 }
5497 5492 if ((vprot == 0) || (vprot == PROT_USER) ||
5498 5493 ((vprot & PROT_ALL) != vprot)) {
5499 5494 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5500 5495 }
5501 5496
5502 5497 switch (vprot) {
5503 5498 case (PROT_READ):
5504 5499 case (PROT_EXEC):
5505 5500 case (PROT_EXEC | PROT_READ):
5506 5501 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5507 5502 return (TTE_PRIV_INT); /* set prv and clr wrt */
5508 5503 case (PROT_WRITE):
5509 5504 case (PROT_WRITE | PROT_READ):
5510 5505 case (PROT_EXEC | PROT_WRITE):
5511 5506 case (PROT_EXEC | PROT_WRITE | PROT_READ):
5512 5507 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5513 5508 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */
5514 5509 case (PROT_USER | PROT_READ):
5515 5510 case (PROT_USER | PROT_EXEC):
5516 5511 case (PROT_USER | PROT_EXEC | PROT_READ):
5517 5512 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5518 5513 return (0); /* clr prv and wrt */
5519 5514 case (PROT_USER | PROT_WRITE):
5520 5515 case (PROT_USER | PROT_WRITE | PROT_READ):
5521 5516 case (PROT_USER | PROT_EXEC | PROT_WRITE):
5522 5517 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5523 5518 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5524 5519 return (TTE_WRPRM_INT); /* clr prv and set wrt */
5525 5520 default:
5526 5521 panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5527 5522 }
5528 5523 return (0);
5529 5524 }
5530 5525
5531 5526 /*
5532 5527 * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5533 5528 * the normal algorithm would take too long for a very large VA range with
5534 5529 * few real mappings. This routine just walks thru all HMEs in the global
5535 5530 * hash table to find and remove mappings.
5536 5531 */
5537 5532 static void
5538 5533 hat_unload_large_virtual(
5539 5534 struct hat *sfmmup,
5540 5535 caddr_t startaddr,
5541 5536 size_t len,
5542 5537 uint_t flags,
5543 5538 hat_callback_t *callback)
5544 5539 {
5545 5540 struct hmehash_bucket *hmebp;
5546 5541 struct hme_blk *hmeblkp;
5547 5542 struct hme_blk *pr_hblk = NULL;
5548 5543 struct hme_blk *nx_hblk;
5549 5544 struct hme_blk *list = NULL;
5550 5545 int i;
5551 5546 demap_range_t dmr, *dmrp;
5552 5547 cpuset_t cpuset;
5553 5548 caddr_t endaddr = startaddr + len;
5554 5549 caddr_t sa;
5555 5550 caddr_t ea;
5556 5551 caddr_t cb_sa[MAX_CB_ADDR];
5557 5552 caddr_t cb_ea[MAX_CB_ADDR];
5558 5553 int addr_cnt = 0;
5559 5554 int a = 0;
5560 5555
5561 5556 if (sfmmup->sfmmu_free) {
5562 5557 dmrp = NULL;
5563 5558 } else {
5564 5559 dmrp = &dmr;
5565 5560 DEMAP_RANGE_INIT(sfmmup, dmrp);
5566 5561 }
5567 5562
5568 5563 /*
5569 5564 * Loop through all the hash buckets of HME blocks looking for matches.
5570 5565 */
5571 5566 for (i = 0; i <= UHMEHASH_SZ; i++) {
5572 5567 hmebp = &uhme_hash[i];
5573 5568 SFMMU_HASH_LOCK(hmebp);
5574 5569 hmeblkp = hmebp->hmeblkp;
5575 5570 pr_hblk = NULL;
5576 5571 while (hmeblkp) {
5577 5572 nx_hblk = hmeblkp->hblk_next;
5578 5573
5579 5574 /*
5580 5575 * skip if not this context, if a shadow block or
5581 5576 * if the mapping is not in the requested range
5582 5577 */
5583 5578 if (hmeblkp->hblk_tag.htag_id != sfmmup ||
5584 5579 hmeblkp->hblk_shw_bit ||
5585 5580 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr ||
5586 5581 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) {
5587 5582 pr_hblk = hmeblkp;
5588 5583 goto next_block;
5589 5584 }
5590 5585
5591 5586 ASSERT(!hmeblkp->hblk_shared);
5592 5587 /*
5593 5588 * unload if there are any current valid mappings
5594 5589 */
5595 5590 if (hmeblkp->hblk_vcnt != 0 ||
5596 5591 hmeblkp->hblk_hmecnt != 0)
5597 5592 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
5598 5593 sa, ea, dmrp, flags);
5599 5594
5600 5595 /*
5601 5596 * on unmap we also release the HME block itself, once
5602 5597 * all mappings are gone.
5603 5598 */
5604 5599 if ((flags & HAT_UNLOAD_UNMAP) != 0 &&
5605 5600 !hmeblkp->hblk_vcnt &&
5606 5601 !hmeblkp->hblk_hmecnt) {
5607 5602 ASSERT(!hmeblkp->hblk_lckcnt);
5608 5603 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5609 5604 &list, 0);
5610 5605 } else {
5611 5606 pr_hblk = hmeblkp;
5612 5607 }
5613 5608
5614 5609 if (callback == NULL)
5615 5610 goto next_block;
5616 5611
5617 5612 /*
5618 5613 * HME blocks may span more than one page, but we may be
5619 5614 * unmapping only one page, so check for a smaller range
5620 5615 * for the callback
5621 5616 */
5622 5617 if (sa < startaddr)
5623 5618 sa = startaddr;
5624 5619 if (--ea > endaddr)
5625 5620 ea = endaddr - 1;
5626 5621
5627 5622 cb_sa[addr_cnt] = sa;
5628 5623 cb_ea[addr_cnt] = ea;
5629 5624 if (++addr_cnt == MAX_CB_ADDR) {
5630 5625 if (dmrp != NULL) {
5631 5626 DEMAP_RANGE_FLUSH(dmrp);
5632 5627 cpuset = sfmmup->sfmmu_cpusran;
5633 5628 xt_sync(cpuset);
5634 5629 }
5635 5630
5636 5631 for (a = 0; a < MAX_CB_ADDR; ++a) {
5637 5632 callback->hcb_start_addr = cb_sa[a];
5638 5633 callback->hcb_end_addr = cb_ea[a];
5639 5634 callback->hcb_function(callback);
5640 5635 }
5641 5636 addr_cnt = 0;
5642 5637 }
5643 5638
5644 5639 next_block:
5645 5640 hmeblkp = nx_hblk;
5646 5641 }
5647 5642 SFMMU_HASH_UNLOCK(hmebp);
5648 5643 }
5649 5644
5650 5645 sfmmu_hblks_list_purge(&list, 0);
5651 5646 if (dmrp != NULL) {
5652 5647 DEMAP_RANGE_FLUSH(dmrp);
5653 5648 cpuset = sfmmup->sfmmu_cpusran;
5654 5649 xt_sync(cpuset);
5655 5650 }
5656 5651
5657 5652 for (a = 0; a < addr_cnt; ++a) {
5658 5653 callback->hcb_start_addr = cb_sa[a];
5659 5654 callback->hcb_end_addr = cb_ea[a];
5660 5655 callback->hcb_function(callback);
5661 5656 }
5662 5657
5663 5658 /*
5664 5659 * Check TSB and TLB page sizes if the process isn't exiting.
5665 5660 */
5666 5661 if (!sfmmup->sfmmu_free)
5667 5662 sfmmu_check_page_sizes(sfmmup, 0);
5668 5663 }
5669 5664
5670 5665 /*
5671 5666 * Unload all the mappings in the range [addr..addr+len). addr and len must
5672 5667 * be MMU_PAGESIZE aligned.
5673 5668 */
5674 5669
5675 5670 extern struct seg *segkmap;
5676 5671 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5677 5672 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5678 5673
5679 5674
5680 5675 void
5681 5676 hat_unload_callback(
5682 5677 struct hat *sfmmup,
5683 5678 caddr_t addr,
5684 5679 size_t len,
5685 5680 uint_t flags,
5686 5681 hat_callback_t *callback)
5687 5682 {
5688 5683 struct hmehash_bucket *hmebp;
5689 5684 hmeblk_tag hblktag;
5690 5685 int hmeshift, hashno, iskernel;
5691 5686 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5692 5687 caddr_t endaddr;
5693 5688 cpuset_t cpuset;
5694 5689 int addr_count = 0;
5695 5690 int a;
5696 5691 caddr_t cb_start_addr[MAX_CB_ADDR];
5697 5692 caddr_t cb_end_addr[MAX_CB_ADDR];
5698 5693 int issegkmap = ISSEGKMAP(sfmmup, addr);
5699 5694 demap_range_t dmr, *dmrp;
5700 5695
5701 5696 if (sfmmup->sfmmu_xhat_provider) {
5702 5697 XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5703 5698 return;
5704 5699 } else {
5705 5700 /*
5706 5701 * This must be a CPU HAT. If the address space has
5707 5702 * XHATs attached, unload the mappings for all of them,
5708 5703 * just in case
5709 5704 */
5710 5705 ASSERT(sfmmup->sfmmu_as != NULL);
5711 5706 if (sfmmup->sfmmu_as->a_xhat != NULL)
5712 5707 xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713 5708 len, flags, callback);
5714 5709 }
5715 5710
5716 5711 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717 - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5712 + AS_LOCK_HELD(sfmmup->sfmmu_as));
5718 5713
5719 5714 ASSERT(sfmmup != NULL);
5720 5715 ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 5716 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 5717
5723 5718 /*
5724 5719 * Probing through a large VA range (say 63 bits) will be slow, even
5725 5720 * at 4 Meg steps between the probes. So, when the virtual address range
5726 5721 * is very large, search the HME entries for what to unload.
5727 5722 *
5728 5723 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need
5729 5724 *
5730 5725 * UHMEHASH_SZ is number of hash buckets to examine
5731 5726 *
5732 5727 */
5733 5728 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) {
5734 5729 hat_unload_large_virtual(sfmmup, addr, len, flags, callback);
5735 5730 return;
5736 5731 }
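	/*
	 * Editorial worked example (hypothetical numbers): unloading a
	 * sparsely mapped 1 TB range would take 1 TB / 4 MB = 262144 probes
	 * at 4 Meg steps, so once that probe count exceeds UHMEHASH_SZ it is
	 * cheaper to visit every hash bucket once, which is what
	 * hat_unload_large_virtual() does.
	 */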
5737 5732
5738 5733 CPUSET_ZERO(cpuset);
5739 5734
5740 5735 /*
5741 5736 * If the process is exiting, we can save a lot of fuss since
5742 5737 * we'll flush the TLB when we free the ctx anyway.
5743 5738 */
5744 5739 if (sfmmup->sfmmu_free) {
5745 5740 dmrp = NULL;
5746 5741 } else {
5747 5742 dmrp = &dmr;
5748 5743 DEMAP_RANGE_INIT(sfmmup, dmrp);
5749 5744 }
5750 5745
5751 5746 endaddr = addr + len;
5752 5747 hblktag.htag_id = sfmmup;
5753 5748 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
5754 5749
5755 5750 /*
5756 5751 * It is likely for the vm to call unload over a wide range of
5757 5752 * addresses that are actually very sparsely populated by
5758 5753 * translations. In order to speed this up the sfmmu hat supports
5759 5754 * the concept of shadow hmeblks. Dummy large page hmeblks that
5760 5755 * correspond to actual small translations are allocated at tteload
5761 5756 * time and are referred to as shadow hmeblks. Now, during unload
5762 5757 * time, we first check if we have a shadow hmeblk for that
5763 5758 * translation. The absence of one means the corresponding address
5764 5759 * range is empty and can be skipped.
5765 5760 *
5766 5761 * The kernel is an exception to above statement and that is why
5767 5762 * we don't use shadow hmeblks and hash starting from the smallest
5768 5763 * page size.
5769 5764 */
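	/*
	 * Editorial note restating the above concretely: if the large-page
	 * hash lookup below finds no shadow hmeblk for a user address, no
	 * smaller hmeblks can exist beneath it in that span, so the span at
	 * that hash level is skipped without probing the smaller sizes.
	 */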
5770 5765 if (sfmmup == KHATID) {
5771 5766 iskernel = 1;
5772 5767 hashno = TTE64K;
5773 5768 } else {
5774 5769 iskernel = 0;
5775 5770 if (mmu_page_sizes == max_mmu_page_sizes) {
5776 5771 hashno = TTE256M;
5777 5772 } else {
5778 5773 hashno = TTE4M;
5779 5774 }
5780 5775 }
5781 5776 while (addr < endaddr) {
5782 5777 hmeshift = HME_HASH_SHIFT(hashno);
5783 5778 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
5784 5779 hblktag.htag_rehash = hashno;
5785 5780 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
5786 5781
5787 5782 SFMMU_HASH_LOCK(hmebp);
5788 5783
5789 5784 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list);
5790 5785 if (hmeblkp == NULL) {
5791 5786 /*
5792 5787			 * didn't find an hmeblk. skip the appropriate
5793 5788 * address range.
5794 5789 */
5795 5790 SFMMU_HASH_UNLOCK(hmebp);
5796 5791 if (iskernel) {
5797 5792 if (hashno < mmu_hashcnt) {
5798 5793 hashno++;
5799 5794 continue;
5800 5795 } else {
5801 5796 hashno = TTE64K;
5802 5797 addr = (caddr_t)roundup((uintptr_t)addr
5803 5798 + 1, MMU_PAGESIZE64K);
5804 5799 continue;
5805 5800 }
5806 5801 }
5807 5802 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5808 5803 (1 << hmeshift));
5809 5804 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5810 5805 ASSERT(hashno == TTE64K);
5811 5806 continue;
5812 5807 }
5813 5808 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5814 5809 hashno = TTE512K;
5815 5810 continue;
5816 5811 }
5817 5812 if (mmu_page_sizes == max_mmu_page_sizes) {
5818 5813 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5819 5814 hashno = TTE4M;
5820 5815 continue;
5821 5816 }
5822 5817 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5823 5818 hashno = TTE32M;
5824 5819 continue;
5825 5820 }
5826 5821 hashno = TTE256M;
5827 5822 continue;
5828 5823 } else {
5829 5824 hashno = TTE4M;
5830 5825 continue;
5831 5826 }
5832 5827 }
5833 5828 ASSERT(hmeblkp);
5834 5829 ASSERT(!hmeblkp->hblk_shared);
5835 5830 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5836 5831 /*
5837 5832 * If the valid count is zero we can skip the range
5838 5833 * mapped by this hmeblk.
5839 5834			 * We free hblks in the case of HAT_UNLOAD_UNMAP.
5840 5835			 * HAT_UNLOAD_UNMAP is used by segment drivers as a hint
5841 5836 * that the mapping resource won't be used any longer.
5842 5837 * The best example of this is during exit().
5843 5838 */
5844 5839 addr = (caddr_t)roundup((uintptr_t)addr + 1,
5845 5840 get_hblk_span(hmeblkp));
5846 5841 if ((flags & HAT_UNLOAD_UNMAP) ||
5847 5842 (iskernel && !issegkmap)) {
5848 5843 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
5849 5844 &list, 0);
5850 5845 }
5851 5846 SFMMU_HASH_UNLOCK(hmebp);
5852 5847
5853 5848 if (iskernel) {
5854 5849 hashno = TTE64K;
5855 5850 continue;
5856 5851 }
5857 5852 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5858 5853 ASSERT(hashno == TTE64K);
5859 5854 continue;
5860 5855 }
5861 5856 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5862 5857 hashno = TTE512K;
5863 5858 continue;
5864 5859 }
5865 5860 if (mmu_page_sizes == max_mmu_page_sizes) {
5866 5861 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5867 5862 hashno = TTE4M;
5868 5863 continue;
5869 5864 }
5870 5865 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5871 5866 hashno = TTE32M;
5872 5867 continue;
5873 5868 }
5874 5869 hashno = TTE256M;
5875 5870 continue;
5876 5871 } else {
5877 5872 hashno = TTE4M;
5878 5873 continue;
5879 5874 }
5880 5875 }
5881 5876 if (hmeblkp->hblk_shw_bit) {
5882 5877 /*
5883 5878			 * If we encounter a shadow hmeblk we know there are
5884 5879 * smaller sized hmeblks mapping the same address space.
5885 5880 * Decrement the hash size and rehash.
5886 5881 */
5887 5882 ASSERT(sfmmup != KHATID);
5888 5883 hashno--;
5889 5884 SFMMU_HASH_UNLOCK(hmebp);
5890 5885 continue;
5891 5886 }
5892 5887
5893 5888 /*
5894 5889 * track callback address ranges.
5895 5890 * only start a new range when it's not contiguous
5896 5891 */
5897 5892 if (callback != NULL) {
5898 5893 if (addr_count > 0 &&
5899 5894 addr == cb_end_addr[addr_count - 1])
5900 5895 --addr_count;
5901 5896 else
5902 5897 cb_start_addr[addr_count] = addr;
5903 5898 }
5904 5899
5905 5900 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
5906 5901 dmrp, flags);
5907 5902
5908 5903 if (callback != NULL)
5909 5904 cb_end_addr[addr_count++] = addr;
5910 5905
5911 5906 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) &&
5912 5907 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
5913 5908 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0);
5914 5909 }
5915 5910 SFMMU_HASH_UNLOCK(hmebp);
5916 5911
5917 5912 /*
5918 5913 * Notify our caller as to exactly which pages
5919 5914 * have been unloaded. We do these in clumps,
5920 5915 * to minimize the number of xt_sync()s that need to occur.
5921 5916 */
5922 5917 if (callback != NULL && addr_count == MAX_CB_ADDR) {
5923 5918 if (dmrp != NULL) {
5924 5919 DEMAP_RANGE_FLUSH(dmrp);
5925 5920 cpuset = sfmmup->sfmmu_cpusran;
5926 5921 xt_sync(cpuset);
5927 5922 }
5928 5923
5929 5924 for (a = 0; a < MAX_CB_ADDR; ++a) {
5930 5925 callback->hcb_start_addr = cb_start_addr[a];
5931 5926 callback->hcb_end_addr = cb_end_addr[a];
5932 5927 callback->hcb_function(callback);
5933 5928 }
5934 5929 addr_count = 0;
5935 5930 }
5936 5931 if (iskernel) {
5937 5932 hashno = TTE64K;
5938 5933 continue;
5939 5934 }
5940 5935 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) {
5941 5936 ASSERT(hashno == TTE64K);
5942 5937 continue;
5943 5938 }
5944 5939 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) {
5945 5940 hashno = TTE512K;
5946 5941 continue;
5947 5942 }
5948 5943 if (mmu_page_sizes == max_mmu_page_sizes) {
5949 5944 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) {
5950 5945 hashno = TTE4M;
5951 5946 continue;
5952 5947 }
5953 5948 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) {
5954 5949 hashno = TTE32M;
5955 5950 continue;
5956 5951 }
5957 5952 hashno = TTE256M;
5958 5953 } else {
5959 5954 hashno = TTE4M;
5960 5955 }
5961 5956 }
5962 5957
5963 5958 sfmmu_hblks_list_purge(&list, 0);
5964 5959 if (dmrp != NULL) {
5965 5960 DEMAP_RANGE_FLUSH(dmrp);
5966 5961 cpuset = sfmmup->sfmmu_cpusran;
5967 5962 xt_sync(cpuset);
5968 5963 }
5969 5964 if (callback && addr_count != 0) {
5970 5965 for (a = 0; a < addr_count; ++a) {
5971 5966 callback->hcb_start_addr = cb_start_addr[a];
5972 5967 callback->hcb_end_addr = cb_end_addr[a];
5973 5968 callback->hcb_function(callback);
5974 5969 }
5975 5970 }
5976 5971
5977 5972 /*
5978 5973 * Check TSB and TLB page sizes if the process isn't exiting.
5979 5974 */
5980 5975 if (!sfmmup->sfmmu_free)
5981 5976 sfmmu_check_page_sizes(sfmmup, 0);
5982 5977 }
5983 5978
5984 5979 /*
5985 5980 * Unload all the mappings in the range [addr..addr+len). addr and len must
5986 5981 * be MMU_PAGESIZE aligned.
5987 5982 */
5988 5983 void
5989 5984 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5990 5985 {
5991 5986 if (sfmmup->sfmmu_xhat_provider) {
5992 5987 XHAT_UNLOAD(sfmmup, addr, len, flags);
5993 5988 return;
5994 5989 }
5995 5990 hat_unload_callback(sfmmup, addr, len, flags, NULL);
5996 5991 }
5997 5992
5998 5993
5999 5994 /*
6000 5995 * Find the largest mapping size for this page.
6001 5996 */
6002 5997 int
6003 5998 fnd_mapping_sz(page_t *pp)
6004 5999 {
6005 6000 int sz;
6006 6001 int p_index;
6007 6002
6008 6003 p_index = PP_MAPINDEX(pp);
6009 6004
6010 6005 sz = 0;
6011 6006 p_index >>= 1; /* don't care about 8K bit */
6012 6007 for (; p_index; p_index >>= 1) {
6013 6008 sz++;
6014 6009 }
6015 6010
6016 6011 return (sz);
6017 6012 }
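For instance (hypothetical index value), if PP_MAPINDEX() reported 0x9 — the 8K bit plus the bit for size class 3 — the loop above discards the 8K bit and then counts three shifts before the index reaches zero, returning 3, the largest size class mapping the page (the 4M size on sun4u).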
6018 6013
6019 6014 /*
6020 6015 * This function unloads a range of addresses for an hmeblk.
6021 6016 * It returns the next address to be unloaded.
6022 6017 * It should be called with the hash lock held.
6023 6018 */
6024 6019 static caddr_t
6025 6020 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6026 6021 caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
6027 6022 {
6028 6023 tte_t tte, ttemod;
6029 6024 struct sf_hment *sfhmep;
6030 6025 int ttesz;
6031 6026 long ttecnt;
6032 6027 page_t *pp;
6033 6028 kmutex_t *pml;
6034 6029 int ret;
6035 6030 int use_demap_range;
6036 6031
6037 6032 ASSERT(in_hblk_range(hmeblkp, addr));
6038 6033 ASSERT(!hmeblkp->hblk_shw_bit);
6039 6034 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared);
6040 6035 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared);
6041 6036 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared);
6042 6037
6043 6038 #ifdef DEBUG
6044 6039 if (get_hblk_ttesz(hmeblkp) != TTE8K &&
6045 6040 (endaddr < get_hblk_endaddr(hmeblkp))) {
6046 6041 panic("sfmmu_hblk_unload: partial unload of large page");
6047 6042 }
6048 6043 #endif /* DEBUG */
6049 6044
6050 6045 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6051 6046 ttesz = get_hblk_ttesz(hmeblkp);
6052 6047
6053 6048 use_demap_range = ((dmrp == NULL) ||
6054 6049 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)));
6055 6050
6056 6051 if (use_demap_range) {
6057 6052 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr);
6058 6053 } else if (dmrp != NULL) {
6059 6054 DEMAP_RANGE_FLUSH(dmrp);
6060 6055 }
6061 6056 ttecnt = 0;
6062 6057 HBLKTOHME(sfhmep, hmeblkp, addr);
6063 6058
6064 6059 while (addr < endaddr) {
6065 6060 pml = NULL;
6066 6061 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6067 6062 if (TTE_IS_VALID(&tte)) {
6068 6063 pp = sfhmep->hme_page;
6069 6064 if (pp != NULL) {
6070 6065 pml = sfmmu_mlist_enter(pp);
6071 6066 }
6072 6067
6073 6068 /*
6074 6069 * Verify if hme still points to 'pp' now that
6075 6070 * we have p_mapping lock.
6076 6071 */
6077 6072 if (sfhmep->hme_page != pp) {
6078 6073 if (pp != NULL && sfhmep->hme_page != NULL) {
6079 6074 ASSERT(pml != NULL);
6080 6075 sfmmu_mlist_exit(pml);
6081 6076 /* Re-start this iteration. */
6082 6077 continue;
6083 6078 }
6084 6079 ASSERT((pp != NULL) &&
6085 6080 (sfhmep->hme_page == NULL));
6086 6081 goto tte_unloaded;
6087 6082 }
6088 6083
6089 6084 /*
6090 6085 * This point on we have both HASH and p_mapping
6091 6086 * lock.
6092 6087 */
6093 6088 ASSERT(pp == sfhmep->hme_page);
6094 6089 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6095 6090
6096 6091 /*
6097 6092 * We need to loop on modify tte because it is
6098 6093 * possible for pagesync to come along and
6099 6094 * change the software bits beneath us.
6100 6095 *
6101 6096 * Page_unload can also invalidate the tte after
6102 6097 * we read tte outside of p_mapping lock.
6103 6098 */
6104 6099 again:
6105 6100 ttemod = tte;
6106 6101
6107 6102 TTE_SET_INVALID(&ttemod);
6108 6103 ret = sfmmu_modifytte_try(&tte, &ttemod,
6109 6104 &sfhmep->hme_tte);
6110 6105
6111 6106 if (ret <= 0) {
6112 6107 if (TTE_IS_VALID(&tte)) {
6113 6108 ASSERT(ret < 0);
6114 6109 goto again;
6115 6110 }
6116 6111 if (pp != NULL) {
6117 6112 panic("sfmmu_hblk_unload: pp = 0x%p "
6118 6113 "tte became invalid under mlist"
6119 6114 " lock = 0x%p", (void *)pp,
6120 6115 (void *)pml);
6121 6116 }
6122 6117 continue;
6123 6118 }
6124 6119
6125 6120 if (!(flags & HAT_UNLOAD_NOSYNC)) {
6126 6121 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6127 6122 }
6128 6123
6129 6124 /*
6130 6125 * Ok- we invalidated the tte. Do the rest of the job.
6131 6126 */
6132 6127 ttecnt++;
6133 6128
6134 6129 if (flags & HAT_UNLOAD_UNLOCK) {
6135 6130 ASSERT(hmeblkp->hblk_lckcnt > 0);
6136 6131 atomic_dec_32(&hmeblkp->hblk_lckcnt);
6137 6132 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK);
6138 6133 }
6139 6134
6140 6135 /*
6141 6136 * Normally we would need to flush the page
6142 6137 * from the virtual cache at this point in
6143 6138 * order to prevent a potential cache alias
6144 6139 * inconsistency.
6145 6140 * The particular scenario we need to worry
6146 6141 * about is:
6147 6142 * Given: va1 and va2 are two virtual address
6148 6143 * that alias and map the same physical
6149 6144 * address.
6150 6145 * 1. mapping exists from va1 to pa and data
6151 6146 * has been read into the cache.
6152 6147 * 2. unload va1.
6153 6148 * 3. load va2 and modify data using va2.
6154 6149 * 4 unload va2.
6155 6150 * 5. load va1 and reference data. Unless we
6156 6151 * flush the data cache when we unload we will
6157 6152 * get stale data.
6158 6153 * Fortunately, page coloring eliminates the
6159 6154 * above scenario by remembering the color a
6160 6155 * physical page was last or is currently
6161 6156 * mapped to. Now, we delay the flush until
6162 6157 * the loading of translations. Only when the
6163 6158 * new translation is of a different color
6164 6159 * are we forced to flush.
6165 6160 */
6166 6161 if (use_demap_range) {
6167 6162 /*
6168 6163 * Mark this page as needing a demap.
6169 6164 */
6170 6165 DEMAP_RANGE_MARKPG(dmrp, addr);
6171 6166 } else {
6172 6167 ASSERT(sfmmup != NULL);
6173 6168 ASSERT(!hmeblkp->hblk_shared);
6174 6169 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
6175 6170 sfmmup->sfmmu_free, 0);
6176 6171 }
6177 6172
6178 6173 if (pp) {
6179 6174 /*
6180 6175 * Remove the hment from the mapping list
6181 6176 */
6182 6177 ASSERT(hmeblkp->hblk_hmecnt > 0);
6183 6178
6184 6179 /*
6185 6180 * Again, we cannot
6186 6181 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS);
6187 6182 */
6188 6183 HME_SUB(sfhmep, pp);
6189 6184 membar_stst();
6190 6185 atomic_dec_16(&hmeblkp->hblk_hmecnt);
6191 6186 }
6192 6187
6193 6188 ASSERT(hmeblkp->hblk_vcnt > 0);
6194 6189 atomic_dec_16(&hmeblkp->hblk_vcnt);
6195 6190
6196 6191 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
6197 6192 !hmeblkp->hblk_lckcnt);
6198 6193
6199 6194 #ifdef VAC
6200 6195 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) {
6201 6196 if (PP_ISTNC(pp)) {
6202 6197 /*
6203 6198 * If page was temporary
6204 6199 * uncached, try to recache
6205 6200 * it. Note that HME_SUB() was
6206 6201 * called above so p_index and
6207 6202 * mlist had been updated.
6208 6203 */
6209 6204 conv_tnc(pp, ttesz);
6210 6205 } else if (pp->p_mapping == NULL) {
6211 6206 ASSERT(kpm_enable);
6212 6207 /*
6213 6208 * Page is marked to be in VAC conflict
6214 6209 * to an existing kpm mapping and/or is
6215 6210 * kpm mapped using only the regular
6216 6211 * pagesize.
6217 6212 */
6218 6213 sfmmu_kpm_hme_unload(pp);
6219 6214 }
6220 6215 }
6221 6216 #endif /* VAC */
6222 6217 } else if ((pp = sfhmep->hme_page) != NULL) {
6223 6218 /*
6224 6219 * TTE is invalid but the hme
6225 6220 * still exists. let pageunload
6226 6221 * complete its job.
6227 6222 */
6228 6223 ASSERT(pml == NULL);
6229 6224 pml = sfmmu_mlist_enter(pp);
6230 6225 if (sfhmep->hme_page != NULL) {
6231 6226 sfmmu_mlist_exit(pml);
6232 6227 continue;
6233 6228 }
6234 6229 ASSERT(sfhmep->hme_page == NULL);
6235 6230 } else if (hmeblkp->hblk_hmecnt != 0) {
6236 6231 /*
6237 6232			 * pageunload may not have finished decrementing
6238 6233 * hblk_vcnt and hblk_hmecnt. Find page_t if any and
6239 6234 * wait for pageunload to finish. Rely on pageunload
6240 6235 * to decrement hblk_hmecnt after hblk_vcnt.
6241 6236 */
6242 6237 pfn_t pfn = TTE_TO_TTEPFN(&tte);
6243 6238 ASSERT(pml == NULL);
6244 6239 if (pf_is_memory(pfn)) {
6245 6240 pp = page_numtopp_nolock(pfn);
6246 6241 if (pp != NULL) {
6247 6242 pml = sfmmu_mlist_enter(pp);
6248 6243 sfmmu_mlist_exit(pml);
6249 6244 pml = NULL;
6250 6245 }
6251 6246 }
6252 6247 }
6253 6248
6254 6249 tte_unloaded:
6255 6250 /*
6256 6251 * At this point, the tte we are looking at
6257 6252 * should be unloaded, and hme has been unlinked
6258 6253 * from page too. This is important because in
6259 6254 * pageunload, it does ttesync() then HME_SUB.
6260 6255 * We need to make sure HME_SUB has been completed
6261 6256 * so we know ttesync() has been completed. Otherwise,
6262 6257 * at exit time, after return from hat layer, VM will
6263 6258 * release as structure which hat_setstat() (called
6264 6259 * by ttesync()) needs.
6265 6260 */
6266 6261 #ifdef DEBUG
6267 6262 {
6268 6263 tte_t dtte;
6269 6264
6270 6265 ASSERT(sfhmep->hme_page == NULL);
6271 6266
6272 6267 sfmmu_copytte(&sfhmep->hme_tte, &dtte);
6273 6268 ASSERT(!TTE_IS_VALID(&dtte));
6274 6269 }
6275 6270 #endif
6276 6271
6277 6272 if (pml) {
6278 6273 sfmmu_mlist_exit(pml);
6279 6274 }
6280 6275
6281 6276 addr += TTEBYTES(ttesz);
6282 6277 sfhmep++;
6283 6278 DEMAP_RANGE_NEXTPG(dmrp);
6284 6279 }
6285 6280 /*
6286 6281 * For shared hmeblks this routine is only called when region is freed
6287 6282 * and no longer referenced. So no need to decrement ttecnt
6288 6283 * in the region structure here.
6289 6284 */
6290 6285 if (ttecnt > 0 && sfmmup != NULL) {
6291 6286 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt);
6292 6287 }
6293 6288 return (addr);
6294 6289 }
6295 6290
6296 6291 /*
6297 6292 * Invalidate a virtual address range for the local CPU.
6298 6293 * For best performance ensure that the va range is completely
6299 6294 * mapped, otherwise the entire TLB will be flushed.
6300 6295 */
6301 6296 void
6302 6297 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size)
6303 6298 {
6304 6299 ssize_t sz;
6305 6300 caddr_t endva = va + size;
6306 6301
6307 6302 while (va < endva) {
6308 6303 sz = hat_getpagesize(sfmmup, va);
6309 6304 if (sz < 0) {
6310 6305 vtag_flushall();
6311 6306 break;
6312 6307 }
6313 6308 vtag_flushpage(va, (uint64_t)sfmmup);
6314 6309 va += sz;
6315 6310 }
6316 6311 }
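A minimal usage sketch (illustrative only; the buffer names and wrapper are assumptions): after retargeting a kernel mapping, flush just that VA range on the local CPU rather than the whole TLB.

    #include <vm/as.h>
    #include <vm/hat.h>

    /* Hypothetical caller: buf/bufsz describe a kernel VA range just remapped. */
    static void
    flush_local_mapping(caddr_t buf, size_t bufsz)
    {
            /*
             * Per the comment above, if any page in [buf, buf + bufsz) is
             * unmapped, hat_flush_range() falls back to a full TLB flush.
             */
            hat_flush_range(kas.a_hat, buf, bufsz);
    }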
6317 6312
6318 6313 /*
6319 6314 * Synchronize all the mappings in the range [addr..addr+len).
6320 6315 * Can be called with clearflag having two states:
6321 6316 * HAT_SYNC_DONTZERO means just return the rm stats
6322 6317 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats
6323 6318 */
6324 6319 void
6325 6320 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6326 6321 {
6327 6322 struct hmehash_bucket *hmebp;
6328 6323 hmeblk_tag hblktag;
6329 6324 int hmeshift, hashno = 1;
6330 6325 struct hme_blk *hmeblkp, *list = NULL;
6331 6326 caddr_t endaddr;
6332 6327 cpuset_t cpuset;
6333 6328
6334 6329 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6335 - ASSERT((sfmmup == ksfmmup) ||
6336 - AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6330 + ASSERT((sfmmup == ksfmmup) || AS_LOCK_HELD(sfmmup->sfmmu_as));
6337 6331 ASSERT((len & MMU_PAGEOFFSET) == 0);
6338 6332 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6339 6333 (clearflag == HAT_SYNC_ZERORM));
6340 6334
6341 6335 CPUSET_ZERO(cpuset);
6342 6336
6343 6337 endaddr = addr + len;
6344 6338 hblktag.htag_id = sfmmup;
6345 6339 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
6346 6340
6347 6341 /*
6348 6342 * Spitfire supports 4 page sizes.
6349 6343 * Most pages are expected to be of the smallest page
6350 6344 * size (8K) and these will not need to be rehashed. 64K
6351 6345	 * pages also don't need to be rehashed because an hmeblk
6352 6346	 * spans 64K of address space. 512K pages might need 1 rehash
6353 6347	 * and 4M pages 2 rehashes.
6354 6348 */
6355 6349 while (addr < endaddr) {
6356 6350 hmeshift = HME_HASH_SHIFT(hashno);
6357 6351 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift);
6358 6352 hblktag.htag_rehash = hashno;
6359 6353 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift);
6360 6354
6361 6355 SFMMU_HASH_LOCK(hmebp);
6362 6356
6363 6357 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
6364 6358 if (hmeblkp != NULL) {
6365 6359 ASSERT(!hmeblkp->hblk_shared);
6366 6360 /*
6367 6361 * We've encountered a shadow hmeblk so skip the range
6368 6362 * of the next smaller mapping size.
6369 6363 */
6370 6364 if (hmeblkp->hblk_shw_bit) {
6371 6365 ASSERT(sfmmup != ksfmmup);
6372 6366 ASSERT(hashno > 1);
6373 6367 addr = (caddr_t)P2END((uintptr_t)addr,
6374 6368 TTEBYTES(hashno - 1));
6375 6369 } else {
6376 6370 addr = sfmmu_hblk_sync(sfmmup, hmeblkp,
6377 6371 addr, endaddr, clearflag);
6378 6372 }
6379 6373 SFMMU_HASH_UNLOCK(hmebp);
6380 6374 hashno = 1;
6381 6375 continue;
6382 6376 }
6383 6377 SFMMU_HASH_UNLOCK(hmebp);
6384 6378
6385 6379 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) {
6386 6380 /*
6387 6381 * We have traversed the whole list and rehashed
6388 6382 * if necessary without finding the address to sync.
6389 6383 * This is ok so we increment the address by the
6390 6384 * smallest hmeblk range for kernel mappings and the
6391 6385 * largest hmeblk range, to account for shadow hmeblks,
6392 6386 * for user mappings and continue.
6393 6387 */
6394 6388 if (sfmmup == ksfmmup)
6395 6389 addr = (caddr_t)P2END((uintptr_t)addr,
6396 6390 TTEBYTES(1));
6397 6391 else
6398 6392 addr = (caddr_t)P2END((uintptr_t)addr,
6399 6393 TTEBYTES(hashno));
6400 6394 hashno = 1;
6401 6395 } else {
6402 6396 hashno++;
6403 6397 }
6404 6398 }
6405 6399 sfmmu_hblks_list_purge(&list, 0);
6406 6400 cpuset = sfmmup->sfmmu_cpusran;
6407 6401 xt_sync(cpuset);
6408 6402 }
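
Reviewer note: the assertion rewritten above is the pattern this patch ("as-lock-macro-simplification") applies throughout the file. A minimal sketch of the before/after shape, assuming the simplified macro in <vm/as.h> still resolves to a RW_LOCK_HELD() check on the address space's a_lock:

	/* before: the caller names the rwlock explicitly */
	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));

	/* after: the macro derives the lock from the as itself */
	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));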
6409 6403
6410 6404 static caddr_t
6411 6405 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6412 6406 caddr_t endaddr, int clearflag)
6413 6407 {
6414 6408 tte_t tte, ttemod;
6415 6409 struct sf_hment *sfhmep;
6416 6410 int ttesz;
6417 6411 struct page *pp;
6418 6412 kmutex_t *pml;
6419 6413 int ret;
6420 6414
6421 6415 ASSERT(hmeblkp->hblk_shw_bit == 0);
6422 6416 ASSERT(!hmeblkp->hblk_shared);
6423 6417
6424 6418 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp));
6425 6419
6426 6420 ttesz = get_hblk_ttesz(hmeblkp);
6427 6421 HBLKTOHME(sfhmep, hmeblkp, addr);
6428 6422
6429 6423 while (addr < endaddr) {
6430 6424 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6431 6425 if (TTE_IS_VALID(&tte)) {
6432 6426 pml = NULL;
6433 6427 pp = sfhmep->hme_page;
6434 6428 if (pp) {
6435 6429 pml = sfmmu_mlist_enter(pp);
6436 6430 }
6437 6431 if (pp != sfhmep->hme_page) {
6438 6432 /*
6439 6433 * tte must have been unloaded
6440 6434 * underneath us. Recheck
6441 6435 */
6442 6436 ASSERT(pml);
6443 6437 sfmmu_mlist_exit(pml);
6444 6438 continue;
6445 6439 }
6446 6440
6447 6441 ASSERT(pp == NULL || sfmmu_mlist_held(pp));
6448 6442
6449 6443 if (clearflag == HAT_SYNC_ZERORM) {
6450 6444 ttemod = tte;
6451 6445 TTE_CLR_RM(&ttemod);
6452 6446 ret = sfmmu_modifytte_try(&tte, &ttemod,
6453 6447 &sfhmep->hme_tte);
6454 6448 if (ret < 0) {
6455 6449 if (pml) {
6456 6450 sfmmu_mlist_exit(pml);
6457 6451 }
6458 6452 continue;
6459 6453 }
6460 6454
6461 6455 if (ret > 0) {
6462 6456 sfmmu_tlb_demap(addr, sfmmup,
6463 6457 hmeblkp, 0, 0);
6464 6458 }
6465 6459 }
6466 6460 sfmmu_ttesync(sfmmup, addr, &tte, pp);
6467 6461 if (pml) {
6468 6462 sfmmu_mlist_exit(pml);
6469 6463 }
6470 6464 }
6471 6465 addr += TTEBYTES(ttesz);
6472 6466 sfhmep++;
6473 6467 }
6474 6468 return (addr);
6475 6469 }
6476 6470
6477 6471 /*
6478 6472 * This function will sync a tte to the page struct and it will
6479 6473 * update the hat stats. Currently it allows us to pass a NULL pp
6480 6474 * and we will simply update the stats. We may want to change this
6481 6475 * so we only keep stats for pages backed by pp's.
6482 6476 */
6483 6477 static void
6484 6478 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6485 6479 {
6486 6480 uint_t rm = 0;
6487 6481 int sz;
6488 6482 pgcnt_t npgs;
6489 6483
6490 6484 ASSERT(TTE_IS_VALID(ttep));
6491 6485
6492 6486 if (TTE_IS_NOSYNC(ttep)) {
6493 6487 return;
6494 6488 }
6495 6489
6496 6490 if (TTE_IS_REF(ttep)) {
6497 6491 rm = P_REF;
6498 6492 }
6499 6493 if (TTE_IS_MOD(ttep)) {
6500 6494 rm |= P_MOD;
6501 6495 }
6502 6496
6503 6497 if (rm == 0) {
6504 6498 return;
6505 6499 }
6506 6500
6507 6501 sz = TTE_CSZ(ttep);
6508 6502 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) {
6509 6503 int i;
6510 6504 caddr_t vaddr = addr;
6511 6505
6512 6506 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) {
6513 6507 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm);
6514 6508 }
6515 6509
6516 6510 }
6517 6511
6518 6512 /*
6519 6513 * XXX I want to use cas to update nrm bits but they
6520 6514 * currently belong in common/vm and not in hat where
6521 6515 * they should be.
6522 6516 * The nrm bits are protected by the same mutex as
6523 6517 * the one that protects the page's mapping list.
6524 6518 */
6525 6519 if (!pp)
6526 6520 return;
6527 6521 ASSERT(sfmmu_mlist_held(pp));
6528 6522 /*
6529 6523 * If the tte is for a large page, we need to sync all the
6530 6524 * pages covered by the tte.
6531 6525 */
6532 6526 if (sz != TTE8K) {
6533 6527 ASSERT(pp->p_szc != 0);
6534 6528 pp = PP_GROUPLEADER(pp, sz);
6535 6529 ASSERT(sfmmu_mlist_held(pp));
6536 6530 }
6537 6531
6538 6532 /* Get number of pages from tte size. */
6539 6533 npgs = TTEPAGES(sz);
6540 6534
6541 6535 do {
6542 6536 ASSERT(pp);
6543 6537 ASSERT(sfmmu_mlist_held(pp));
6544 6538 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) ||
6545 6539 ((rm & P_MOD) != 0 && !PP_ISMOD(pp)))
6546 6540 hat_page_setattr(pp, rm);
6547 6541
6548 6542 /*
6549 6543 * Are we done? If not, we must have a large mapping.
6550 6544 * For large mappings we need to sync the rest of the pages
6551 6545 * covered by this tte; go to the next page.
6552 6546 */
6553 6547 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp)));
6554 6548 }
6555 6549
6556 6550 /*
6557 6551 * Execute pre-callback handler of each pa_hment linked to pp
6558 6552 *
6559 6553 * Inputs:
6560 6554 * flag: either HAT_PRESUSPEND or HAT_SUSPEND.
6561 6555 * capture_cpus: pointer to return value (below)
6562 6556 *
6563 6557 * Returns:
6564 6558 * Propagates the subsystem callback return values back to the caller;
6565 6559 * returns 0 on success. If capture_cpus is non-NULL, the value returned
6566 6560 * is zero if all of the pa_hments are of a type that does not require
6567 6561 * capturing CPUs prior to suspending the mapping, else it is 1.
6568 6562 */
6569 6563 static int
6570 6564 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus)
6571 6565 {
6572 6566 struct sf_hment *sfhmep;
6573 6567 struct pa_hment *pahmep;
6574 6568 int (*f)(caddr_t, uint_t, uint_t, void *);
6575 6569 int ret;
6576 6570 id_t id;
6577 6571 int locked = 0;
6578 6572 kmutex_t *pml;
6579 6573
6580 6574 ASSERT(PAGE_EXCL(pp));
6581 6575 if (!sfmmu_mlist_held(pp)) {
6582 6576 pml = sfmmu_mlist_enter(pp);
6583 6577 locked = 1;
6584 6578 }
6585 6579
6586 6580 if (capture_cpus)
6587 6581 *capture_cpus = 0;
6588 6582
6589 6583 top:
6590 6584 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6591 6585 /*
6592 6586 * skip sf_hments corresponding to VA<->PA mappings;
6593 6587 * for pa_hment's, hme_tte.ll is zero
6594 6588 */
6595 6589 if (!IS_PAHME(sfhmep))
6596 6590 continue;
6597 6591
6598 6592 pahmep = sfhmep->hme_data;
6599 6593 ASSERT(pahmep != NULL);
6600 6594
6601 6595 /*
6602 6596 * skip if pre-handler has been called earlier in this loop
6603 6597 */
6604 6598 if (pahmep->flags & flag)
6605 6599 continue;
6606 6600
6607 6601 id = pahmep->cb_id;
6608 6602 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6609 6603 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0)
6610 6604 *capture_cpus = 1;
6611 6605 if ((f = sfmmu_cb_table[id].prehandler) == NULL) {
6612 6606 pahmep->flags |= flag;
6613 6607 continue;
6614 6608 }
6615 6609
6616 6610 /*
6617 6611 * Drop the mapping list lock to avoid locking order issues.
6618 6612 */
6619 6613 if (locked)
6620 6614 sfmmu_mlist_exit(pml);
6621 6615
6622 6616 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt);
6623 6617 if (ret != 0)
6624 6618 return (ret); /* caller must do the cleanup */
6625 6619
6626 6620 if (locked) {
6627 6621 pml = sfmmu_mlist_enter(pp);
6628 6622 pahmep->flags |= flag;
6629 6623 goto top;
6630 6624 }
6631 6625
6632 6626 pahmep->flags |= flag;
6633 6627 }
6634 6628
6635 6629 if (locked)
6636 6630 sfmmu_mlist_exit(pml);
6637 6631
6638 6632 return (0);
6639 6633 }
6640 6634
6641 6635 /*
6642 6636 * Execute post-callback handler of each pa_hment linked to pp
6643 6637 *
6644 6638 * Same overall assumptions and restrictions apply as for
6645 6639 * hat_pageprocess_precallbacks().
6646 6640 */
6647 6641 static void
6648 6642 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag)
6649 6643 {
6650 6644 pfn_t pgpfn = pp->p_pagenum;
6651 6645 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1;
6652 6646 pfn_t newpfn;
6653 6647 struct sf_hment *sfhmep;
6654 6648 struct pa_hment *pahmep;
6655 6649 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t);
6656 6650 id_t id;
6657 6651 int locked = 0;
6658 6652 kmutex_t *pml;
6659 6653
6660 6654 ASSERT(PAGE_EXCL(pp));
6661 6655 if (!sfmmu_mlist_held(pp)) {
6662 6656 pml = sfmmu_mlist_enter(pp);
6663 6657 locked = 1;
6664 6658 }
6665 6659
6666 6660 top:
6667 6661 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6668 6662 /*
6669 6663 * skip sf_hments corresponding to VA<->PA mappings;
6670 6664 * for pa_hment's, hme_tte.ll is zero
6671 6665 */
6672 6666 if (!IS_PAHME(sfhmep))
6673 6667 continue;
6674 6668
6675 6669 pahmep = sfhmep->hme_data;
6676 6670 ASSERT(pahmep != NULL);
6677 6671
6678 6672 if ((pahmep->flags & flag) == 0)
6679 6673 continue;
6680 6674
6681 6675 pahmep->flags &= ~flag;
6682 6676
6683 6677 id = pahmep->cb_id;
6684 6678 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid);
6685 6679 if ((f = sfmmu_cb_table[id].posthandler) == NULL)
6686 6680 continue;
6687 6681
6688 6682 /*
6689 6683 * Convert the base page PFN into the constituent PFN
6690 6684 * which is needed by the callback handler.
6691 6685 */
6692 6686 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask);
6693 6687
6694 6688 /*
6695 6689 * Drop the mapping list lock to avoid locking order issues.
6696 6690 */
6697 6691 if (locked)
6698 6692 sfmmu_mlist_exit(pml);
6699 6693
6700 6694 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn)
6701 6695 != 0)
6702 6696 panic("sfmmu: posthandler failed");
6703 6697
6704 6698 if (locked) {
6705 6699 pml = sfmmu_mlist_enter(pp);
6706 6700 goto top;
6707 6701 }
6708 6702 }
6709 6703
6710 6704 if (locked)
6711 6705 sfmmu_mlist_exit(pml);
6712 6706 }
6713 6707
6714 6708 /*
6715 6709 * Suspend locked kernel mapping
6716 6710 */
6717 6711 void
6718 6712 hat_pagesuspend(struct page *pp)
6719 6713 {
6720 6714 struct sf_hment *sfhmep;
6721 6715 sfmmu_t *sfmmup;
6722 6716 tte_t tte, ttemod;
6723 6717 struct hme_blk *hmeblkp;
6724 6718 caddr_t addr;
6725 6719 int index, cons;
6726 6720 cpuset_t cpuset;
6727 6721
6728 6722 ASSERT(PAGE_EXCL(pp));
6729 6723 ASSERT(sfmmu_mlist_held(pp));
6730 6724
6731 6725 mutex_enter(&kpr_suspendlock);
6732 6726
6733 6727 /*
6734 6728 * We're about to suspend a kernel mapping so mark this thread as
6735 6729 * non-traceable by DTrace. This prevents us from running into issues
6736 6730 * with probe context trying to touch a suspended page
6737 6731 * in the relocation codepath itself.
6738 6732 */
6739 6733 curthread->t_flag |= T_DONTDTRACE;
6740 6734
6741 6735 index = PP_MAPINDEX(pp);
6742 6736 cons = TTE8K;
6743 6737
6744 6738 retry:
6745 6739 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
6746 6740
6747 6741 if (IS_PAHME(sfhmep))
6748 6742 continue;
6749 6743
6750 6744 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons)
6751 6745 continue;
6752 6746
6753 6747 /*
6754 6748 * Loop until we successfully set the suspend bit in
6755 6749 * the TTE.
6756 6750 */
6757 6751 again:
6758 6752 sfmmu_copytte(&sfhmep->hme_tte, &tte);
6759 6753 ASSERT(TTE_IS_VALID(&tte));
6760 6754
6761 6755 ttemod = tte;
6762 6756 TTE_SET_SUSPEND(&ttemod);
6763 6757 if (sfmmu_modifytte_try(&tte, &ttemod,
6764 6758 &sfhmep->hme_tte) < 0)
6765 6759 goto again;
6766 6760
6767 6761 /*
6768 6762 * Invalidate TSB entry
6769 6763 */
6770 6764 hmeblkp = sfmmu_hmetohblk(sfhmep);
6771 6765
6772 6766 sfmmup = hblktosfmmu(hmeblkp);
6773 6767 ASSERT(sfmmup == ksfmmup);
6774 6768 ASSERT(!hmeblkp->hblk_shared);
6775 6769
6776 6770 addr = tte_to_vaddr(hmeblkp, tte);
6777 6771
6778 6772 /*
6779 6773 * No need to make sure that the TSB for this sfmmu is
6780 6774 * not being relocated since it is ksfmmup and thus it
6781 6775 * will never be relocated.
6782 6776 */
6783 6777 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
6784 6778
6785 6779 /*
6786 6780 * Update xcall stats
6787 6781 */
6788 6782 cpuset = cpu_ready_set;
6789 6783 CPUSET_DEL(cpuset, CPU->cpu_id);
6790 6784
6791 6785 /* LINTED: constant in conditional context */
6792 6786 SFMMU_XCALL_STATS(ksfmmup);
6793 6787
6794 6788 /*
6795 6789 * Flush TLB entry on remote CPUs
6796 6790 */
6797 6791 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
6798 6792 (uint64_t)ksfmmup);
6799 6793 xt_sync(cpuset);
6800 6794
6801 6795 /*
6802 6796 * Flush TLB entry on local CPU
6803 6797 */
6804 6798 vtag_flushpage(addr, (uint64_t)ksfmmup);
6805 6799 }
6806 6800
6807 6801 while (index != 0) {
6808 6802 index = index >> 1;
6809 6803 if (index != 0)
6810 6804 cons++;
6811 6805 if (index & 0x1) {
6812 6806 pp = PP_GROUPLEADER(pp, cons);
6813 6807 goto retry;
6814 6808 }
6815 6809 }
6816 6810 }
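
Reviewer note: the again: loop in hat_pagesuspend() is the file's standard optimistic-update idiom: snapshot the TTE, compute the new value, and retry if another CPU changed it first. A minimal sketch of the same pattern with a bare compare-and-swap, using atomic_cas_64() from <sys/atomic.h> (already included by this file); the helper name is hypothetical and the real code goes through sfmmu_copytte()/sfmmu_modifytte_try():

	/* Hypothetical helper: set a bit in a word shared with other CPUs. */
	static void
	set_bit_atomically(volatile uint64_t *wp, uint64_t bit)
	{
		uint64_t old, new;

		do {
			old = *wp;		/* snapshot the current value */
			new = old | bit;	/* desired value */
			/* retry if another CPU updated *wp in the meantime */
		} while (atomic_cas_64(wp, old, new) != old);
	}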
6817 6811
6818 6812 #ifdef DEBUG
6819 6813
6820 6814 #define N_PRLE 1024
6821 6815 struct prle {
6822 6816 page_t *targ;
6823 6817 page_t *repl;
6824 6818 int status;
6825 6819 int pausecpus;
6826 6820 hrtime_t whence;
6827 6821 };
6828 6822
6829 6823 static struct prle page_relocate_log[N_PRLE];
6830 6824 static int prl_entry;
6831 6825 static kmutex_t prl_mutex;
6832 6826
6833 6827 #define PAGE_RELOCATE_LOG(t, r, s, p) \
6834 6828 mutex_enter(&prl_mutex); \
6835 6829 page_relocate_log[prl_entry].targ = *(t); \
6836 6830 page_relocate_log[prl_entry].repl = *(r); \
6837 6831 page_relocate_log[prl_entry].status = (s); \
6838 6832 page_relocate_log[prl_entry].pausecpus = (p); \
6839 6833 page_relocate_log[prl_entry].whence = gethrtime(); \
6840 6834 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \
6841 6835 mutex_exit(&prl_mutex);
6842 6836
6843 6837 #else /* !DEBUG */
6844 6838 #define PAGE_RELOCATE_LOG(t, r, s, p)
6845 6839 #endif
6846 6840
6847 6841 /*
6848 6842 * Core Kernel Page Relocation Algorithm
6849 6843 *
6850 6844 * Input:
6851 6845 *
6852 6846 * target : constituent pages are SE_EXCL locked.
6853 6847 * replacement: constituent pages are SE_EXCL locked.
6854 6848 *
6855 6849 * Output:
6856 6850 *
6857 6851 * nrelocp: number of pages relocated
6858 6852 */
6859 6853 int
6860 6854 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6861 6855 {
6862 6856 page_t *targ, *repl;
6863 6857 page_t *tpp, *rpp;
6864 6858 kmutex_t *low, *high;
6865 6859 spgcnt_t npages, i;
6866 6860 page_t *pl = NULL;
6867 6861 int old_pil;
6868 6862 cpuset_t cpuset;
6869 6863 int cap_cpus;
6870 6864 int ret;
6871 6865 #ifdef VAC
6872 6866 int cflags = 0;
6873 6867 #endif
6874 6868
6875 6869 if (!kcage_on || PP_ISNORELOC(*target)) {
6876 6870 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1);
6877 6871 return (EAGAIN);
6878 6872 }
6879 6873
6880 6874 mutex_enter(&kpr_mutex);
6881 6875 kreloc_thread = curthread;
6882 6876
6883 6877 targ = *target;
6884 6878 repl = *replacement;
6885 6879 ASSERT(repl != NULL);
6886 6880 ASSERT(targ->p_szc == repl->p_szc);
6887 6881
6888 6882 npages = page_get_pagecnt(targ->p_szc);
6889 6883
6890 6884 /*
6891 6885 * unload VA<->PA mappings that are not locked
6892 6886 */
6893 6887 tpp = targ;
6894 6888 for (i = 0; i < npages; i++) {
6895 6889 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC);
6896 6890 tpp++;
6897 6891 }
6898 6892
6899 6893 /*
6900 6894 * Do "presuspend" callbacks, in a context from which we can still
6901 6895 * block as needed. Note that we don't hold the mapping list lock
6902 6896 * of "targ" at this point due to potential locking order issues;
6903 6897 * we assume that between the hat_pageunload() above and holding
6904 6898 * the SE_EXCL lock that the mapping list *cannot* change at this
6905 6899 * point.
6906 6900 */
6907 6901 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus);
6908 6902 if (ret != 0) {
6909 6903 /*
6910 6904 * EIO translates to fatal error, for all others cleanup
6911 6905 * and return EAGAIN.
6912 6906 */
6913 6907 ASSERT(ret != EIO);
6914 6908 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND);
6915 6909 PAGE_RELOCATE_LOG(target, replacement, ret, -1);
6916 6910 kreloc_thread = NULL;
6917 6911 mutex_exit(&kpr_mutex);
6918 6912 return (EAGAIN);
6919 6913 }
6920 6914
6921 6915 /*
6922 6916 * acquire p_mapping list lock for both the target and replacement
6923 6917 * root pages.
6924 6918 *
6925 6919 * low and high refer to the need to grab the mlist locks in a
6926 6920 * specific order in order to prevent race conditions. Thus the
6927 6921 * lower lock must be grabbed before the higher lock.
6928 6922 *
6929 6923 * This will block hat_unload's accessing p_mapping list. Since
6930 6924 * we have SE_EXCL lock, hat_memload and hat_pageunload will be
6931 6925 * blocked. Thus, no one else will be accessing the p_mapping list
6932 6926 * while we suspend and reload the locked mapping below.
6933 6927 */
6934 6928 tpp = targ;
6935 6929 rpp = repl;
6936 6930 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high);
6937 6931
6938 6932 kpreempt_disable();
6939 6933
6940 6934 /*
6941 6935 * We raise our PIL to 13 so that we don't get captured by
6942 6936 * another CPU or pinned by an interrupt thread. We can't go to
6943 6937 * PIL 14 since the nexus driver(s) may need to interrupt at
6944 6938 * that level in the case of IOMMU pseudo mappings.
6945 6939 */
6946 6940 cpuset = cpu_ready_set;
6947 6941 CPUSET_DEL(cpuset, CPU->cpu_id);
6948 6942 if (!cap_cpus || CPUSET_ISNULL(cpuset)) {
6949 6943 old_pil = splr(XCALL_PIL);
6950 6944 } else {
6951 6945 old_pil = -1;
6952 6946 xc_attention(cpuset);
6953 6947 }
6954 6948 ASSERT(getpil() == XCALL_PIL);
6955 6949
6956 6950 /*
6957 6951 * Now do suspend callbacks. In the case of an IOMMU mapping
6958 6952 * this will suspend all DMA activity to the page while it is
6959 6953 * being relocated. Since we are well above LOCK_LEVEL and CPUs
6960 6954 * may be captured at this point we should have acquired any needed
6961 6955 * locks in the presuspend callback.
6962 6956 */
6963 6957 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL);
6964 6958 if (ret != 0) {
6965 6959 repl = targ;
6966 6960 goto suspend_fail;
6967 6961 }
6968 6962
6969 6963 /*
6970 6964 * Raise the PIL yet again, this time to block all high-level
6971 6965 * interrupts on this CPU. This is necessary to prevent an
6972 6966 * interrupt routine from pinning the thread which holds the
6973 6967 * mapping suspended and then touching the suspended page.
6974 6968 *
6975 6969 * Once the page is suspended we also need to be careful to
6976 6970 * avoid calling any functions which touch any seg_kmem memory
6977 6971 * since that memory may be backed by the very page we are
6978 6972 * relocating in here!
6979 6973 */
6980 6974 hat_pagesuspend(targ);
6981 6975
6982 6976 /*
6983 6977 * Now that we are confident everybody has stopped using this page,
6984 6978 * copy the page contents. Note we use a physical copy to prevent
6985 6979 * locking issues and to avoid fpRAS because we can't handle it in
6986 6980 * this context.
6987 6981 */
6988 6982 for (i = 0; i < npages; i++, tpp++, rpp++) {
6989 6983 #ifdef VAC
6990 6984 /*
6991 6985 * If the replacement has a different vcolor than
6992 6986 * the one being replaced, we need to handle VAC
6993 6987 * consistency for it just as we were setting up
6994 6988 * a new mapping to it.
6995 6989 */
6996 6990 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) &&
6997 6991 (tpp->p_vcolor != rpp->p_vcolor) &&
6998 6992 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) {
6999 6993 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp));
7000 6994 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp),
7001 6995 rpp->p_pagenum);
7002 6996 }
7003 6997 #endif
7004 6998 /*
7005 6999 * Copy the contents of the page.
7006 7000 */
7007 7001 ppcopy_kernel(tpp, rpp);
7008 7002 }
7009 7003
7010 7004 tpp = targ;
7011 7005 rpp = repl;
7012 7006 for (i = 0; i < npages; i++, tpp++, rpp++) {
7013 7007 /*
7014 7008 * Copy attributes. VAC consistency was handled above,
7015 7009 * if required.
7016 7010 */
7017 7011 rpp->p_nrm = tpp->p_nrm;
7018 7012 tpp->p_nrm = 0;
7019 7013 rpp->p_index = tpp->p_index;
7020 7014 tpp->p_index = 0;
7021 7015 #ifdef VAC
7022 7016 rpp->p_vcolor = tpp->p_vcolor;
7023 7017 #endif
7024 7018 }
7025 7019
7026 7020 /*
7027 7021 * First, unsuspend the page, if we set the suspend bit, and transfer
7028 7022 * the mapping list from the target page to the replacement page.
7029 7023 * Next process postcallbacks; since pa_hment's are linked only to the
7030 7024 * p_mapping list of root page, we don't iterate over the constituent
7031 7025 * pages.
7032 7026 */
7033 7027 hat_pagereload(targ, repl);
7034 7028
7035 7029 suspend_fail:
7036 7030 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND);
7037 7031
7038 7032 /*
7039 7033 * Now lower our PIL and release any captured CPUs since we
7040 7034 * are out of the "danger zone". After this it will again be
7041 7035 * safe to acquire adaptive mutex locks, or to drop them...
7042 7036 */
7043 7037 if (old_pil != -1) {
7044 7038 splx(old_pil);
7045 7039 } else {
7046 7040 xc_dismissed(cpuset);
7047 7041 }
7048 7042
7049 7043 kpreempt_enable();
7050 7044
7051 7045 sfmmu_mlist_reloc_exit(low, high);
7052 7046
7053 7047 /*
7054 7048 * Postsuspend callbacks should drop any locks held across
7055 7049 * the suspend callbacks. As before, we don't hold the mapping
7056 7050 * list lock at this point. Our assumption is that the mapping
7057 7051 * list still can't change due to our holding SE_EXCL lock and
7058 7052 * there being no unlocked mappings left. Hence the restriction
7059 7053 * on calling context to hat_delete_callback()
7060 7054 */
7061 7055 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND);
7062 7056 if (ret != 0) {
7063 7057 /*
7064 7058 * The second presuspend call failed: we got here through
7065 7059 * the suspend_fail label above.
7066 7060 */
7067 7061 ASSERT(ret != EIO);
7068 7062 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus);
7069 7063 kreloc_thread = NULL;
7070 7064 mutex_exit(&kpr_mutex);
7071 7065 return (EAGAIN);
7072 7066 }
7073 7067
7074 7068 /*
7075 7069 * Now that we're out of the performance critical section we can
7076 7070 * take care of updating the hash table, since we still
7077 7071 * hold all the pages locked SE_EXCL at this point we
7078 7072 * needn't worry about things changing out from under us.
7079 7073 */
7080 7074 tpp = targ;
7081 7075 rpp = repl;
7082 7076 for (i = 0; i < npages; i++, tpp++, rpp++) {
7083 7077
7084 7078 /*
7085 7079 * replace targ with replacement in page_hash table
7086 7080 */
7087 7081 targ = tpp;
7088 7082 page_relocate_hash(rpp, targ);
7089 7083
7090 7084 /*
7091 7085 * concatenate target; caller of platform_page_relocate()
7092 7086 * expects target to be concatenated after returning.
7093 7087 */
7094 7088 ASSERT(targ->p_next == targ);
7095 7089 ASSERT(targ->p_prev == targ);
7096 7090 page_list_concat(&pl, &targ);
7097 7091 }
7098 7092
7099 7093 ASSERT(*target == pl);
7100 7094 *nrelocp = npages;
7101 7095 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus);
7102 7096 kreloc_thread = NULL;
7103 7097 mutex_exit(&kpr_mutex);
7104 7098 return (0);
7105 7099 }
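
Reviewer note: a hedged caller-side sketch of the contract documented above (the wrapper function is hypothetical); both page lists must already be SE_EXCL locked:

	static int
	relocate_one_group(page_t *targ, page_t *repl)
	{
		spgcnt_t nreloc;
		int err;

		err = hat_page_relocate(&targ, &repl, &nreloc);
		if (err != 0) {
			/* EAGAIN: relocation refused; the caller retries later */
			return (err);
		}
		/* nreloc constituent pages now live at the replacement PFNs */
		return (0);
	}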
7106 7100
7107 7101 /*
7108 7102 * Called when stray pa_hments are found attached to a page which is
7109 7103 * being freed. Notify the subsystem which attached the pa_hment of
7110 7104 * the error if it registered a suitable handler, else panic.
7111 7105 */
7112 7106 static void
7113 7107 sfmmu_pahment_leaked(struct pa_hment *pahmep)
7114 7108 {
7115 7109 id_t cb_id = pahmep->cb_id;
7116 7110
7117 7111 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid);
7118 7112 if (sfmmu_cb_table[cb_id].errhandler != NULL) {
7119 7113 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len,
7120 7114 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0)
7121 7115 return; /* non-fatal */
7122 7116 }
7123 7117 panic("pa_hment leaked: 0x%p", (void *)pahmep);
7124 7118 }
7125 7119
7126 7120 /*
7127 7121 * Remove all mappings to page 'pp'.
7128 7122 */
7129 7123 int
7130 7124 hat_pageunload(struct page *pp, uint_t forceflag)
7131 7125 {
7132 7126 struct page *origpp = pp;
7133 7127 struct sf_hment *sfhme, *tmphme;
7134 7128 struct hme_blk *hmeblkp;
7135 7129 kmutex_t *pml;
7136 7130 #ifdef VAC
7137 7131 kmutex_t *pmtx;
7138 7132 #endif
7139 7133 cpuset_t cpuset, tset;
7140 7134 int index, cons;
7141 7135 int xhme_blks;
7142 7136 int pa_hments;
7143 7137
7144 7138 ASSERT(PAGE_EXCL(pp));
7145 7139
7146 7140 retry_xhat:
7147 7141 tmphme = NULL;
7148 7142 xhme_blks = 0;
7149 7143 pa_hments = 0;
7150 7144 CPUSET_ZERO(cpuset);
7151 7145
7152 7146 pml = sfmmu_mlist_enter(pp);
7153 7147
7154 7148 #ifdef VAC
7155 7149 if (pp->p_kpmref)
7156 7150 sfmmu_kpm_pageunload(pp);
7157 7151 ASSERT(!PP_ISMAPPED_KPM(pp));
7158 7152 #endif
7159 7153 /*
7160 7154 * Clear vpm reference. Since the page is exclusively locked
7161 7155 * vpm cannot be referencing it.
7162 7156 */
7163 7157 if (vpm_enable) {
7164 7158 pp->p_vpmref = 0;
7165 7159 }
7166 7160
7167 7161 index = PP_MAPINDEX(pp);
7168 7162 cons = TTE8K;
7169 7163 retry:
7170 7164 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7171 7165 tmphme = sfhme->hme_next;
7172 7166
7173 7167 if (IS_PAHME(sfhme)) {
7174 7168 ASSERT(sfhme->hme_data != NULL);
7175 7169 pa_hments++;
7176 7170 continue;
7177 7171 }
7178 7172
7179 7173 hmeblkp = sfmmu_hmetohblk(sfhme);
7180 7174 if (hmeblkp->hblk_xhat_bit) {
7181 7175 struct xhat_hme_blk *xblk =
7182 7176 (struct xhat_hme_blk *)hmeblkp;
7183 7177
7184 7178 (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7185 7179 pp, forceflag, XBLK2PROVBLK(xblk));
7186 7180
7187 7181 xhme_blks = 1;
7188 7182 continue;
7189 7183 }
7190 7184
7191 7185 /*
7192 7186 * If there are kernel mappings don't unload them, they will
7193 7187 * be suspended.
7194 7188 */
7195 7189 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7196 7190 hmeblkp->hblk_tag.htag_id == ksfmmup)
7197 7191 continue;
7198 7192
7199 7193 tset = sfmmu_pageunload(pp, sfhme, cons);
7200 7194 CPUSET_OR(cpuset, tset);
7201 7195 }
7202 7196
7203 7197 while (index != 0) {
7204 7198 index = index >> 1;
7205 7199 if (index != 0)
7206 7200 cons++;
7207 7201 if (index & 0x1) {
7208 7202 /* Go to leading page */
7209 7203 pp = PP_GROUPLEADER(pp, cons);
7210 7204 ASSERT(sfmmu_mlist_held(pp));
7211 7205 goto retry;
7212 7206 }
7213 7207 }
7214 7208
7215 7209 /*
7216 7210 * cpuset may be empty if the page was only mapped by segkpm,
7217 7211 * in which case we won't actually cross-trap.
7218 7212 */
7219 7213 xt_sync(cpuset);
7220 7214
7221 7215 /*
7222 7216 * The page should have no mappings at this point, unless
7223 7217 * we were called from hat_page_relocate() in which case we
7224 7218 * leave the locked mappings which will be suspended later.
7225 7219 */
7226 7220 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
7227 7221 (forceflag == SFMMU_KERNEL_RELOC));
7228 7222
7229 7223 #ifdef VAC
7230 7224 if (PP_ISTNC(pp)) {
7231 7225 if (cons == TTE8K) {
7232 7226 pmtx = sfmmu_page_enter(pp);
7233 7227 PP_CLRTNC(pp);
7234 7228 sfmmu_page_exit(pmtx);
7235 7229 } else {
7236 7230 conv_tnc(pp, cons);
7237 7231 }
7238 7232 }
7239 7233 #endif /* VAC */
7240 7234
7241 7235 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) {
7242 7236 /*
7243 7237 * Unlink any pa_hments and free them, calling back
7244 7238 * the responsible subsystem to notify it of the error.
7245 7239 * This can occur in situations such as drivers leaking
7246 7240 * DMA handles: naughty, but common enough that we'd like
7247 7241 * to keep the system running rather than bringing it
7248 7242 * down with an obscure error like "pa_hment leaked"
7249 7243 * which doesn't aid the user in debugging their driver.
7250 7244 */
7251 7245 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7252 7246 tmphme = sfhme->hme_next;
7253 7247 if (IS_PAHME(sfhme)) {
7254 7248 struct pa_hment *pahmep = sfhme->hme_data;
7255 7249 sfmmu_pahment_leaked(pahmep);
7256 7250 HME_SUB(sfhme, pp);
7257 7251 kmem_cache_free(pa_hment_cache, pahmep);
7258 7252 }
7259 7253 }
7260 7254
7261 7255 ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
7262 7256 }
7263 7257
7264 7258 sfmmu_mlist_exit(pml);
7265 7259
7266 7260 /*
7267 7261 * XHAT may not have finished unloading pages
7268 7262 * because some other thread was waiting for
7269 7263 * mlist lock and XHAT_PAGEUNLOAD let it do
7270 7264 * the job.
7271 7265 */
7272 7266 if (xhme_blks) {
7273 7267 pp = origpp;
7274 7268 goto retry_xhat;
7275 7269 }
7276 7270
7277 7271 return (0);
7278 7272 }
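
Reviewer note: the index/cons walk above recurs throughout this file (hat_pagesuspend(), hat_pagesync(), hat_page_clrwrt(), ...). An annotated copy of the idiom, assuming each bit of PP_MAPINDEX() records the page's membership in one larger mapping size:

	index = PP_MAPINDEX(pp);	/* one bit per large mapping size present */
	cons = TTE8K;
	while (index != 0) {
		index = index >> 1;	/* move to the next larger size */
		if (index != 0)
			cons++;		/* cons now names that TTE size */
		if (index & 0x1) {
			/* redo the per-size pass rooted at that size's group leader */
			pp = PP_GROUPLEADER(pp, cons);
			/* ... jump back to the retry label ... */
		}
	}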
7279 7273
7280 7274 cpuset_t
7281 7275 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7282 7276 {
7283 7277 struct hme_blk *hmeblkp;
7284 7278 sfmmu_t *sfmmup;
7285 7279 tte_t tte, ttemod;
7286 7280 #ifdef DEBUG
7287 7281 tte_t orig_old;
7288 7282 #endif /* DEBUG */
7289 7283 caddr_t addr;
7290 7284 int ttesz;
7291 7285 int ret;
7292 7286 cpuset_t cpuset;
7293 7287
7294 7288 ASSERT(pp != NULL);
7295 7289 ASSERT(sfmmu_mlist_held(pp));
7296 7290 ASSERT(!PP_ISKAS(pp));
7297 7291
7298 7292 CPUSET_ZERO(cpuset);
7299 7293
7300 7294 hmeblkp = sfmmu_hmetohblk(sfhme);
7301 7295
7302 7296 readtte:
7303 7297 sfmmu_copytte(&sfhme->hme_tte, &tte);
7304 7298 if (TTE_IS_VALID(&tte)) {
7305 7299 sfmmup = hblktosfmmu(hmeblkp);
7306 7300 ttesz = get_hblk_ttesz(hmeblkp);
7307 7301 /*
7308 7302 * Only unload mappings of 'cons' size.
7309 7303 */
7310 7304 if (ttesz != cons)
7311 7305 return (cpuset);
7312 7306
7313 7307 /*
7314 7308 * Note that we have p_mapping lock, but no hash lock here.
7315 7309 * hblk_unload() has to have both hash lock AND p_mapping
7316 7310 * lock before it tries to modify tte. So, the tte could
7317 7311 * not become invalid in the sfmmu_modifytte_try() below.
7318 7312 */
7319 7313 ttemod = tte;
7320 7314 #ifdef DEBUG
7321 7315 orig_old = tte;
7322 7316 #endif /* DEBUG */
7323 7317
7324 7318 TTE_SET_INVALID(&ttemod);
7325 7319 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7326 7320 if (ret < 0) {
7327 7321 #ifdef DEBUG
7328 7322 /* only R/M bits can change. */
7329 7323 chk_tte(&orig_old, &tte, &ttemod, hmeblkp);
7330 7324 #endif /* DEBUG */
7331 7325 goto readtte;
7332 7326 }
7333 7327
7334 7328 if (ret == 0) {
7335 7329 panic("pageunload: cas failed?");
7336 7330 }
7337 7331
7338 7332 addr = tte_to_vaddr(hmeblkp, tte);
7339 7333
7340 7334 if (hmeblkp->hblk_shared) {
7341 7335 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7342 7336 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7343 7337 sf_region_t *rgnp;
7344 7338 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7345 7339 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7346 7340 ASSERT(srdp != NULL);
7347 7341 rgnp = srdp->srd_hmergnp[rid];
7348 7342 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
7349 7343 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1);
7350 7344 sfmmu_ttesync(NULL, addr, &tte, pp);
7351 7345 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0);
7352 7346 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]);
7353 7347 } else {
7354 7348 sfmmu_ttesync(sfmmup, addr, &tte, pp);
7355 7349 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]);
7356 7350
7357 7351 /*
7358 7352 * We need to flush the page from the virtual cache
7359 7353 * in order to prevent a virtual cache alias
7360 7354 * inconsistency. The particular scenario we need
7361 7355 * to worry about is:
7362 7356 * Given: va1 and va2 are two virtual addresses that
7363 7357 * alias and will map the same physical address.
7364 7358 * 1. mapping exists from va1 to pa and data has
7365 7359 * been read into the cache.
7366 7360 * 2. unload va1.
7367 7361 * 3. load va2 and modify data using va2.
7368 7362 * 4. unload va2.
7369 7363 * 5. load va1 and reference data. Unless we flush
7370 7364 * the data cache when we unload we will get
7371 7365 * stale data.
7372 7366 * This scenario is taken care of by using virtual
7373 7367 * page coloring.
7374 7368 */
7375 7369 if (sfmmup->sfmmu_ismhat) {
7376 7370 /*
7377 7371 * Flush TSBs, TLBs and caches
7378 7372 * of every process
7379 7373 * sharing this ism segment.
7380 7374 */
7381 7375 sfmmu_hat_lock_all();
7382 7376 mutex_enter(&ism_mlist_lock);
7383 7377 kpreempt_disable();
7384 7378 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp,
7385 7379 pp->p_pagenum, CACHE_NO_FLUSH);
7386 7380 kpreempt_enable();
7387 7381 mutex_exit(&ism_mlist_lock);
7388 7382 sfmmu_hat_unlock_all();
7389 7383 cpuset = cpu_ready_set;
7390 7384 } else {
7391 7385 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7392 7386 cpuset = sfmmup->sfmmu_cpusran;
7393 7387 }
7394 7388 }
7395 7389
7396 7390 /*
7397 7391 * Hme_sub has to run after ttesync() and a_rss update.
7398 7392 * See hblk_unload().
7399 7393 */
7400 7394 HME_SUB(sfhme, pp);
7401 7395 membar_stst();
7402 7396
7403 7397 /*
7404 7398 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS)
7405 7399 * since pteload may have done a HME_ADD() right after
7406 7400 * we did the HME_SUB() above. Hmecnt is now maintained
7407 7401 * by cas only; no lock guarantees its value. The only
7408 7402 * guarantee we have is that the hmecnt should not be less than
7409 7403 * what it should be, so the hblk will not be taken away.
7410 7404 * It's also important that we decremented the hmecnt after
7411 7405 * we are done with hmeblkp so that this hmeblk won't be
7412 7406 * stolen.
7413 7407 */
7414 7408 ASSERT(hmeblkp->hblk_hmecnt > 0);
7415 7409 ASSERT(hmeblkp->hblk_vcnt > 0);
7416 7410 atomic_dec_16(&hmeblkp->hblk_vcnt);
7417 7411 atomic_dec_16(&hmeblkp->hblk_hmecnt);
7418 7412 /*
7419 7413 * This is bug 4063182.
7420 7414 * XXX: fixme
7421 7415 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt ||
7422 7416 * !hmeblkp->hblk_lckcnt);
7423 7417 */
7424 7418 } else {
7425 7419 panic("invalid tte? pp %p &tte %p",
7426 7420 (void *)pp, (void *)&tte);
7427 7421 }
7428 7422
7429 7423 return (cpuset);
7430 7424 }
7431 7425
7432 7426 /*
7433 7427 * While relocating a kernel page, this function will move the mappings
7434 7428 * from tpp to dpp and modify any associated data with these mappings.
7435 7429 * It also unsuspends the suspended kernel mapping.
7436 7430 */
7437 7431 static void
7438 7432 hat_pagereload(struct page *tpp, struct page *dpp)
7439 7433 {
7440 7434 struct sf_hment *sfhme;
7441 7435 tte_t tte, ttemod;
7442 7436 int index, cons;
7443 7437
7444 7438 ASSERT(getpil() == PIL_MAX);
7445 7439 ASSERT(sfmmu_mlist_held(tpp));
7446 7440 ASSERT(sfmmu_mlist_held(dpp));
7447 7441
7448 7442 index = PP_MAPINDEX(tpp);
7449 7443 cons = TTE8K;
7450 7444
7451 7445 /* Update real mappings to the page */
7452 7446 retry:
7453 7447 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) {
7454 7448 if (IS_PAHME(sfhme))
7455 7449 continue;
7456 7450 sfmmu_copytte(&sfhme->hme_tte, &tte);
7457 7451 ttemod = tte;
7458 7452
7459 7453 /*
7460 7454 * replace old pfn with new pfn in TTE
7461 7455 */
7462 7456 PFN_TO_TTE(ttemod, dpp->p_pagenum);
7463 7457
7464 7458 /*
7465 7459 * clear suspend bit
7466 7460 */
7467 7461 ASSERT(TTE_IS_SUSPEND(&ttemod));
7468 7462 TTE_CLR_SUSPEND(&ttemod);
7469 7463
7470 7464 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0)
7471 7465 panic("hat_pagereload(): sfmmu_modifytte_try() failed");
7472 7466
7473 7467 /*
7474 7468 * set hme_page point to new page
7475 7469 */
7476 7470 sfhme->hme_page = dpp;
7477 7471 }
7478 7472
7479 7473 /*
7480 7474 * move p_mapping list from old page to new page
7481 7475 */
7482 7476 dpp->p_mapping = tpp->p_mapping;
7483 7477 tpp->p_mapping = NULL;
7484 7478 dpp->p_share = tpp->p_share;
7485 7479 tpp->p_share = 0;
7486 7480
7487 7481 while (index != 0) {
7488 7482 index = index >> 1;
7489 7483 if (index != 0)
7490 7484 cons++;
7491 7485 if (index & 0x1) {
7492 7486 tpp = PP_GROUPLEADER(tpp, cons);
7493 7487 dpp = PP_GROUPLEADER(dpp, cons);
7494 7488 goto retry;
7495 7489 }
7496 7490 }
7497 7491
7498 7492 curthread->t_flag &= ~T_DONTDTRACE;
7499 7493 mutex_exit(&kpr_suspendlock);
7500 7494 }
7501 7495
7502 7496 uint_t
7503 7497 hat_pagesync(struct page *pp, uint_t clearflag)
7504 7498 {
7505 7499 struct sf_hment *sfhme, *tmphme = NULL;
7506 7500 struct hme_blk *hmeblkp;
7507 7501 kmutex_t *pml;
7508 7502 cpuset_t cpuset, tset;
7509 7503 int index, cons;
7510 7504 extern ulong_t po_share;
7511 7505 page_t *save_pp = pp;
7512 7506 int stop_on_sh = 0;
7513 7507 uint_t shcnt;
7514 7508
7515 7509 CPUSET_ZERO(cpuset);
7516 7510
7517 7511 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) {
7518 7512 return (PP_GENERIC_ATTR(pp));
7519 7513 }
7520 7514
7521 7515 if ((clearflag & HAT_SYNC_ZERORM) == 0) {
7522 7516 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) {
7523 7517 return (PP_GENERIC_ATTR(pp));
7524 7518 }
7525 7519 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) {
7526 7520 return (PP_GENERIC_ATTR(pp));
7527 7521 }
7528 7522 if (clearflag & HAT_SYNC_STOPON_SHARED) {
7529 7523 if (pp->p_share > po_share) {
7530 7524 hat_page_setattr(pp, P_REF);
7531 7525 return (PP_GENERIC_ATTR(pp));
7532 7526 }
7533 7527 stop_on_sh = 1;
7534 7528 shcnt = 0;
7535 7529 }
7536 7530 }
7537 7531
7538 7532 clearflag &= ~HAT_SYNC_STOPON_SHARED;
7539 7533 pml = sfmmu_mlist_enter(pp);
7540 7534 index = PP_MAPINDEX(pp);
7541 7535 cons = TTE8K;
7542 7536 retry:
7543 7537 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7544 7538 /*
7545 7539 * We need to save the next hment on the list since
7546 7540 * it is possible for pagesync to remove an invalid hment
7547 7541 * from the list.
7548 7542 */
7549 7543 tmphme = sfhme->hme_next;
7550 7544 if (IS_PAHME(sfhme))
7551 7545 continue;
7552 7546 /*
7553 7547 * If we are looking for large mappings and this hme doesn't
7554 7548 * reach the range we are seeking, just ignore it.
7555 7549 */
7556 7550 hmeblkp = sfmmu_hmetohblk(sfhme);
7557 7551 if (hmeblkp->hblk_xhat_bit)
7558 7552 continue;
7559 7553
7560 7554 if (hme_size(sfhme) < cons)
7561 7555 continue;
7562 7556
7563 7557 if (stop_on_sh) {
7564 7558 if (hmeblkp->hblk_shared) {
7565 7559 sf_srd_t *srdp = hblktosrd(hmeblkp);
7566 7560 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7567 7561 sf_region_t *rgnp;
7568 7562 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7569 7563 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7570 7564 ASSERT(srdp != NULL);
7571 7565 rgnp = srdp->srd_hmergnp[rid];
7572 7566 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
7573 7567 rgnp, rid);
7574 7568 shcnt += rgnp->rgn_refcnt;
7575 7569 } else {
7576 7570 shcnt++;
7577 7571 }
7578 7572 if (shcnt > po_share) {
7579 7573 /*
7580 7574 * tell the pager to spare the page this time
7581 7575 * around.
7582 7576 */
7583 7577 hat_page_setattr(save_pp, P_REF);
7584 7578 index = 0;
7585 7579 break;
7586 7580 }
7587 7581 }
7588 7582 tset = sfmmu_pagesync(pp, sfhme,
7589 7583 clearflag & ~HAT_SYNC_STOPON_RM);
7590 7584 CPUSET_OR(cpuset, tset);
7591 7585
7592 7586 /*
7593 7587 * If clearflag is HAT_SYNC_DONTZERO, break out as soon
7594 7588 * as the "ref" or "mod" is set or share cnt exceeds po_share.
7595 7589 */
7596 7590 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO &&
7597 7591 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
7598 7592 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) {
7599 7593 index = 0;
7600 7594 break;
7601 7595 }
7602 7596 }
7603 7597
7604 7598 while (index) {
7605 7599 index = index >> 1;
7606 7600 cons++;
7607 7601 if (index & 0x1) {
7608 7602 /* Go to leading page */
7609 7603 pp = PP_GROUPLEADER(pp, cons);
7610 7604 goto retry;
7611 7605 }
7612 7606 }
7613 7607
7614 7608 xt_sync(cpuset);
7615 7609 sfmmu_mlist_exit(pml);
7616 7610 return (PP_GENERIC_ATTR(save_pp));
7617 7611 }
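
Reviewer note: a hedged usage sketch of the clearflag combinations hat_pagesync() handles; the wrapper is hypothetical:

	/*
	 * Pageout-style query: read ref/mod without clearing them and stop
	 * scanning mappings as soon as a modified one is found.
	 */
	static int
	page_is_dirty(page_t *pp)
	{
		uint_t attr;

		attr = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
		return ((attr & P_MOD) != 0);
	}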
7618 7612
7619 7613 /*
7620 7614 * Get all the hardware dependent attributes for a page struct
7621 7615 */
7622 7616 static cpuset_t
7623 7617 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7624 7618 uint_t clearflag)
7625 7619 {
7626 7620 caddr_t addr;
7627 7621 tte_t tte, ttemod;
7628 7622 struct hme_blk *hmeblkp;
7629 7623 int ret;
7630 7624 sfmmu_t *sfmmup;
7631 7625 cpuset_t cpuset;
7632 7626
7633 7627 ASSERT(pp != NULL);
7634 7628 ASSERT(sfmmu_mlist_held(pp));
7635 7629 ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
7636 7630 (clearflag == HAT_SYNC_ZERORM));
7637 7631
7638 7632 SFMMU_STAT(sf_pagesync);
7639 7633
7640 7634 CPUSET_ZERO(cpuset);
7641 7635
7642 7636 sfmmu_pagesync_retry:
7643 7637
7644 7638 sfmmu_copytte(&sfhme->hme_tte, &tte);
7645 7639 if (TTE_IS_VALID(&tte)) {
7646 7640 hmeblkp = sfmmu_hmetohblk(sfhme);
7647 7641 sfmmup = hblktosfmmu(hmeblkp);
7648 7642 addr = tte_to_vaddr(hmeblkp, tte);
7649 7643 if (clearflag == HAT_SYNC_ZERORM) {
7650 7644 ttemod = tte;
7651 7645 TTE_CLR_RM(&ttemod);
7652 7646 ret = sfmmu_modifytte_try(&tte, &ttemod,
7653 7647 &sfhme->hme_tte);
7654 7648 if (ret < 0) {
7655 7649 /*
7656 7650 * cas failed and the new value is not what
7657 7651 * we want.
7658 7652 */
7659 7653 goto sfmmu_pagesync_retry;
7660 7654 }
7661 7655
7662 7656 if (ret > 0) {
7663 7657 /* we win the cas */
7664 7658 if (hmeblkp->hblk_shared) {
7665 7659 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7666 7660 uint_t rid =
7667 7661 hmeblkp->hblk_tag.htag_rid;
7668 7662 sf_region_t *rgnp;
7669 7663 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7670 7664 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7671 7665 ASSERT(srdp != NULL);
7672 7666 rgnp = srdp->srd_hmergnp[rid];
7673 7667 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7674 7668 srdp, rgnp, rid);
7675 7669 cpuset = sfmmu_rgntlb_demap(addr,
7676 7670 rgnp, hmeblkp, 1);
7677 7671 } else {
7678 7672 sfmmu_tlb_demap(addr, sfmmup, hmeblkp,
7679 7673 0, 0);
7680 7674 cpuset = sfmmup->sfmmu_cpusran;
7681 7675 }
7682 7676 }
7683 7677 }
7684 7678 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr,
7685 7679 &tte, pp);
7686 7680 }
7687 7681 return (cpuset);
7688 7682 }
7689 7683
7690 7684 /*
7691 7685 * Remove write permission from a mappings to a page, so that
7692 7686 * we can detect the next modification of it. This requires modifying
7693 7687 * the TTE then invalidating (demap) any TLB entry using that TTE.
7694 7688 * This code is similar to sfmmu_pagesync().
7695 7689 */
7696 7690 static cpuset_t
7697 7691 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme)
7698 7692 {
7699 7693 caddr_t addr;
7700 7694 tte_t tte;
7701 7695 tte_t ttemod;
7702 7696 struct hme_blk *hmeblkp;
7703 7697 int ret;
7704 7698 sfmmu_t *sfmmup;
7705 7699 cpuset_t cpuset;
7706 7700
7707 7701 ASSERT(pp != NULL);
7708 7702 ASSERT(sfmmu_mlist_held(pp));
7709 7703
7710 7704 CPUSET_ZERO(cpuset);
7711 7705 SFMMU_STAT(sf_clrwrt);
7712 7706
7713 7707 retry:
7714 7708
7715 7709 sfmmu_copytte(&sfhme->hme_tte, &tte);
7716 7710 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7717 7711 hmeblkp = sfmmu_hmetohblk(sfhme);
7718 7712
7719 7713 /*
7720 7714 * xhat mappings should never be to a VMODSORT page.
7721 7715 */
7722 7716 ASSERT(hmeblkp->hblk_xhat_bit == 0);
7723 7717
7724 7718 sfmmup = hblktosfmmu(hmeblkp);
7725 7719 addr = tte_to_vaddr(hmeblkp, tte);
7726 7720
7727 7721 ttemod = tte;
7728 7722 TTE_CLR_WRT(&ttemod);
7729 7723 TTE_CLR_MOD(&ttemod);
7730 7724 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7731 7725
7732 7726 /*
7733 7727 * if cas failed and the new value is not what
7734 7728 * we want retry
7735 7729 */
7736 7730 if (ret < 0)
7737 7731 goto retry;
7738 7732
7739 7733 /* we win the cas */
7740 7734 if (ret > 0) {
7741 7735 if (hmeblkp->hblk_shared) {
7742 7736 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
7743 7737 uint_t rid = hmeblkp->hblk_tag.htag_rid;
7744 7738 sf_region_t *rgnp;
7745 7739 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
7746 7740 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
7747 7741 ASSERT(srdp != NULL);
7748 7742 rgnp = srdp->srd_hmergnp[rid];
7749 7743 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
7750 7744 srdp, rgnp, rid);
7751 7745 cpuset = sfmmu_rgntlb_demap(addr,
7752 7746 rgnp, hmeblkp, 1);
7753 7747 } else {
7754 7748 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0);
7755 7749 cpuset = sfmmup->sfmmu_cpusran;
7756 7750 }
7757 7751 }
7758 7752 }
7759 7753
7760 7754 return (cpuset);
7761 7755 }
7762 7756
7763 7757 /*
7764 7758 * Walk all mappings of a page, removing write permission and clearing the
7765 7759 * ref/mod bits. This code is similar to hat_pagesync()
7766 7760 */
7767 7761 static void
7768 7762 hat_page_clrwrt(page_t *pp)
7769 7763 {
7770 7764 struct sf_hment *sfhme;
7771 7765 struct sf_hment *tmphme = NULL;
7772 7766 kmutex_t *pml;
7773 7767 cpuset_t cpuset;
7774 7768 cpuset_t tset;
7775 7769 int index;
7776 7770 int cons;
7777 7771
7778 7772 CPUSET_ZERO(cpuset);
7779 7773
7780 7774 pml = sfmmu_mlist_enter(pp);
7781 7775 index = PP_MAPINDEX(pp);
7782 7776 cons = TTE8K;
7783 7777 retry:
7784 7778 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7785 7779 tmphme = sfhme->hme_next;
7786 7780
7787 7781 /*
7788 7782 * If we are looking for large mappings and this hme doesn't
7789 7783 * reach the range we are seeking, just ignore it.
7790 7784 */
7791 7785
7792 7786 if (hme_size(sfhme) < cons)
7793 7787 continue;
7794 7788
7795 7789 tset = sfmmu_pageclrwrt(pp, sfhme);
7796 7790 CPUSET_OR(cpuset, tset);
7797 7791 }
7798 7792
7799 7793 while (index) {
7800 7794 index = index >> 1;
7801 7795 cons++;
7802 7796 if (index & 0x1) {
7803 7797 /* Go to leading page */
7804 7798 pp = PP_GROUPLEADER(pp, cons);
7805 7799 goto retry;
7806 7800 }
7807 7801 }
7808 7802
7809 7803 xt_sync(cpuset);
7810 7804 sfmmu_mlist_exit(pml);
7811 7805 }
7812 7806
7813 7807 /*
7814 7808 * Set the given REF/MOD/RO bits for the given page.
7815 7809 * For a vnode with a sorted v_pages list, we need to change
7816 7810 * the attributes and the v_pages list together under page_vnode_mutex.
7817 7811 */
7818 7812 void
7819 7813 hat_page_setattr(page_t *pp, uint_t flag)
7820 7814 {
7821 7815 vnode_t *vp = pp->p_vnode;
7822 7816 page_t **listp;
7823 7817 kmutex_t *pmtx;
7824 7818 kmutex_t *vphm = NULL;
7825 7819 int noshuffle;
7826 7820
7827 7821 noshuffle = flag & P_NSH;
7828 7822 flag &= ~P_NSH;
7829 7823
7830 7824 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7831 7825
7832 7826 /*
7833 7827 * nothing to do if attribute already set
7834 7828 */
7835 7829 if ((pp->p_nrm & flag) == flag)
7836 7830 return;
7837 7831
7838 7832 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
7839 7833 !noshuffle) {
7840 7834 vphm = page_vnode_mutex(vp);
7841 7835 mutex_enter(vphm);
7842 7836 }
7843 7837
7844 7838 pmtx = sfmmu_page_enter(pp);
7845 7839 pp->p_nrm |= flag;
7846 7840 sfmmu_page_exit(pmtx);
7847 7841
7848 7842 if (vphm != NULL) {
7849 7843 /*
7850 7844 * Some File Systems examine v_pages for NULL w/o
7851 7845 * grabbing the vphm mutex. Must not let it become NULL when
7852 7846 * pp is the only page on the list.
7853 7847 */
7854 7848 if (pp->p_vpnext != pp) {
7855 7849 page_vpsub(&vp->v_pages, pp);
7856 7850 if (vp->v_pages != NULL)
7857 7851 listp = &vp->v_pages->p_vpprev->p_vpnext;
7858 7852 else
7859 7853 listp = &vp->v_pages;
7860 7854 page_vpadd(listp, pp);
7861 7855 }
7862 7856 mutex_exit(vphm);
7863 7857 }
7864 7858 }
7865 7859
7866 7860 void
7867 7861 hat_page_clrattr(page_t *pp, uint_t flag)
7868 7862 {
7869 7863 vnode_t *vp = pp->p_vnode;
7870 7864 kmutex_t *pmtx;
7871 7865
7872 7866 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7873 7867
7874 7868 pmtx = sfmmu_page_enter(pp);
7875 7869
7876 7870 /*
7877 7871 * Caller is expected to hold page's io lock for VMODSORT to work
7878 7872 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
7879 7873 * bit is cleared.
7880 7874 * We don't have an assert here, to avoid tripping some existing third party
7881 7875 * code. The dirty page is moved back to top of the v_page list
7882 7876 * after IO is done in pvn_write_done().
7883 7877 */
7884 7878 pp->p_nrm &= ~flag;
7885 7879 sfmmu_page_exit(pmtx);
7886 7880
7887 7881 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
7888 7882
7889 7883 /*
7890 7884 * VMODSORT works by removing write permissions and getting
7891 7885 * a fault when a page is made dirty. At this point
7892 7886 * we need to remove write permission from all mappings
7893 7887 * to this page.
7894 7888 */
7895 7889 hat_page_clrwrt(pp);
7896 7890 }
7897 7891 }
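
Reviewer note: a hedged sketch of the VMODSORT interaction described above. A filesystem that sorts v_pages clears the mod bit once write-back completes (holding the page's io lock, per the comment in hat_page_clrattr()), and that in turn write-protects the remaining mappings so the next store re-dirties the page:

	/* hypothetical write-back completion path */
	hat_page_clrattr(pp, P_MOD);	/* VMODSORT vnode: also runs hat_page_clrwrt() */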
7898 7892
7899 7893 uint_t
7900 7894 hat_page_getattr(page_t *pp, uint_t flag)
7901 7895 {
7902 7896 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
7903 7897 return ((uint_t)(pp->p_nrm & flag));
7904 7898 }
7905 7899
7906 7900 /*
7907 7901 * DEBUG kernels: verify that a kernel va<->pa translation
7908 7902 * is safe by checking the underlying page_t is in a page
7909 7903 * relocation-safe state.
7910 7904 */
7911 7905 #ifdef DEBUG
7912 7906 void
7913 7907 sfmmu_check_kpfn(pfn_t pfn)
7914 7908 {
7915 7909 page_t *pp;
7916 7910 int index, cons;
7917 7911
7918 7912 if (hat_check_vtop == 0)
7919 7913 return;
7920 7914
7921 7915 if (kvseg.s_base == NULL || panicstr)
7922 7916 return;
7923 7917
7924 7918 pp = page_numtopp_nolock(pfn);
7925 7919 if (!pp)
7926 7920 return;
7927 7921
7928 7922 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7929 7923 return;
7930 7924
7931 7925 /*
7932 7926 * Handed a large kernel page, we dig up the root page since we
7933 7927 * know the root page might have the lock also.
7934 7928 */
7935 7929 if (pp->p_szc != 0) {
7936 7930 index = PP_MAPINDEX(pp);
7937 7931 cons = TTE8K;
7938 7932 again:
7939 7933 while (index != 0) {
7940 7934 index >>= 1;
7941 7935 if (index != 0)
7942 7936 cons++;
7943 7937 if (index & 0x1) {
7944 7938 pp = PP_GROUPLEADER(pp, cons);
7945 7939 goto again;
7946 7940 }
7947 7941 }
7948 7942 }
7949 7943
7950 7944 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp))
7951 7945 return;
7952 7946
7953 7947 /*
7954 7948 * Pages need to be locked or allocated "permanent" (either from
7955 7949 * static_arena arena or explicitly setting PG_NORELOC when calling
7956 7950 * page_create_va()) for VA->PA translations to be valid.
7957 7951 */
7958 7952 if (!PP_ISNORELOC(pp))
7959 7953 panic("Illegal VA->PA translation, pp 0x%p not permanent",
7960 7954 (void *)pp);
7961 7955 else
7962 7956 panic("Illegal VA->PA translation, pp 0x%p not locked",
7963 7957 (void *)pp);
7964 7958 }
7965 7959 #endif /* DEBUG */
7966 7960
7967 7961 /*
7968 7962 * Returns a page frame number for a given virtual address.
7969 7963 * Returns PFN_INVALID to indicate an invalid mapping
7970 7964 */
7971 7965 pfn_t
7972 7966 hat_getpfnum(struct hat *hat, caddr_t addr)
7973 7967 {
7974 7968 pfn_t pfn;
7975 7969 tte_t tte;
7976 7970
7977 7971 /*
7978 7972 * We would like to
7979 - * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7973 + * ASSERT(AS_LOCK_HELD(as));
7980 7974 * but we can't because the iommu driver will call this
7981 7975 * routine at interrupt time and it can't grab the as lock
7982 7976 * or it will deadlock: A thread could have the as lock
7983 7977 * and be waiting for io. The io can't complete
7984 7978 * because the interrupt thread is blocked trying to grab
7985 7979 * the as lock.
7986 7980 */
7987 7981
7988 7982 ASSERT(hat->sfmmu_xhat_provider == NULL);
7989 7983
7990 7984 if (hat == ksfmmup) {
7991 7985 if (IS_KMEM_VA_LARGEPAGE(addr)) {
7992 7986 ASSERT(segkmem_lpszc > 0);
7993 7987 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7994 7988 if (pfn != PFN_INVALID) {
7995 7989 sfmmu_check_kpfn(pfn);
7996 7990 return (pfn);
7997 7991 }
7998 7992 } else if (segkpm && IS_KPM_ADDR(addr)) {
7999 7993 return (sfmmu_kpm_vatopfn(addr));
8000 7994 }
8001 7995 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte))
8002 7996 == PFN_SUSPENDED) {
8003 7997 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte);
8004 7998 }
8005 7999 sfmmu_check_kpfn(pfn);
8006 8000 return (pfn);
8007 8001 } else {
8008 8002 return (sfmmu_uvatopfn(addr, hat, NULL));
8009 8003 }
8010 8004 }
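
Reviewer note: a hedged usage sketch; using kas.a_hat as the kernel hat is the example's assumption, the routine itself only needs a hat pointer and an address:

	/* Hypothetical caller: translate a kernel virtual address to a PFN. */
	static pfn_t
	kva_to_pfn(caddr_t kaddr)
	{
		pfn_t pfn;

		pfn = hat_getpfnum(kas.a_hat, kaddr);
		/* PFN_INVALID means there is no valid translation for kaddr */
		return (pfn);
	}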
8011 8005
8012 8006 /*
8013 8007 * This routine will return both pfn and tte for the vaddr.
8014 8008 */
8015 8009 static pfn_t
8016 8010 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep)
8017 8011 {
8018 8012 struct hmehash_bucket *hmebp;
8019 8013 hmeblk_tag hblktag;
8020 8014 int hmeshift, hashno = 1;
8021 8015 struct hme_blk *hmeblkp = NULL;
8022 8016 tte_t tte;
8023 8017
8024 8018 struct sf_hment *sfhmep;
8025 8019 pfn_t pfn;
8026 8020
8027 8021 /* support for ISM */
8028 8022 ism_map_t *ism_map;
8029 8023 ism_blk_t *ism_blkp;
8030 8024 int i;
8031 8025 sfmmu_t *ism_hatid = NULL;
8032 8026 sfmmu_t *locked_hatid = NULL;
8033 8027 sfmmu_t *sv_sfmmup = sfmmup;
8034 8028 caddr_t sv_vaddr = vaddr;
8035 8029 sf_srd_t *srdp;
8036 8030
8037 8031 if (ttep == NULL) {
8038 8032 ttep = &tte;
8039 8033 } else {
8040 8034 ttep->ll = 0;
8041 8035 }
8042 8036
8043 8037 ASSERT(sfmmup != ksfmmup);
8044 8038 SFMMU_STAT(sf_user_vtop);
8045 8039 /*
8046 8040 * Set ism_hatid if vaddr falls in a ISM segment.
8047 8041 */
8048 8042 ism_blkp = sfmmup->sfmmu_iblk;
8049 8043 if (ism_blkp != NULL) {
8050 8044 sfmmu_ismhat_enter(sfmmup, 0);
8051 8045 locked_hatid = sfmmup;
8052 8046 }
8053 8047 while (ism_blkp != NULL && ism_hatid == NULL) {
8054 8048 ism_map = ism_blkp->iblk_maps;
8055 8049 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) {
8056 8050 if (vaddr >= ism_start(ism_map[i]) &&
8057 8051 vaddr < ism_end(ism_map[i])) {
8058 8052 sfmmup = ism_hatid = ism_map[i].imap_ismhat;
8059 8053 vaddr = (caddr_t)(vaddr -
8060 8054 ism_start(ism_map[i]));
8061 8055 break;
8062 8056 }
8063 8057 }
8064 8058 ism_blkp = ism_blkp->iblk_next;
8065 8059 }
8066 8060 if (locked_hatid) {
8067 8061 sfmmu_ismhat_exit(locked_hatid, 0);
8068 8062 }
8069 8063
8070 8064 hblktag.htag_id = sfmmup;
8071 8065 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
8072 8066 do {
8073 8067 hmeshift = HME_HASH_SHIFT(hashno);
8074 8068 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
8075 8069 hblktag.htag_rehash = hashno;
8076 8070 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
8077 8071
8078 8072 SFMMU_HASH_LOCK(hmebp);
8079 8073
8080 8074 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp);
8081 8075 if (hmeblkp != NULL) {
8082 8076 ASSERT(!hmeblkp->hblk_shared);
8083 8077 HBLKTOHME(sfhmep, hmeblkp, vaddr);
8084 8078 sfmmu_copytte(&sfhmep->hme_tte, ttep);
8085 8079 SFMMU_HASH_UNLOCK(hmebp);
8086 8080 if (TTE_IS_VALID(ttep)) {
8087 8081 pfn = TTE_TO_PFN(vaddr, ttep);
8088 8082 return (pfn);
8089 8083 }
8090 8084 break;
8091 8085 }
8092 8086 SFMMU_HASH_UNLOCK(hmebp);
8093 8087 hashno++;
8094 8088 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt));
8095 8089
8096 8090 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) {
8097 8091 return (PFN_INVALID);
8098 8092 }
8099 8093 srdp = sv_sfmmup->sfmmu_srdp;
8100 8094 ASSERT(srdp != NULL);
8101 8095 ASSERT(srdp->srd_refcnt != 0);
8102 8096 hblktag.htag_id = srdp;
8103 8097 hashno = 1;
8104 8098 do {
8105 8099 hmeshift = HME_HASH_SHIFT(hashno);
8106 8100 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift);
8107 8101 hblktag.htag_rehash = hashno;
8108 8102 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift);
8109 8103
8110 8104 SFMMU_HASH_LOCK(hmebp);
8111 8105 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL;
8112 8106 hmeblkp = hmeblkp->hblk_next) {
8113 8107 uint_t rid;
8114 8108 sf_region_t *rgnp;
8115 8109 caddr_t rsaddr;
8116 8110 caddr_t readdr;
8117 8111
8118 8112 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag,
8119 8113 sv_sfmmup->sfmmu_hmeregion_map)) {
8120 8114 continue;
8121 8115 }
8122 8116 ASSERT(hmeblkp->hblk_shared);
8123 8117 rid = hmeblkp->hblk_tag.htag_rid;
8124 8118 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8125 8119 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8126 8120 rgnp = srdp->srd_hmergnp[rid];
8127 8121 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
8128 8122 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr);
8129 8123 sfmmu_copytte(&sfhmep->hme_tte, ttep);
8130 8124 rsaddr = rgnp->rgn_saddr;
8131 8125 readdr = rsaddr + rgnp->rgn_size;
8132 8126 #ifdef DEBUG
8133 8127 if (TTE_IS_VALID(ttep) ||
8134 8128 get_hblk_ttesz(hmeblkp) > TTE8K) {
8135 8129 caddr_t eva = tte_to_evaddr(hmeblkp, ttep);
8136 8130 ASSERT(eva > sv_vaddr);
8137 8131 ASSERT(sv_vaddr >= rsaddr);
8138 8132 ASSERT(sv_vaddr < readdr);
8139 8133 ASSERT(eva <= readdr);
8140 8134 }
8141 8135 #endif /* DEBUG */
8142 8136 /*
8143 8137 * Continue the search if we
8144 8138 * found an invalid 8K tte outside of the area
8145 8139 * covered by this hmeblk's region.
8146 8140 */
8147 8141 if (TTE_IS_VALID(ttep)) {
8148 8142 SFMMU_HASH_UNLOCK(hmebp);
8149 8143 pfn = TTE_TO_PFN(sv_vaddr, ttep);
8150 8144 return (pfn);
8151 8145 } else if (get_hblk_ttesz(hmeblkp) > TTE8K ||
8152 8146 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) {
8153 8147 SFMMU_HASH_UNLOCK(hmebp);
8154 8148 pfn = PFN_INVALID;
8155 8149 return (pfn);
8156 8150 }
8157 8151 }
8158 8152 SFMMU_HASH_UNLOCK(hmebp);
8159 8153 hashno++;
8160 8154 } while (hashno <= mmu_hashcnt);
8161 8155 return (PFN_INVALID);
8162 8156 }
8163 8157
8164 8158
8165 8159 /*
8166 8160 * For compatibility with AT&T and later optimizations
8167 8161 */
8168 8162 /* ARGSUSED */
8169 8163 void
8170 8164 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8171 8165 {
8172 8166 ASSERT(hat != NULL);
8173 8167 ASSERT(hat->sfmmu_xhat_provider == NULL);
8174 8168 }
8175 8169
8176 8170 /*
8177 8171 * Return the number of mappings to a particular page. This number is an
8178 8172 * approximation of the number of people sharing the page.
8179 8173 *
8180 8174 * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8181 8175 * hat_page_checkshare() can be used to compare threshold to share
8182 8176 * count that reflects the number of region sharers albeit at higher cost.
8183 8177 */
8184 8178 ulong_t
8185 8179 hat_page_getshare(page_t *pp)
8186 8180 {
8187 8181 page_t *spp = pp; /* start page */
8188 8182 kmutex_t *pml;
8189 8183 ulong_t cnt;
8190 8184 int index, sz = TTE64K;
8191 8185
8192 8186 /*
8193 8187 * We need to grab the mlist lock to make sure any outstanding
8194 8188 	 * load/unloads complete. Otherwise we could return zero
8195 8189 	 * even though the unload(s) haven't finished yet.
8196 8190 */
8197 8191 pml = sfmmu_mlist_enter(spp);
8198 8192 cnt = spp->p_share;
8199 8193
8200 8194 #ifdef VAC
8201 8195 if (kpm_enable)
8202 8196 cnt += spp->p_kpmref;
8203 8197 #endif
8204 8198 if (vpm_enable && pp->p_vpmref) {
8205 8199 cnt += 1;
8206 8200 }
8207 8201
8208 8202 /*
8209 8203 * If we have any large mappings, we count the number of
8210 8204 * mappings that this large page is part of.
8211 8205 */
8212 8206 index = PP_MAPINDEX(spp);
8213 8207 index >>= 1;
8214 8208 while (index) {
8215 8209 pp = PP_GROUPLEADER(spp, sz);
8216 8210 if ((index & 0x1) && pp != spp) {
8217 8211 cnt += pp->p_share;
8218 8212 spp = pp;
8219 8213 }
8220 8214 index >>= 1;
8221 8215 sz++;
8222 8216 }
8223 8217 sfmmu_mlist_exit(pml);
8224 8218 return (cnt);
8225 8219 }
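The loop above walks the PP_MAPINDEX bit mask, one bit per large page size, jumping to each group leader and adding its share count. The stand-alone sketch below illustrates the same bit-walk; pg_t, its share[] array and the size indices are made-up stand-ins for page_t, PP_GROUPLEADER() and p_share, not the kernel's real interfaces.

#include <stdio.h>

#define NSIZES	6	/* illustrative number of page sizes (8K..256M) */

/* hypothetical stand-in for page_t: per-size share counts, toy only */
typedef struct pg {
	unsigned long share[NSIZES];
} pg_t;

static unsigned long
count_share(pg_t *pp, unsigned int index)
{
	unsigned long cnt = pp->share[0];	/* small (8K) mappings on the page */
	int sz = 1;				/* first large size, like TTE64K */

	index >>= 1;				/* bit 0 is not a large-page bit here */
	while (index) {
		if (index & 0x1)
			cnt += pp->share[sz];	/* group leader's count for size sz */
		index >>= 1;
		sz++;
	}
	return (cnt);
}

int
main(void)
{
	pg_t pp = { { 3, 1, 0, 2, 0, 0 } };

	/* toy index with bits 1 and 3 set: 64K and 4M group mappings exist */
	printf("%lu\n", count_share(&pp, 0x2 | 0x8));	/* prints 6 */
	return (0);
}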
8226 8220
8227 8221 /*
8228 8222 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
8229 8223 * otherwise. Count shared hmeblks by region's refcnt.
8230 8224 */
8231 8225 int
8232 8226 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
8233 8227 {
8234 8228 kmutex_t *pml;
8235 8229 ulong_t cnt = 0;
8236 8230 int index, sz = TTE8K;
8237 8231 struct sf_hment *sfhme, *tmphme = NULL;
8238 8232 struct hme_blk *hmeblkp;
8239 8233
8240 8234 pml = sfmmu_mlist_enter(pp);
8241 8235
8242 8236 #ifdef VAC
8243 8237 if (kpm_enable)
8244 8238 cnt = pp->p_kpmref;
8245 8239 #endif
8246 8240
8247 8241 if (vpm_enable && pp->p_vpmref) {
8248 8242 cnt += 1;
8249 8243 }
8250 8244
8251 8245 if (pp->p_share + cnt > sh_thresh) {
8252 8246 sfmmu_mlist_exit(pml);
8253 8247 return (1);
8254 8248 }
8255 8249
8256 8250 index = PP_MAPINDEX(pp);
8257 8251
8258 8252 again:
8259 8253 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8260 8254 tmphme = sfhme->hme_next;
8261 8255 if (IS_PAHME(sfhme)) {
8262 8256 continue;
8263 8257 }
8264 8258
8265 8259 hmeblkp = sfmmu_hmetohblk(sfhme);
8266 8260 if (hmeblkp->hblk_xhat_bit) {
8267 8261 cnt++;
8268 8262 if (cnt > sh_thresh) {
8269 8263 sfmmu_mlist_exit(pml);
8270 8264 return (1);
8271 8265 }
8272 8266 continue;
8273 8267 }
8274 8268 if (hme_size(sfhme) != sz) {
8275 8269 continue;
8276 8270 }
8277 8271
8278 8272 if (hmeblkp->hblk_shared) {
8279 8273 sf_srd_t *srdp = hblktosrd(hmeblkp);
8280 8274 uint_t rid = hmeblkp->hblk_tag.htag_rid;
8281 8275 sf_region_t *rgnp;
8282 8276 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8283 8277 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
8284 8278 ASSERT(srdp != NULL);
8285 8279 rgnp = srdp->srd_hmergnp[rid];
8286 8280 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp,
8287 8281 rgnp, rid);
8288 8282 cnt += rgnp->rgn_refcnt;
8289 8283 } else {
8290 8284 cnt++;
8291 8285 }
8292 8286 if (cnt > sh_thresh) {
8293 8287 sfmmu_mlist_exit(pml);
8294 8288 return (1);
8295 8289 }
8296 8290 }
8297 8291
8298 8292 index >>= 1;
8299 8293 sz++;
8300 8294 while (index) {
8301 8295 pp = PP_GROUPLEADER(pp, sz);
8302 8296 ASSERT(sfmmu_mlist_held(pp));
8303 8297 if (index & 0x1) {
8304 8298 goto again;
8305 8299 }
8306 8300 index >>= 1;
8307 8301 sz++;
8308 8302 }
8309 8303 sfmmu_mlist_exit(pml);
8310 8304 return (0);
8311 8305 }
8312 8306
8313 8307 /*
8314 8308 * Unload all large mappings to the pp and reset the p_szc field of every
8315 8309 * constituent page according to the remaining mappings.
8316 8310 *
8317 8311  * pp must be locked SE_EXCL. Even though no other constituent pages are
8318 8312  * locked, it's legal to unload the large mappings to the pp because all
8319 8313  * constituent pages of large locked mappings have to be locked SE_SHARED.
8320 8314  * This means that if we have an SE_EXCL lock on one of the constituent pages,
8321 8315  * none of the large mappings to pp are locked.
8322 8316 *
8323 8317 * Decrease p_szc field starting from the last constituent page and ending
8324 8318 * with the root page. This method is used because other threads rely on the
8325 8319  * root's p_szc to find the lock to synchronize on. After a root page_t's p_szc
8326 8320  * is demoted, other threads will succeed in sfmmu_mlspl_enter(). This
8327 8321  * ensures that p_szc changes of the constituent pages appear atomic for all
8328 8322  * threads that use sfmmu_mlspl_enter() to examine the p_szc field.
8329 8323 *
8330 8324 * This mechanism is only used for file system pages where it's not always
8331 8325 * possible to get SE_EXCL locks on all constituent pages to demote the size
8332 8326 * code (as is done for anonymous or kernel large pages).
8333 8327 *
8334 8328 * See more comments in front of sfmmu_mlspl_enter().
8335 8329 */
8336 8330 void
8337 8331 hat_page_demote(page_t *pp)
8338 8332 {
8339 8333 int index;
8340 8334 int sz;
8341 8335 cpuset_t cpuset;
8342 8336 int sync = 0;
8343 8337 page_t *rootpp;
8344 8338 struct sf_hment *sfhme;
8345 8339 struct sf_hment *tmphme = NULL;
8346 8340 struct hme_blk *hmeblkp;
8347 8341 uint_t pszc;
8348 8342 page_t *lastpp;
8349 8343 cpuset_t tset;
8350 8344 pgcnt_t npgs;
8351 8345 kmutex_t *pml;
8352 8346 kmutex_t *pmtx = NULL;
8353 8347
8354 8348 ASSERT(PAGE_EXCL(pp));
8355 8349 ASSERT(!PP_ISFREE(pp));
8356 8350 ASSERT(!PP_ISKAS(pp));
8357 8351 ASSERT(page_szc_lock_assert(pp));
8358 8352 pml = sfmmu_mlist_enter(pp);
8359 8353
8360 8354 pszc = pp->p_szc;
8361 8355 if (pszc == 0) {
8362 8356 goto out;
8363 8357 }
8364 8358
8365 8359 index = PP_MAPINDEX(pp) >> 1;
8366 8360
8367 8361 if (index) {
8368 8362 CPUSET_ZERO(cpuset);
8369 8363 sz = TTE64K;
8370 8364 sync = 1;
8371 8365 }
8372 8366
8373 8367 while (index) {
8374 8368 if (!(index & 0x1)) {
8375 8369 index >>= 1;
8376 8370 sz++;
8377 8371 continue;
8378 8372 }
8379 8373 ASSERT(sz <= pszc);
8380 8374 rootpp = PP_GROUPLEADER(pp, sz);
8381 8375 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8382 8376 tmphme = sfhme->hme_next;
8383 8377 ASSERT(!IS_PAHME(sfhme));
8384 8378 hmeblkp = sfmmu_hmetohblk(sfhme);
8385 8379 if (hme_size(sfhme) != sz) {
8386 8380 continue;
8387 8381 }
8388 8382 if (hmeblkp->hblk_xhat_bit) {
8389 8383 cmn_err(CE_PANIC,
8390 8384 "hat_page_demote: xhat hmeblk");
8391 8385 }
8392 8386 tset = sfmmu_pageunload(rootpp, sfhme, sz);
8393 8387 CPUSET_OR(cpuset, tset);
8394 8388 }
8395 8389 if (index >>= 1) {
8396 8390 sz++;
8397 8391 }
8398 8392 }
8399 8393
8400 8394 ASSERT(!PP_ISMAPPED_LARGE(pp));
8401 8395
8402 8396 if (sync) {
8403 8397 xt_sync(cpuset);
8404 8398 #ifdef VAC
8405 8399 if (PP_ISTNC(pp)) {
8406 8400 conv_tnc(rootpp, sz);
8407 8401 }
8408 8402 #endif /* VAC */
8409 8403 }
8410 8404
8411 8405 pmtx = sfmmu_page_enter(pp);
8412 8406
8413 8407 ASSERT(pp->p_szc == pszc);
8414 8408 rootpp = PP_PAGEROOT(pp);
8415 8409 ASSERT(rootpp->p_szc == pszc);
8416 8410 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1);
8417 8411
8418 8412 while (lastpp != rootpp) {
8419 8413 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0;
8420 8414 ASSERT(sz < pszc);
8421 8415 npgs = (sz == 0) ? 1 : TTEPAGES(sz);
8422 8416 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1);
8423 8417 while (--npgs > 0) {
8424 8418 lastpp->p_szc = (uchar_t)sz;
8425 8419 lastpp = PP_PAGEPREV(lastpp);
8426 8420 }
8427 8421 if (sz) {
8428 8422 /*
8429 8423 			 * make sure that, before the current root's pszc
8430 8424 			 * is updated, all updates to the constituent pages'
8431 8425 			 * pszc fields are globally visible.
8432 8426 */
8433 8427 membar_producer();
8434 8428 }
8435 8429 lastpp->p_szc = sz;
8436 8430 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz)));
8437 8431 if (lastpp != rootpp) {
8438 8432 lastpp = PP_PAGEPREV(lastpp);
8439 8433 }
8440 8434 }
8441 8435 if (sz == 0) {
8442 8436 /* the loop above doesn't cover this case */
8443 8437 rootpp->p_szc = 0;
8444 8438 }
8445 8439 out:
8446 8440 ASSERT(pp->p_szc == 0);
8447 8441 if (pmtx != NULL) {
8448 8442 sfmmu_page_exit(pmtx);
8449 8443 }
8450 8444 sfmmu_mlist_exit(pml);
8451 8445 }
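The ordering rule described in the comment above hat_page_demote() (constituent pages first, a producer barrier, then the root page's p_szc last) can be pictured with the small user-level sketch below. pg_t, demote_szc() and the C11 fence are illustrative stand-ins for page_t and membar_producer(), not the kernel interfaces.

#include <stdatomic.h>
#include <stdio.h>

#define NPAGES	8	/* one toy large page made of 8 constituents */

typedef struct {
	_Atomic unsigned char szc;	/* size code, like page_t's p_szc */
} pg_t;

/*
 * Lower the size code: update the trailing constituent pages first,
 * publish those stores, and only then drop the root page's szc, so a
 * reader that keys off the root never observes a demoted root while
 * some constituent still carries a stale value.
 */
static void
demote_szc(pg_t pages[NPAGES], unsigned char newszc)
{
	for (int i = NPAGES - 1; i > 0; i--)
		atomic_store_explicit(&pages[i].szc, newszc,
		    memory_order_relaxed);

	/* analogue of membar_producer(): order the stores above before... */
	atomic_thread_fence(memory_order_release);

	/* ...the root page's update, which readers synchronize on */
	atomic_store_explicit(&pages[0].szc, newszc, memory_order_relaxed);
}

int
main(void)
{
	pg_t pages[NPAGES];

	for (int i = 0; i < NPAGES; i++)
		pages[i].szc = 3;	/* pretend all start at a large size */
	demote_szc(pages, 0);
	printf("root szc is now %u\n", (unsigned int)pages[0].szc);
	return (0);
}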
8452 8446
8453 8447 /*
8454 8448 * Refresh the HAT ismttecnt[] element for size szc.
8455 8449 * Caller must have set ISM busy flag to prevent mapping
8456 8450 * lists from changing while we're traversing them.
8457 8451 */
8458 8452 pgcnt_t
8459 8453 ism_tsb_entries(sfmmu_t *sfmmup, int szc)
8460 8454 {
8461 8455 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk;
8462 8456 ism_map_t *ism_map;
8463 8457 pgcnt_t npgs = 0;
8464 8458 pgcnt_t npgs_scd = 0;
8465 8459 int j;
8466 8460 sf_scd_t *scdp;
8467 8461 uchar_t rid;
8468 8462
8469 8463 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
8470 8464 scdp = sfmmup->sfmmu_scdp;
8471 8465
8472 8466 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) {
8473 8467 ism_map = ism_blkp->iblk_maps;
8474 8468 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) {
8475 8469 rid = ism_map[j].imap_rid;
8476 8470 ASSERT(rid == SFMMU_INVALID_ISMRID ||
8477 8471 rid < sfmmup->sfmmu_srdp->srd_next_ismrid);
8478 8472
8479 8473 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID &&
8480 8474 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
8481 8475 /* ISM is in sfmmup's SCD */
8482 8476 npgs_scd +=
8483 8477 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8484 8478 } else {
8485 8479 				/* ISM is not in sfmmup's SCD */
8486 8480 npgs +=
8487 8481 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc];
8488 8482 }
8489 8483 }
8490 8484 }
8491 8485 sfmmup->sfmmu_ismttecnt[szc] = npgs;
8492 8486 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd;
8493 8487 return (npgs);
8494 8488 }
8495 8489
8496 8490 /*
8497 8491 * Yield the memory claim requirement for an address space.
8498 8492 *
8499 8493 * This is currently implemented as the number of bytes that have active
8500 8494 * hardware translations that have page structures. Therefore, it can
8501 8495  * underestimate the traditional resident set size, e.g., if the
8502 8496  * physical page is present and the hardware translation is missing;
8503 8497  * and it can overestimate the rss, e.g., if there are active
8504 8498 * translations to a frame buffer with page structs.
8505 8499 * Also, it does not take sharing into account.
8506 8500 *
8507 8501 * Note that we don't acquire locks here since this function is most often
8508 8502 * called from the clock thread.
8509 8503 */
8510 8504 size_t
8511 8505 hat_get_mapped_size(struct hat *hat)
8512 8506 {
8513 8507 size_t assize = 0;
8514 8508 int i;
8515 8509
8516 8510 if (hat == NULL)
8517 8511 return (0);
8518 8512
8519 8513 ASSERT(hat->sfmmu_xhat_provider == NULL);
8520 8514
8521 8515 for (i = 0; i < mmu_page_sizes; i++)
8522 8516 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8523 8517 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8524 8518
8525 8519 if (hat->sfmmu_iblk == NULL)
8526 8520 return (assize);
8527 8521
8528 8522 for (i = 0; i < mmu_page_sizes; i++)
8529 8523 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8530 8524 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8531 8525
8532 8526 return (assize);
8533 8527 }
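The arithmetic in hat_get_mapped_size() is simply a sum of translation counts weighted by the byte size of each page size. The minimal stand-alone version below uses made-up page sizes and counts in place of mmu_page_sizes, the sfmmu_ttecnt[] arrays and TTEBYTES(); it is only a sketch of the calculation, not the kernel code.

#include <stdio.h>
#include <stddef.h>

#define NPAGESIZES	4

/* illustrative page sizes: 8K, 64K, 512K, 4M */
static const size_t pgbytes[NPAGESIZES] = { 8192, 65536, 524288, 4194304 };

static size_t
mapped_size(const unsigned long ttecnt[NPAGESIZES])
{
	size_t bytes = 0;

	/* one translation of size i maps pgbytes[i] bytes */
	for (int i = 0; i < NPAGESIZES; i++)
		bytes += (size_t)ttecnt[i] * pgbytes[i];
	return (bytes);
}

int
main(void)
{
	unsigned long ttecnt[NPAGESIZES] = { 100, 4, 0, 1 };

	printf("%zu bytes mapped\n", mapped_size(ttecnt));
	return (0);
}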
8534 8528
8535 8529 int
8536 8530 hat_stats_enable(struct hat *hat)
8537 8531 {
8538 8532 hatlock_t *hatlockp;
8539 8533
8540 8534 ASSERT(hat->sfmmu_xhat_provider == NULL);
8541 8535
8542 8536 hatlockp = sfmmu_hat_enter(hat);
8543 8537 hat->sfmmu_rmstat++;
8544 8538 sfmmu_hat_exit(hatlockp);
8545 8539 return (1);
8546 8540 }
8547 8541
8548 8542 void
8549 8543 hat_stats_disable(struct hat *hat)
8550 8544 {
8551 8545 hatlock_t *hatlockp;
8552 8546
8553 8547 ASSERT(hat->sfmmu_xhat_provider == NULL);
8554 8548
8555 8549 hatlockp = sfmmu_hat_enter(hat);
8556 8550 hat->sfmmu_rmstat--;
8557 8551 sfmmu_hat_exit(hatlockp);
8558 8552 }
8559 8553
8560 8554 /*
8561 8555 * Routines for entering or removing ourselves from the
8562 8556 * ism_hat's mapping list. This is used for both private and
8563 8557 * SCD hats.
8564 8558 */
8565 8559 static void
8566 8560 iment_add(struct ism_ment *iment, struct hat *ism_hat)
8567 8561 {
8568 8562 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8569 8563
8570 8564 iment->iment_prev = NULL;
8571 8565 iment->iment_next = ism_hat->sfmmu_iment;
8572 8566 if (ism_hat->sfmmu_iment) {
8573 8567 ism_hat->sfmmu_iment->iment_prev = iment;
8574 8568 }
8575 8569 ism_hat->sfmmu_iment = iment;
8576 8570 }
8577 8571
8578 8572 static void
8579 8573 iment_sub(struct ism_ment *iment, struct hat *ism_hat)
8580 8574 {
8581 8575 ASSERT(MUTEX_HELD(&ism_mlist_lock));
8582 8576
8583 8577 if (ism_hat->sfmmu_iment == NULL) {
8584 8578 panic("ism map entry remove - no entries");
8585 8579 }
8586 8580
8587 8581 if (iment->iment_prev) {
8588 8582 ASSERT(ism_hat->sfmmu_iment != iment);
8589 8583 iment->iment_prev->iment_next = iment->iment_next;
8590 8584 } else {
8591 8585 ASSERT(ism_hat->sfmmu_iment == iment);
8592 8586 ism_hat->sfmmu_iment = iment->iment_next;
8593 8587 }
8594 8588
8595 8589 if (iment->iment_next) {
8596 8590 iment->iment_next->iment_prev = iment->iment_prev;
8597 8591 }
8598 8592
8599 8593 /*
8600 8594 * zero out the entry
8601 8595 */
8602 8596 iment->iment_next = NULL;
8603 8597 iment->iment_prev = NULL;
8604 8598 iment->iment_hat = NULL;
8605 8599 iment->iment_base_va = 0;
8606 8600 }
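iment_add() and iment_sub() are a plain head-insert and unlink on a doubly linked list. The generic sketch below shows the same pointer manipulation with a toy node_t and a bare head pointer rather than the ism_ment_t fields and sfmmu_iment list head.

#include <assert.h>
#include <stddef.h>

typedef struct node {
	struct node *prev;
	struct node *next;
} node_t;

/* insert at the head, mirroring iment_add() */
static void
list_add(node_t **headp, node_t *n)
{
	n->prev = NULL;
	n->next = *headp;
	if (*headp != NULL)
		(*headp)->prev = n;
	*headp = n;
}

/* unlink from anywhere in the list, mirroring iment_sub() */
static void
list_sub(node_t **headp, node_t *n)
{
	assert(*headp != NULL);
	if (n->prev != NULL)
		n->prev->next = n->next;
	else
		*headp = n->next;
	if (n->next != NULL)
		n->next->prev = n->prev;
	n->next = n->prev = NULL;	/* zero out the entry */
}

int
main(void)
{
	node_t a, b, *head = NULL;

	list_add(&head, &a);
	list_add(&head, &b);
	list_sub(&head, &a);
	assert(head == &b && b.next == NULL);
	return (0);
}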
8607 8601
8608 8602 /*
8609 8603  * Hat_share()/unshare() return a (non-zero) error
8610 8604 * when saddr and daddr are not properly aligned.
8611 8605 *
8612 8606 * The top level mapping element determines the alignment
8613 8607 * requirement for saddr and daddr, depending on different
8614 8608 * architectures.
8615 8609 *
8616 8610 * When hat_share()/unshare() are not supported,
8617 8611 * HATOP_SHARE()/UNSHARE() return 0
8618 8612 */
8619 8613 int
8620 8614 hat_share(struct hat *sfmmup, caddr_t addr,
8621 8615 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
8622 8616 {
8623 8617 ism_blk_t *ism_blkp;
8624 8618 ism_blk_t *new_iblk;
8625 8619 ism_map_t *ism_map;
8626 8620 ism_ment_t *ism_ment;
8627 8621 int i, added;
8628 8622 hatlock_t *hatlockp;
8629 8623 int reload_mmu = 0;
8630 8624 uint_t ismshift = page_get_shift(ismszc);
8631 8625 size_t ismpgsz = page_get_pagesize(ismszc);
8632 8626 uint_t ismmask = (uint_t)ismpgsz - 1;
8633 8627 size_t sh_size = ISM_SHIFT(ismshift, len);
8634 8628 ushort_t ismhatflag;
8635 8629 hat_region_cookie_t rcookie;
8636 8630 sf_scd_t *old_scdp;
8637 8631
8638 8632 #ifdef DEBUG
8639 8633 caddr_t eaddr = addr + len;
8640 8634 #endif /* DEBUG */
8641 8635
8642 8636 ASSERT(ism_hatid != NULL && sfmmup != NULL);
8643 8637 ASSERT(sptaddr == ISMID_STARTADDR);
8644 8638 /*
8645 8639 * Check the alignment.
8646 8640 */
8647 8641 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8648 8642 return (EINVAL);
8649 8643
8650 8644 /*
8651 8645 * Check size alignment.
8652 8646 */
8653 8647 if (!ISM_ALIGNED(ismshift, len))
8654 8648 return (EINVAL);
8655 8649
8656 8650 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8657 8651
8658 8652 /*
8659 8653 * Allocate ism_ment for the ism_hat's mapping list, and an
8660 8654 * ism map blk in case we need one. We must do our
8661 8655 * allocations before acquiring locks to prevent a deadlock
8662 8656 * in the kmem allocator on the mapping list lock.
8663 8657 */
8664 8658 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8665 8659 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8666 8660
8667 8661 /*
8668 8662 * Serialize ISM mappings with the ISM busy flag, and also the
8669 8663 * trap handlers.
8670 8664 */
8671 8665 sfmmu_ismhat_enter(sfmmup, 0);
8672 8666
8673 8667 /*
8674 8668 * Allocate an ism map blk if necessary.
8675 8669 */
8676 8670 if (sfmmup->sfmmu_iblk == NULL) {
8677 8671 sfmmup->sfmmu_iblk = new_iblk;
8678 8672 bzero(new_iblk, sizeof (*new_iblk));
8679 8673 new_iblk->iblk_nextpa = (uint64_t)-1;
8680 8674 membar_stst(); /* make sure next ptr visible to all CPUs */
8681 8675 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk);
8682 8676 reload_mmu = 1;
8683 8677 new_iblk = NULL;
8684 8678 }
8685 8679
8686 8680 #ifdef DEBUG
8687 8681 /*
8688 8682 * Make sure mapping does not already exist.
8689 8683 */
8690 8684 ism_blkp = sfmmup->sfmmu_iblk;
8691 8685 while (ism_blkp != NULL) {
8692 8686 ism_map = ism_blkp->iblk_maps;
8693 8687 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
8694 8688 if ((addr >= ism_start(ism_map[i]) &&
8695 8689 addr < ism_end(ism_map[i])) ||
8696 8690 eaddr > ism_start(ism_map[i]) &&
8697 8691 eaddr <= ism_end(ism_map[i])) {
8698 8692 panic("sfmmu_share: Already mapped!");
8699 8693 }
8700 8694 }
8701 8695 ism_blkp = ism_blkp->iblk_next;
8702 8696 }
8703 8697 #endif /* DEBUG */
8704 8698
8705 8699 ASSERT(ismszc >= TTE4M);
8706 8700 if (ismszc == TTE4M) {
8707 8701 ismhatflag = HAT_4M_FLAG;
8708 8702 } else if (ismszc == TTE32M) {
8709 8703 ismhatflag = HAT_32M_FLAG;
8710 8704 } else if (ismszc == TTE256M) {
8711 8705 ismhatflag = HAT_256M_FLAG;
8712 8706 }
8713 8707 /*
8714 8708 * Add mapping to first available mapping slot.
8715 8709 */
8716 8710 ism_blkp = sfmmup->sfmmu_iblk;
8717 8711 added = 0;
8718 8712 while (!added) {
8719 8713 ism_map = ism_blkp->iblk_maps;
8720 8714 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8721 8715 if (ism_map[i].imap_ismhat == NULL) {
8722 8716
8723 8717 ism_map[i].imap_ismhat = ism_hatid;
8724 8718 ism_map[i].imap_vb_shift = (uchar_t)ismshift;
8725 8719 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8726 8720 ism_map[i].imap_hatflags = ismhatflag;
8727 8721 ism_map[i].imap_sz_mask = ismmask;
8728 8722 /*
8729 8723 				 * imap_seg is checked in ISM_CHECK to see if it is
8730 8724 				 * non-NULL; if so, the other info is assumed valid.
8731 8725 */
8732 8726 membar_stst();
8733 8727 ism_map[i].imap_seg = (uintptr_t)addr | sh_size;
8734 8728 ism_map[i].imap_ment = ism_ment;
8735 8729
8736 8730 /*
8737 8731 * Now add ourselves to the ism_hat's
8738 8732 * mapping list.
8739 8733 */
8740 8734 ism_ment->iment_hat = sfmmup;
8741 8735 ism_ment->iment_base_va = addr;
8742 8736 ism_hatid->sfmmu_ismhat = 1;
8743 8737 mutex_enter(&ism_mlist_lock);
8744 8738 iment_add(ism_ment, ism_hatid);
8745 8739 mutex_exit(&ism_mlist_lock);
8746 8740 added = 1;
8747 8741 break;
8748 8742 }
8749 8743 }
8750 8744 if (!added && ism_blkp->iblk_next == NULL) {
8751 8745 ism_blkp->iblk_next = new_iblk;
8752 8746 new_iblk = NULL;
8753 8747 bzero(ism_blkp->iblk_next,
8754 8748 sizeof (*ism_blkp->iblk_next));
8755 8749 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1;
8756 8750 membar_stst();
8757 8751 ism_blkp->iblk_nextpa =
8758 8752 va_to_pa((caddr_t)ism_blkp->iblk_next);
8759 8753 }
8760 8754 ism_blkp = ism_blkp->iblk_next;
8761 8755 }
8762 8756
8763 8757 /*
8764 8758 * After calling hat_join_region, sfmmup may join a new SCD or
8765 8759 * move from the old scd to a new scd, in which case, we want to
8766 8760 * shrink the sfmmup's private tsb size, i.e., pass shrink to
8767 8761 * sfmmu_check_page_sizes at the end of this routine.
8768 8762 */
8769 8763 old_scdp = sfmmup->sfmmu_scdp;
8770 8764
8771 8765 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0,
8772 8766 PROT_ALL, ismszc, NULL, HAT_REGION_ISM);
8773 8767 if (rcookie != HAT_INVALID_REGION_COOKIE) {
8774 8768 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie);
8775 8769 }
8776 8770 /*
8777 8771 * Update our counters for this sfmmup's ism mappings.
8778 8772 */
8779 8773 for (i = 0; i <= ismszc; i++) {
8780 8774 if (!(disable_ism_large_pages & (1 << i)))
8781 8775 (void) ism_tsb_entries(sfmmup, i);
8782 8776 }
8783 8777
8784 8778 /*
8785 8779 	 * For ISM and DISM we do not support 512K pages, so we only
8786 8780 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the
8787 8781 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus.
8788 8782 *
8789 8783 * Need to set 32M/256M ISM flags to make sure
8790 8784 * sfmmu_check_page_sizes() enables them on Panther.
8791 8785 */
8792 8786 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0);
8793 8787
8794 8788 switch (ismszc) {
8795 8789 case TTE256M:
8796 8790 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) {
8797 8791 hatlockp = sfmmu_hat_enter(sfmmup);
8798 8792 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM);
8799 8793 sfmmu_hat_exit(hatlockp);
8800 8794 }
8801 8795 break;
8802 8796 case TTE32M:
8803 8797 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) {
8804 8798 hatlockp = sfmmu_hat_enter(sfmmup);
8805 8799 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM);
8806 8800 sfmmu_hat_exit(hatlockp);
8807 8801 }
8808 8802 break;
8809 8803 default:
8810 8804 break;
8811 8805 }
8812 8806
8813 8807 /*
8814 8808 * If we updated the ismblkpa for this HAT we must make
8815 8809 * sure all CPUs running this process reload their tsbmiss area.
8816 8810 * Otherwise they will fail to load the mappings in the tsbmiss
8817 8811 * handler and will loop calling pagefault().
8818 8812 */
8819 8813 if (reload_mmu) {
8820 8814 hatlockp = sfmmu_hat_enter(sfmmup);
8821 8815 sfmmu_sync_mmustate(sfmmup);
8822 8816 sfmmu_hat_exit(hatlockp);
8823 8817 }
8824 8818
8825 8819 sfmmu_ismhat_exit(sfmmup, 0);
8826 8820
8827 8821 /*
8828 8822 * Free up ismblk if we didn't use it.
8829 8823 */
8830 8824 if (new_iblk != NULL)
8831 8825 kmem_cache_free(ism_blk_cache, new_iblk);
8832 8826
8833 8827 /*
8834 8828 * Check TSB and TLB page sizes.
8835 8829 */
8836 8830 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) {
8837 8831 sfmmu_check_page_sizes(sfmmup, 0);
8838 8832 } else {
8839 8833 sfmmu_check_page_sizes(sfmmup, 1);
8840 8834 }
8841 8835 return (0);
8842 8836 }
8843 8837
8844 8838 /*
8845 8839 * hat_unshare removes exactly one ism_map from
8846 8840 * this process's as. It expects multiple calls
8847 8841 * to hat_unshare for multiple shm segments.
8848 8842 */
8849 8843 void
8850 8844 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8851 8845 {
8852 8846 ism_map_t *ism_map;
8853 8847 ism_ment_t *free_ment = NULL;
8854 8848 ism_blk_t *ism_blkp;
8855 8849 struct hat *ism_hatid;
8856 8850 int found, i;
8857 8851 hatlock_t *hatlockp;
8858 8852 struct tsb_info *tsbinfo;
8859 8853 uint_t ismshift = page_get_shift(ismszc);
8860 8854 size_t sh_size = ISM_SHIFT(ismshift, len);
8861 8855 uchar_t ism_rid;
8862 8856 sf_scd_t *old_scdp;
8863 8857
8864 8858 ASSERT(ISM_ALIGNED(ismshift, addr));
8865 8859 ASSERT(ISM_ALIGNED(ismshift, len));
8866 8860 ASSERT(sfmmup != NULL);
8867 8861 ASSERT(sfmmup != ksfmmup);
8868 8862
8869 8863 if (sfmmup->sfmmu_xhat_provider) {
8870 8864 XHAT_UNSHARE(sfmmup, addr, len);
8871 8865 return;
8872 8866 } else {
8873 8867 /*
8874 8868 * This must be a CPU HAT. If the address space has
8875 8869 		 * XHATs attached, inform all XHATs that the ISM segment
8876 8870 		 * is going away.
8877 8871 */
8878 8872 ASSERT(sfmmup->sfmmu_as != NULL);
8879 8873 if (sfmmup->sfmmu_as->a_xhat != NULL)
8880 8874 xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8881 8875 }
8882 8876
8883 8877 /*
8884 8878 * Make sure that during the entire time ISM mappings are removed,
8885 8879 * the trap handlers serialize behind us, and that no one else
8886 8880 * can be mucking with ISM mappings. This also lets us get away
8887 8881 * with not doing expensive cross calls to flush the TLB -- we
8888 8882 * just discard the context, flush the entire TSB, and call it
8889 8883 * a day.
8890 8884 */
8891 8885 sfmmu_ismhat_enter(sfmmup, 0);
8892 8886
8893 8887 /*
8894 8888 * Remove the mapping.
8895 8889 *
8896 8890 * We can't have any holes in the ism map.
8897 8891 	 * The tsb miss code, while searching the ism map, will
8898 8892 	 * stop on an empty map slot. So, if a hole is created,
8899 8893 	 * we must move every entry past it up by one.
8900 8894 *
8901 8895 * Also empty ism map blks are not freed until the
8902 8896 * process exits. This is to prevent a MT race condition
8903 8897 * between sfmmu_unshare() and sfmmu_tsbmiss_exception().
8904 8898 */
8905 8899 found = 0;
8906 8900 ism_blkp = sfmmup->sfmmu_iblk;
8907 8901 while (!found && ism_blkp != NULL) {
8908 8902 ism_map = ism_blkp->iblk_maps;
8909 8903 for (i = 0; i < ISM_MAP_SLOTS; i++) {
8910 8904 if (addr == ism_start(ism_map[i]) &&
8911 8905 sh_size == (size_t)(ism_size(ism_map[i]))) {
8912 8906 found = 1;
8913 8907 break;
8914 8908 }
8915 8909 }
8916 8910 if (!found)
8917 8911 ism_blkp = ism_blkp->iblk_next;
8918 8912 }
8919 8913
8920 8914 if (found) {
8921 8915 ism_hatid = ism_map[i].imap_ismhat;
8922 8916 ism_rid = ism_map[i].imap_rid;
8923 8917 ASSERT(ism_hatid != NULL);
8924 8918 ASSERT(ism_hatid->sfmmu_ismhat == 1);
8925 8919
8926 8920 /*
8927 8921 * After hat_leave_region, the sfmmup may leave SCD,
8928 8922 * in which case, we want to grow the private tsb size when
8929 8923 * calling sfmmu_check_page_sizes at the end of the routine.
8930 8924 */
8931 8925 old_scdp = sfmmup->sfmmu_scdp;
8932 8926 /*
8933 8927 * Then remove ourselves from the region.
8934 8928 */
8935 8929 if (ism_rid != SFMMU_INVALID_ISMRID) {
8936 8930 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid),
8937 8931 HAT_REGION_ISM);
8938 8932 }
8939 8933
8940 8934 /*
8941 8935 * And now guarantee that any other cpu
8942 8936 * that tries to process an ISM miss
8943 8937 * will go to tl=0.
8944 8938 */
8945 8939 hatlockp = sfmmu_hat_enter(sfmmup);
8946 8940 sfmmu_invalidate_ctx(sfmmup);
8947 8941 sfmmu_hat_exit(hatlockp);
8948 8942
8949 8943 /*
8950 8944 * Remove ourselves from the ism mapping list.
8951 8945 */
8952 8946 mutex_enter(&ism_mlist_lock);
8953 8947 iment_sub(ism_map[i].imap_ment, ism_hatid);
8954 8948 mutex_exit(&ism_mlist_lock);
8955 8949 free_ment = ism_map[i].imap_ment;
8956 8950
8957 8951 /*
8958 8952 * We delete the ism map by copying
8959 8953 * the next map over the current one.
8960 8954 * We will take the next one in the maps
8961 8955 * array or from the next ism_blk.
8962 8956 */
8963 8957 while (ism_blkp != NULL) {
8964 8958 ism_map = ism_blkp->iblk_maps;
8965 8959 while (i < (ISM_MAP_SLOTS - 1)) {
8966 8960 ism_map[i] = ism_map[i + 1];
8967 8961 i++;
8968 8962 }
8969 8963 /* i == (ISM_MAP_SLOTS - 1) */
8970 8964 ism_blkp = ism_blkp->iblk_next;
8971 8965 if (ism_blkp != NULL) {
8972 8966 ism_map[i] = ism_blkp->iblk_maps[0];
8973 8967 i = 0;
8974 8968 } else {
8975 8969 ism_map[i].imap_seg = 0;
8976 8970 ism_map[i].imap_vb_shift = 0;
8977 8971 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID;
8978 8972 ism_map[i].imap_hatflags = 0;
8979 8973 ism_map[i].imap_sz_mask = 0;
8980 8974 ism_map[i].imap_ismhat = NULL;
8981 8975 ism_map[i].imap_ment = NULL;
8982 8976 }
8983 8977 }
8984 8978
8985 8979 /*
8986 8980 * Now flush entire TSB for the process, since
8987 8981 * demapping page by page can be too expensive.
8988 8982 * We don't have to flush the TLB here anymore
8989 8983 * since we switch to a new TLB ctx instead.
8990 8984 * Also, there is no need to flush if the process
8991 8985 * is exiting since the TSB will be freed later.
8992 8986 */
8993 8987 if (!sfmmup->sfmmu_free) {
8994 8988 hatlockp = sfmmu_hat_enter(sfmmup);
8995 8989 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL;
8996 8990 tsbinfo = tsbinfo->tsb_next) {
8997 8991 if (tsbinfo->tsb_flags & TSB_SWAPPED)
8998 8992 continue;
8999 8993 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) {
9000 8994 tsbinfo->tsb_flags |=
9001 8995 TSB_FLUSH_NEEDED;
9002 8996 continue;
9003 8997 }
9004 8998
9005 8999 sfmmu_inv_tsb(tsbinfo->tsb_va,
9006 9000 TSB_BYTES(tsbinfo->tsb_szc));
9007 9001 }
9008 9002 sfmmu_hat_exit(hatlockp);
9009 9003 }
9010 9004 }
9011 9005
9012 9006 /*
9013 9007 * Update our counters for this sfmmup's ism mappings.
9014 9008 */
9015 9009 for (i = 0; i <= ismszc; i++) {
9016 9010 if (!(disable_ism_large_pages & (1 << i)))
9017 9011 (void) ism_tsb_entries(sfmmup, i);
9018 9012 }
9019 9013
9020 9014 sfmmu_ismhat_exit(sfmmup, 0);
9021 9015
9022 9016 /*
9023 9017 * We must do our freeing here after dropping locks
9024 9018 * to prevent a deadlock in the kmem allocator on the
9025 9019 * mapping list lock.
9026 9020 */
9027 9021 if (free_ment != NULL)
9028 9022 kmem_cache_free(ism_ment_cache, free_ment);
9029 9023
9030 9024 /*
9031 9025 * Check TSB and TLB page sizes if the process isn't exiting.
9032 9026 */
9033 9027 if (!sfmmup->sfmmu_free) {
9034 9028 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
9035 9029 sfmmu_check_page_sizes(sfmmup, 1);
9036 9030 } else {
9037 9031 sfmmu_check_page_sizes(sfmmup, 0);
9038 9032 }
9039 9033 }
9040 9034 }
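Because the tsb miss code stops at the first empty slot, the removal in hat_unshare() compacts the slot array across all blocks rather than leaving a hole. The sketch below performs the same shift-left compaction over a toy two-level structure; blk_t, the int slots and SLOTS are illustrative, not the real ism_blk_t/ism_map_t layout.

#include <stdio.h>

#define SLOTS	4

typedef struct blk {
	int slot[SLOTS];	/* 0 means "empty", like imap_ismhat == NULL */
	struct blk *next;
} blk_t;

/*
 * Remove the entry at (blkp, i) by copying each following entry one
 * position up, pulling the first entry of the next block into the last
 * slot of this one, and finally clearing the last occupied slot.
 */
static void
compact_from(blk_t *blkp, int i)
{
	while (blkp != NULL) {
		while (i < SLOTS - 1) {
			blkp->slot[i] = blkp->slot[i + 1];
			i++;
		}
		if (blkp->next != NULL) {
			blkp->slot[i] = blkp->next->slot[0];
			blkp = blkp->next;
			i = 0;
		} else {
			blkp->slot[i] = 0;	/* no hole left behind */
			blkp = NULL;
		}
	}
}

int
main(void)
{
	blk_t b2 = { { 5, 6, 0, 0 }, NULL };
	blk_t b1 = { { 1, 2, 3, 4 }, &b2 };

	compact_from(&b1, 1);		/* remove the "2" entry */
	printf("%d %d %d %d | %d %d\n",
	    b1.slot[0], b1.slot[1], b1.slot[2], b1.slot[3],
	    b2.slot[0], b2.slot[1]);	/* prints: 1 3 4 5 | 6 0 */
	return (0);
}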
9041 9035
9042 9036 /* ARGSUSED */
9043 9037 static int
9044 9038 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags)
9045 9039 {
9046 9040 /* void *buf is sfmmu_t pointer */
9047 9041 bzero(buf, sizeof (sfmmu_t));
9048 9042
9049 9043 return (0);
9050 9044 }
9051 9045
9052 9046 /* ARGSUSED */
9053 9047 static void
9054 9048 sfmmu_idcache_destructor(void *buf, void *cdrarg)
9055 9049 {
9056 9050 /* void *buf is sfmmu_t pointer */
9057 9051 }
9058 9052
9059 9053 /*
9060 9054 * setup kmem hmeblks by bzeroing all members and initializing the nextpa
9061 9055 * field to be the pa of this hmeblk
9062 9056 */
9063 9057 /* ARGSUSED */
9064 9058 static int
9065 9059 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags)
9066 9060 {
9067 9061 struct hme_blk *hmeblkp;
9068 9062
9069 9063 bzero(buf, (size_t)cdrarg);
9070 9064 hmeblkp = (struct hme_blk *)buf;
9071 9065 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
9072 9066
9073 9067 #ifdef HBLK_TRACE
9074 9068 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL);
9075 9069 #endif /* HBLK_TRACE */
9076 9070
9077 9071 return (0);
9078 9072 }
9079 9073
9080 9074 /* ARGSUSED */
9081 9075 static void
9082 9076 sfmmu_hblkcache_destructor(void *buf, void *cdrarg)
9083 9077 {
9084 9078
9085 9079 #ifdef HBLK_TRACE
9086 9080
9087 9081 struct hme_blk *hmeblkp;
9088 9082
9089 9083 hmeblkp = (struct hme_blk *)buf;
9090 9084 mutex_destroy(&hmeblkp->hblk_audit_lock);
9091 9085
9092 9086 #endif /* HBLK_TRACE */
9093 9087 }
9094 9088
9095 9089 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8
9096 9090 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO;
9097 9091 /*
9098 9092 * The kmem allocator will callback into our reclaim routine when the system
9099 9093 * is running low in memory. We traverse the hash and free up all unused but
9100 9094 * still cached hme_blks. We also traverse the free list and free them up
9101 9095 * as well.
9102 9096 */
9103 9097 /*ARGSUSED*/
9104 9098 static void
9105 9099 sfmmu_hblkcache_reclaim(void *cdrarg)
9106 9100 {
9107 9101 int i;
9108 9102 struct hmehash_bucket *hmebp;
9109 9103 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL;
9110 9104 static struct hmehash_bucket *uhmehash_reclaim_hand;
9111 9105 static struct hmehash_bucket *khmehash_reclaim_hand;
9112 9106 struct hme_blk *list = NULL, *last_hmeblkp;
9113 9107 cpuset_t cpuset = cpu_ready_set;
9114 9108 cpu_hme_pend_t *cpuhp;
9115 9109
9116 9110 /* Free up hmeblks on the cpu pending lists */
9117 9111 for (i = 0; i < NCPU; i++) {
9118 9112 cpuhp = &cpu_hme_pend[i];
9119 9113 if (cpuhp->chp_listp != NULL) {
9120 9114 mutex_enter(&cpuhp->chp_mutex);
9121 9115 if (cpuhp->chp_listp == NULL) {
9122 9116 mutex_exit(&cpuhp->chp_mutex);
9123 9117 continue;
9124 9118 }
9125 9119 for (last_hmeblkp = cpuhp->chp_listp;
9126 9120 last_hmeblkp->hblk_next != NULL;
9127 9121 last_hmeblkp = last_hmeblkp->hblk_next)
9128 9122 ;
9129 9123 last_hmeblkp->hblk_next = list;
9130 9124 list = cpuhp->chp_listp;
9131 9125 cpuhp->chp_listp = NULL;
9132 9126 cpuhp->chp_count = 0;
9133 9127 mutex_exit(&cpuhp->chp_mutex);
9134 9128 }
9135 9129
9136 9130 }
9137 9131
9138 9132 if (list != NULL) {
9139 9133 kpreempt_disable();
9140 9134 CPUSET_DEL(cpuset, CPU->cpu_id);
9141 9135 xt_sync(cpuset);
9142 9136 xt_sync(cpuset);
9143 9137 kpreempt_enable();
9144 9138 sfmmu_hblk_free(&list);
9145 9139 list = NULL;
9146 9140 }
9147 9141
9148 9142 hmebp = uhmehash_reclaim_hand;
9149 9143 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ])
9150 9144 uhmehash_reclaim_hand = hmebp = uhme_hash;
9151 9145 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9152 9146
9153 9147 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9154 9148 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9155 9149 hmeblkp = hmebp->hmeblkp;
9156 9150 pr_hblk = NULL;
9157 9151 while (hmeblkp) {
9158 9152 nx_hblk = hmeblkp->hblk_next;
9159 9153 if (!hmeblkp->hblk_vcnt &&
9160 9154 !hmeblkp->hblk_hmecnt) {
9161 9155 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9162 9156 pr_hblk, &list, 0);
9163 9157 } else {
9164 9158 pr_hblk = hmeblkp;
9165 9159 }
9166 9160 hmeblkp = nx_hblk;
9167 9161 }
9168 9162 SFMMU_HASH_UNLOCK(hmebp);
9169 9163 }
9170 9164 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
9171 9165 hmebp = uhme_hash;
9172 9166 }
9173 9167
9174 9168 hmebp = khmehash_reclaim_hand;
9175 9169 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ])
9176 9170 khmehash_reclaim_hand = hmebp = khme_hash;
9177 9171 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio;
9178 9172
9179 9173 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) {
9180 9174 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) {
9181 9175 hmeblkp = hmebp->hmeblkp;
9182 9176 pr_hblk = NULL;
9183 9177 while (hmeblkp) {
9184 9178 nx_hblk = hmeblkp->hblk_next;
9185 9179 if (!hmeblkp->hblk_vcnt &&
9186 9180 !hmeblkp->hblk_hmecnt) {
9187 9181 sfmmu_hblk_hash_rm(hmebp, hmeblkp,
9188 9182 pr_hblk, &list, 0);
9189 9183 } else {
9190 9184 pr_hblk = hmeblkp;
9191 9185 }
9192 9186 hmeblkp = nx_hblk;
9193 9187 }
9194 9188 SFMMU_HASH_UNLOCK(hmebp);
9195 9189 }
9196 9190 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
9197 9191 hmebp = khme_hash;
9198 9192 }
9199 9193 sfmmu_hblks_list_purge(&list, 0);
9200 9194 }
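Each invocation of the reclaim callback above scans only 1/sfmmu_cache_reclaim_scan_ratio of a hash, remembering where it stopped in a static hand pointer. A condensed, lock-free model of that rotating scan is sketched below, with a fixed-size table and a toy bucket type standing in for the hmehash buckets and the freeing work.

#include <stdio.h>

#define HASH_SZ		64
#define SCAN_RATIO	8	/* visit 1/8 of the table per call */

typedef struct {
	int unused;
} bucket_t;

static bucket_t table[HASH_SZ];
static bucket_t *hand;		/* persists across calls, like the static
				   *_reclaim_hand pointers */

static void
reclaim_scan(void)
{
	bucket_t *bp;

	if (hand == NULL || hand >= &table[HASH_SZ])
		hand = table;

	bp = hand;
	hand += HASH_SZ / SCAN_RATIO;	/* next call resumes here */

	for (int i = HASH_SZ / SCAN_RATIO; i > 0; i--) {
		bp->unused = 0;		/* stand-in for freeing idle hmeblks */
		if (++bp >= &table[HASH_SZ])
			bp = table;	/* wrap to the start of the hash */
	}
}

int
main(void)
{
	for (int call = 0; call < 9; call++)
		reclaim_scan();
	printf("scanned %d buckets in strides of %d\n",
	    HASH_SZ, HASH_SZ / SCAN_RATIO);
	return (0);
}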
9201 9195
9202 9196 /*
9203 9197 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface.
9204 9198 * same goes for sfmmu_get_addrvcolor().
9205 9199 *
9206 9200 * This function will return the virtual color for the specified page. The
9207 9201  * virtual color corresponds to this page's current mapping or its last mapping.
9208 9202 * It is used by memory allocators to choose addresses with the correct
9209 9203 * alignment so vac consistency is automatically maintained. If the page
9210 9204 * has no color it returns -1.
9211 9205 */
9212 9206 /*ARGSUSED*/
9213 9207 int
9214 9208 sfmmu_get_ppvcolor(struct page *pp)
9215 9209 {
9216 9210 #ifdef VAC
9217 9211 int color;
9218 9212
9219 9213 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) {
9220 9214 return (-1);
9221 9215 }
9222 9216 color = PP_GET_VCOLOR(pp);
9223 9217 ASSERT(color < mmu_btop(shm_alignment));
9224 9218 return (color);
9225 9219 #else
9226 9220 return (-1);
9227 9221 #endif /* VAC */
9228 9222 }
9229 9223
9230 9224 /*
9231 9225 * This function will return the desired alignment for vac consistency
9232 9226 * (vac color) given a virtual address. If no vac is present it returns -1.
9233 9227 */
9234 9228 /*ARGSUSED*/
9235 9229 int
9236 9230 sfmmu_get_addrvcolor(caddr_t vaddr)
9237 9231 {
9238 9232 #ifdef VAC
9239 9233 if (cache & CACHE_VAC) {
9240 9234 return (addr_to_vcolor(vaddr));
9241 9235 } else {
9242 9236 return (-1);
9243 9237 }
9244 9238 #else
9245 9239 return (-1);
9246 9240 #endif /* VAC */
9247 9241 }
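A virtual color is just the page-sized slice of an address within the cache's alias range (shm_alignment). The toy helper below hard-codes an 8K page and a 64K alias range purely to show the arithmetic behind an addr_to_vcolor()-style conversion; the constants and function name are illustrative, not the kernel's.

#include <stdio.h>
#include <stdint.h>

#define PAGESIZE	8192UL		/* illustrative 8K base page */
#define ALIAS_RANGE	65536UL		/* illustrative shm_alignment: 8 colors */

static int
addr_to_color(uintptr_t vaddr)
{
	/* offset within the alias range, expressed in pages */
	return ((int)((vaddr & (ALIAS_RANGE - 1)) / PAGESIZE));
}

int
main(void)
{
	/* two addresses a full alias range apart share a color and may alias */
	printf("%d %d %d\n",
	    addr_to_color(0x10000), addr_to_color(0x20000),
	    addr_to_color(0x12000));	/* prints: 0 0 1 */
	return (0);
}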
9248 9242
9249 9243 #ifdef VAC
9250 9244 /*
9251 9245 * Check for conflicts.
9252 9246  * A conflict exists if the new and existing mappings do not match in
9253 9247  * their shm_alignment fields. If conflicts exist, the existing mappings
9254 9248 * are flushed unless one of them is locked. If one of them is locked, then
9255 9249 * the mappings are flushed and converted to non-cacheable mappings.
9256 9250 */
9257 9251 static void
9258 9252 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp)
9259 9253 {
9260 9254 struct hat *tmphat;
9261 9255 struct sf_hment *sfhmep, *tmphme = NULL;
9262 9256 struct hme_blk *hmeblkp;
9263 9257 int vcolor;
9264 9258 tte_t tte;
9265 9259
9266 9260 ASSERT(sfmmu_mlist_held(pp));
9267 9261 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */
9268 9262
9269 9263 vcolor = addr_to_vcolor(addr);
9270 9264 if (PP_NEWPAGE(pp)) {
9271 9265 PP_SET_VCOLOR(pp, vcolor);
9272 9266 return;
9273 9267 }
9274 9268
9275 9269 if (PP_GET_VCOLOR(pp) == vcolor) {
9276 9270 return;
9277 9271 }
9278 9272
9279 9273 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) {
9280 9274 /*
9281 9275 * Previous user of page had a different color
9282 9276 * but since there are no current users
9283 9277 * we just flush the cache and change the color.
9284 9278 */
9285 9279 SFMMU_STAT(sf_pgcolor_conflict);
9286 9280 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9287 9281 PP_SET_VCOLOR(pp, vcolor);
9288 9282 return;
9289 9283 }
9290 9284
9291 9285 /*
9292 9286 * If we get here we have a vac conflict with a current
9293 9287 * mapping. VAC conflict policy is as follows.
9294 9288 * - The default is to unload the other mappings unless:
9295 9289 * - If we have a large mapping we uncache the page.
9296 9290 * We need to uncache the rest of the large page too.
9297 9291 * - If any of the mappings are locked we uncache the page.
9298 9292 * - If the requested mapping is inconsistent
9299 9293 * with another mapping and that mapping
9300 9294 * is in the same address space we have to
9301 9295 * make it non-cached. The default thing
9302 9296 * to do is unload the inconsistent mapping
9303 9297 * but if they are in the same address space
9304 9298 * we run the risk of unmapping the pc or the
9305 9299 * stack which we will use as we return to the user,
9306 9300 * in which case we can then fault on the thing
9307 9301 * we just unloaded and get into an infinite loop.
9308 9302 */
9309 9303 if (PP_ISMAPPED_LARGE(pp)) {
9310 9304 int sz;
9311 9305
9312 9306 /*
9313 9307 * Existing mapping is for big pages. We don't unload
9314 9308 * existing big mappings to satisfy new mappings.
9315 9309 * Always convert all mappings to TNC.
9316 9310 */
9317 9311 sz = fnd_mapping_sz(pp);
9318 9312 pp = PP_GROUPLEADER(pp, sz);
9319 9313 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz));
9320 9314 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH,
9321 9315 TTEPAGES(sz));
9322 9316
9323 9317 return;
9324 9318 }
9325 9319
9326 9320 /*
9327 9321 * check if any mapping is in same as or if it is locked
9328 9322 * since in that case we need to uncache.
9329 9323 */
9330 9324 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9331 9325 tmphme = sfhmep->hme_next;
9332 9326 if (IS_PAHME(sfhmep))
9333 9327 continue;
9334 9328 hmeblkp = sfmmu_hmetohblk(sfhmep);
9335 9329 if (hmeblkp->hblk_xhat_bit)
9336 9330 continue;
9337 9331 tmphat = hblktosfmmu(hmeblkp);
9338 9332 sfmmu_copytte(&sfhmep->hme_tte, &tte);
9339 9333 ASSERT(TTE_IS_VALID(&tte));
9340 9334 if (hmeblkp->hblk_shared || tmphat == hat ||
9341 9335 hmeblkp->hblk_lckcnt) {
9342 9336 /*
9343 9337 * We have an uncache conflict
9344 9338 */
9345 9339 SFMMU_STAT(sf_uncache_conflict);
9346 9340 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
9347 9341 return;
9348 9342 }
9349 9343 }
9350 9344
9351 9345 /*
9352 9346 * We have an unload conflict
9353 9347 * We have already checked for LARGE mappings, therefore
9354 9348 * the remaining mapping(s) must be TTE8K.
9355 9349 */
9356 9350 SFMMU_STAT(sf_unload_conflict);
9357 9351
9358 9352 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9359 9353 tmphme = sfhmep->hme_next;
9360 9354 if (IS_PAHME(sfhmep))
9361 9355 continue;
9362 9356 hmeblkp = sfmmu_hmetohblk(sfhmep);
9363 9357 if (hmeblkp->hblk_xhat_bit)
9364 9358 continue;
9365 9359 ASSERT(!hmeblkp->hblk_shared);
9366 9360 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9367 9361 }
9368 9362
9369 9363 if (PP_ISMAPPED_KPM(pp))
9370 9364 sfmmu_kpm_vac_unload(pp, addr);
9371 9365
9372 9366 /*
9373 9367 * Unloads only do TLB flushes so we need to flush the
9374 9368 * cache here.
9375 9369 */
9376 9370 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
9377 9371 PP_SET_VCOLOR(pp, vcolor);
9378 9372 }
9379 9373
9380 9374 /*
9381 9375 * Whenever a mapping is unloaded and the page is in TNC state,
9382 9376 * we see if the page can be made cacheable again. 'pp' is
9383 9377 * the page that we just unloaded a mapping from, the size
9384 9378 * of mapping that was unloaded is 'ottesz'.
9385 9379 * Remark:
9386 9380 * The recache policy for mpss pages can leave a performance problem
9387 9381 * under the following circumstances:
9388 9382 * . A large page in uncached mode has just been unmapped.
9389 9383 * . All constituent pages are TNC due to a conflicting small mapping.
9390 9384 * . There are many other, non conflicting, small mappings around for
9391 9385 * a lot of the constituent pages.
9392 9386 * . We're called w/ the "old" groupleader page and the old ottesz,
9393 9387 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so
9394 9388 * we end up w/ TTE8K or npages == 1.
9395 9389 * . We call tst_tnc w/ the old groupleader only, and if there is no
9396 9390 * conflict, we re-cache only this page.
9397 9391 * . All other small mappings are not checked and will be left in TNC mode.
9398 9392 * The problem is not very serious because:
9399 9393 * . mpss is actually only defined for heap and stack, so the probability
9400 9394 * is not very high that a large page mapping exists in parallel to a small
9401 9395 * one (this is possible, but seems to be bad programming style in the
9402 9396 * appl).
9403 9397  * . The problem gets a little bit more serious when those TNC pages
9404 9398  * have to be mapped into kernel space, e.g. for networking.
9405 9399  * . When VAC alias conflicts occur in applications, this is regarded
9406 9400  * as an application bug. So if kstats show them, the appl should
9407 9401 * be changed anyway.
9408 9402 */
9409 9403 void
9410 9404 conv_tnc(page_t *pp, int ottesz)
9411 9405 {
9412 9406 int cursz, dosz;
9413 9407 pgcnt_t curnpgs, dopgs;
9414 9408 pgcnt_t pg64k;
9415 9409 page_t *pp2;
9416 9410
9417 9411 /*
9418 9412 * Determine how big a range we check for TNC and find
9419 9413 * leader page. cursz is the size of the biggest
9420 9414 * mapping that still exist on 'pp'.
9421 9415 */
9422 9416 if (PP_ISMAPPED_LARGE(pp)) {
9423 9417 cursz = fnd_mapping_sz(pp);
9424 9418 } else {
9425 9419 cursz = TTE8K;
9426 9420 }
9427 9421
9428 9422 if (ottesz >= cursz) {
9429 9423 dosz = ottesz;
9430 9424 pp2 = pp;
9431 9425 } else {
9432 9426 dosz = cursz;
9433 9427 pp2 = PP_GROUPLEADER(pp, dosz);
9434 9428 }
9435 9429
9436 9430 pg64k = TTEPAGES(TTE64K);
9437 9431 dopgs = TTEPAGES(dosz);
9438 9432
9439 9433 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0));
9440 9434
9441 9435 while (dopgs != 0) {
9442 9436 curnpgs = TTEPAGES(cursz);
9443 9437 if (tst_tnc(pp2, curnpgs)) {
9444 9438 SFMMU_STAT_ADD(sf_recache, curnpgs);
9445 9439 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH,
9446 9440 curnpgs);
9447 9441 }
9448 9442
9449 9443 ASSERT(dopgs >= curnpgs);
9450 9444 dopgs -= curnpgs;
9451 9445
9452 9446 if (dopgs == 0) {
9453 9447 break;
9454 9448 }
9455 9449
9456 9450 pp2 = PP_PAGENEXT_N(pp2, curnpgs);
9457 9451 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) {
9458 9452 cursz = fnd_mapping_sz(pp2);
9459 9453 } else {
9460 9454 cursz = TTE8K;
9461 9455 }
9462 9456 }
9463 9457 }
9464 9458
9465 9459 /*
9466 9460 * Returns 1 if page(s) can be converted from TNC to cacheable setting,
9467 9461 * returns 0 otherwise. Note that oaddr argument is valid for only
9468 9462 * 8k pages.
9469 9463 */
9470 9464 int
9471 9465 tst_tnc(page_t *pp, pgcnt_t npages)
9472 9466 {
9473 9467 struct sf_hment *sfhme;
9474 9468 struct hme_blk *hmeblkp;
9475 9469 tte_t tte;
9476 9470 caddr_t vaddr;
9477 9471 int clr_valid = 0;
9478 9472 int color, color1, bcolor;
9479 9473 int i, ncolors;
9480 9474
9481 9475 ASSERT(pp != NULL);
9482 9476 ASSERT(!(cache & CACHE_WRITEBACK));
9483 9477
9484 9478 if (npages > 1) {
9485 9479 ncolors = CACHE_NUM_COLOR;
9486 9480 }
9487 9481
9488 9482 for (i = 0; i < npages; i++) {
9489 9483 ASSERT(sfmmu_mlist_held(pp));
9490 9484 ASSERT(PP_ISTNC(pp));
9491 9485 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR);
9492 9486
9493 9487 if (PP_ISPNC(pp)) {
9494 9488 return (0);
9495 9489 }
9496 9490
9497 9491 clr_valid = 0;
9498 9492 if (PP_ISMAPPED_KPM(pp)) {
9499 9493 caddr_t kpmvaddr;
9500 9494
9501 9495 ASSERT(kpm_enable);
9502 9496 kpmvaddr = hat_kpm_page2va(pp, 1);
9503 9497 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9504 9498 color1 = addr_to_vcolor(kpmvaddr);
9505 9499 clr_valid = 1;
9506 9500 }
9507 9501
9508 9502 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9509 9503 if (IS_PAHME(sfhme))
9510 9504 continue;
9511 9505 hmeblkp = sfmmu_hmetohblk(sfhme);
9512 9506 if (hmeblkp->hblk_xhat_bit)
9513 9507 continue;
9514 9508
9515 9509 sfmmu_copytte(&sfhme->hme_tte, &tte);
9516 9510 ASSERT(TTE_IS_VALID(&tte));
9517 9511
9518 9512 vaddr = tte_to_vaddr(hmeblkp, tte);
9519 9513 color = addr_to_vcolor(vaddr);
9520 9514
9521 9515 if (npages > 1) {
9522 9516 /*
9523 9517 * If there is a big mapping, make sure
9524 9518 * 8K mapping is consistent with the big
9525 9519 * mapping.
9526 9520 */
9527 9521 bcolor = i % ncolors;
9528 9522 if (color != bcolor) {
9529 9523 return (0);
9530 9524 }
9531 9525 }
9532 9526 if (!clr_valid) {
9533 9527 clr_valid = 1;
9534 9528 color1 = color;
9535 9529 }
9536 9530
9537 9531 if (color1 != color) {
9538 9532 return (0);
9539 9533 }
9540 9534 }
9541 9535
9542 9536 pp = PP_PAGENEXT(pp);
9543 9537 }
9544 9538
9545 9539 return (1);
9546 9540 }
9547 9541
9548 9542 void
9549 9543 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9550 9544 pgcnt_t npages)
9551 9545 {
9552 9546 kmutex_t *pmtx;
9553 9547 int i, ncolors, bcolor;
9554 9548 kpm_hlk_t *kpmp;
9555 9549 cpuset_t cpuset;
9556 9550
9557 9551 ASSERT(pp != NULL);
9558 9552 ASSERT(!(cache & CACHE_WRITEBACK));
9559 9553
9560 9554 kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
9561 9555 pmtx = sfmmu_page_enter(pp);
9562 9556
9563 9557 /*
9564 9558 * Fast path caching single unmapped page
9565 9559 */
9566 9560 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) &&
9567 9561 flags == HAT_CACHE) {
9568 9562 PP_CLRTNC(pp);
9569 9563 PP_CLRPNC(pp);
9570 9564 sfmmu_page_exit(pmtx);
9571 9565 sfmmu_kpm_kpmp_exit(kpmp);
9572 9566 return;
9573 9567 }
9574 9568
9575 9569 /*
9576 9570 * We need to capture all cpus in order to change cacheability
9577 9571 * because we can't allow one cpu to access the same physical
9578 9572 	 * page using a cacheable and a non-cacheable mapping at the same
9579 9573 	 * time. Since we may end up walking the ism mapping list, we
9580 9574 	 * have to grab its lock now; we can't once all the
9581 9575 	 * cpus have been captured.
9582 9576 */
9583 9577 sfmmu_hat_lock_all();
9584 9578 mutex_enter(&ism_mlist_lock);
9585 9579 kpreempt_disable();
9586 9580 cpuset = cpu_ready_set;
9587 9581 xc_attention(cpuset);
9588 9582
9589 9583 if (npages > 1) {
9590 9584 /*
9591 9585 * Make sure all colors are flushed since the
9592 9586 * sfmmu_page_cache() only flushes one color-
9593 9587 * it does not know big pages.
9594 9588 */
9595 9589 ncolors = CACHE_NUM_COLOR;
9596 9590 if (flags & HAT_TMPNC) {
9597 9591 for (i = 0; i < ncolors; i++) {
9598 9592 sfmmu_cache_flushcolor(i, pp->p_pagenum);
9599 9593 }
9600 9594 cache_flush_flag = CACHE_NO_FLUSH;
9601 9595 }
9602 9596 }
9603 9597
9604 9598 for (i = 0; i < npages; i++) {
9605 9599
9606 9600 ASSERT(sfmmu_mlist_held(pp));
9607 9601
9608 9602 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) {
9609 9603
9610 9604 if (npages > 1) {
9611 9605 bcolor = i % ncolors;
9612 9606 } else {
9613 9607 bcolor = NO_VCOLOR;
9614 9608 }
9615 9609
9616 9610 sfmmu_page_cache(pp, flags, cache_flush_flag,
9617 9611 bcolor);
9618 9612 }
9619 9613
9620 9614 pp = PP_PAGENEXT(pp);
9621 9615 }
9622 9616
9623 9617 xt_sync(cpuset);
9624 9618 xc_dismissed(cpuset);
9625 9619 mutex_exit(&ism_mlist_lock);
9626 9620 sfmmu_hat_unlock_all();
9627 9621 sfmmu_page_exit(pmtx);
9628 9622 sfmmu_kpm_kpmp_exit(kpmp);
9629 9623 kpreempt_enable();
9630 9624 }
9631 9625
9632 9626 /*
9633 9627 * This function changes the virtual cacheability of all mappings to a
9634 9628 * particular page. When changing from uncache to cacheable the mappings will
9635 9629 * only be changed if all of them have the same virtual color.
9636 9630 * We need to flush the cache in all cpus. It is possible that
9637 9631  * a process referenced a page as cacheable but has since exited
9638 9632  * and cleared the mapping list. We still need to flush it, but have no
9639 9633  * state, so flushing on all cpus is the only alternative.
9640 9634 */
9641 9635 static void
9642 9636 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor)
9643 9637 {
9644 9638 struct sf_hment *sfhme;
9645 9639 struct hme_blk *hmeblkp;
9646 9640 sfmmu_t *sfmmup;
9647 9641 tte_t tte, ttemod;
9648 9642 caddr_t vaddr;
9649 9643 int ret, color;
9650 9644 pfn_t pfn;
9651 9645
9652 9646 color = bcolor;
9653 9647 pfn = pp->p_pagenum;
9654 9648
9655 9649 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9656 9650
9657 9651 if (IS_PAHME(sfhme))
9658 9652 continue;
9659 9653 hmeblkp = sfmmu_hmetohblk(sfhme);
9660 9654
9661 9655 if (hmeblkp->hblk_xhat_bit)
9662 9656 continue;
9663 9657
9664 9658 sfmmu_copytte(&sfhme->hme_tte, &tte);
9665 9659 ASSERT(TTE_IS_VALID(&tte));
9666 9660 vaddr = tte_to_vaddr(hmeblkp, tte);
9667 9661 color = addr_to_vcolor(vaddr);
9668 9662
9669 9663 #ifdef DEBUG
9670 9664 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9671 9665 ASSERT(color == bcolor);
9672 9666 }
9673 9667 #endif
9674 9668
9675 9669 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp));
9676 9670
9677 9671 ttemod = tte;
9678 9672 if (flags & (HAT_UNCACHE | HAT_TMPNC)) {
9679 9673 TTE_CLR_VCACHEABLE(&ttemod);
9680 9674 } else { /* flags & HAT_CACHE */
9681 9675 TTE_SET_VCACHEABLE(&ttemod);
9682 9676 }
9683 9677 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
9684 9678 if (ret < 0) {
9685 9679 /*
9686 9680 * Since all cpus are captured modifytte should not
9687 9681 * fail.
9688 9682 */
9689 9683 panic("sfmmu_page_cache: write to tte failed");
9690 9684 }
9691 9685
9692 9686 sfmmup = hblktosfmmu(hmeblkp);
9693 9687 if (cache_flush_flag == CACHE_FLUSH) {
9694 9688 /*
9695 9689 * Flush TSBs, TLBs and caches
9696 9690 */
9697 9691 if (hmeblkp->hblk_shared) {
9698 9692 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9699 9693 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9700 9694 sf_region_t *rgnp;
9701 9695 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9702 9696 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9703 9697 ASSERT(srdp != NULL);
9704 9698 rgnp = srdp->srd_hmergnp[rid];
9705 9699 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9706 9700 srdp, rgnp, rid);
9707 9701 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9708 9702 hmeblkp, 0);
9709 9703 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr));
9710 9704 } else if (sfmmup->sfmmu_ismhat) {
9711 9705 if (flags & HAT_CACHE) {
9712 9706 SFMMU_STAT(sf_ism_recache);
9713 9707 } else {
9714 9708 SFMMU_STAT(sf_ism_uncache);
9715 9709 }
9716 9710 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9717 9711 pfn, CACHE_FLUSH);
9718 9712 } else {
9719 9713 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp,
9720 9714 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1);
9721 9715 }
9722 9716
9723 9717 /*
9724 9718 * all cache entries belonging to this pfn are
9725 9719 * now flushed.
9726 9720 */
9727 9721 cache_flush_flag = CACHE_NO_FLUSH;
9728 9722 } else {
9729 9723 /*
9730 9724 * Flush only TSBs and TLBs.
9731 9725 */
9732 9726 if (hmeblkp->hblk_shared) {
9733 9727 sf_srd_t *srdp = (sf_srd_t *)sfmmup;
9734 9728 uint_t rid = hmeblkp->hblk_tag.htag_rid;
9735 9729 sf_region_t *rgnp;
9736 9730 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
9737 9731 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
9738 9732 ASSERT(srdp != NULL);
9739 9733 rgnp = srdp->srd_hmergnp[rid];
9740 9734 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp,
9741 9735 srdp, rgnp, rid);
9742 9736 (void) sfmmu_rgntlb_demap(vaddr, rgnp,
9743 9737 hmeblkp, 0);
9744 9738 } else if (sfmmup->sfmmu_ismhat) {
9745 9739 if (flags & HAT_CACHE) {
9746 9740 SFMMU_STAT(sf_ism_recache);
9747 9741 } else {
9748 9742 SFMMU_STAT(sf_ism_uncache);
9749 9743 }
9750 9744 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp,
9751 9745 pfn, CACHE_NO_FLUSH);
9752 9746 } else {
9753 9747 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1);
9754 9748 }
9755 9749 }
9756 9750 }
9757 9751
9758 9752 if (PP_ISMAPPED_KPM(pp))
9759 9753 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag);
9760 9754
9761 9755 switch (flags) {
9762 9756
9763 9757 default:
9764 9758 panic("sfmmu_pagecache: unknown flags");
9765 9759 break;
9766 9760
9767 9761 case HAT_CACHE:
9768 9762 PP_CLRTNC(pp);
9769 9763 PP_CLRPNC(pp);
9770 9764 PP_SET_VCOLOR(pp, color);
9771 9765 break;
9772 9766
9773 9767 case HAT_TMPNC:
9774 9768 PP_SETTNC(pp);
9775 9769 PP_SET_VCOLOR(pp, NO_VCOLOR);
9776 9770 break;
9777 9771
9778 9772 case HAT_UNCACHE:
9779 9773 PP_SETPNC(pp);
9780 9774 PP_CLRTNC(pp);
9781 9775 PP_SET_VCOLOR(pp, NO_VCOLOR);
9782 9776 break;
9783 9777 }
9784 9778 }
9785 9779 #endif /* VAC */
9786 9780
9787 9781
9788 9782 /*
9789 9783 * Wrapper routine used to return a context.
9790 9784 *
9791 9785 * It's the responsibility of the caller to guarantee that the
9792 9786 * process serializes on calls here by taking the HAT lock for
9793 9787 * the hat.
9794 9788 *
9795 9789 */
9796 9790 static void
9797 9791 sfmmu_get_ctx(sfmmu_t *sfmmup)
9798 9792 {
9799 9793 mmu_ctx_t *mmu_ctxp;
9800 9794 uint_t pstate_save;
9801 9795 int ret;
9802 9796
9803 9797 ASSERT(sfmmu_hat_lock_held(sfmmup));
9804 9798 ASSERT(sfmmup != ksfmmup);
9805 9799
9806 9800 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) {
9807 9801 sfmmu_setup_tsbinfo(sfmmup);
9808 9802 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID);
9809 9803 }
9810 9804
9811 9805 kpreempt_disable();
9812 9806
9813 9807 mmu_ctxp = CPU_MMU_CTXP(CPU);
9814 9808 ASSERT(mmu_ctxp);
9815 9809 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
9816 9810 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
9817 9811
9818 9812 /*
9819 9813 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU.
9820 9814 */
9821 9815 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs)
9822 9816 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE);
9823 9817
9824 9818 /*
9825 9819 * Let the MMU set up the page sizes to use for
9826 9820 * this context in the TLB. Don't program 2nd dtlb for ism hat.
9827 9821 */
9828 9822 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) {
9829 9823 mmu_set_ctx_page_sizes(sfmmup);
9830 9824 }
9831 9825
9832 9826 /*
9833 9827 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with
9834 9828 	 * interrupts disabled to prevent a race condition with wrap-around
9835 9829 	 * ctx invalidation. In sun4v, ctx invalidation also involves
9836 9830 	 * a HV call to set the number of TSBs to 0. If interrupts are not
9837 9831 	 * disabled until after sfmmu_load_mmustate is complete, TSBs may
9838 9832 * become assigned to INVALID_CONTEXT. This is not allowed.
9839 9833 */
9840 9834 pstate_save = sfmmu_disable_intrs();
9841 9835
9842 9836 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) &&
9843 9837 sfmmup->sfmmu_scdp != NULL) {
9844 9838 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
9845 9839 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
9846 9840 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED);
9847 9841 /* debug purpose only */
9848 9842 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
9849 9843 != INVALID_CONTEXT);
9850 9844 }
9851 9845 sfmmu_load_mmustate(sfmmup);
9852 9846
9853 9847 sfmmu_enable_intrs(pstate_save);
9854 9848
9855 9849 kpreempt_enable();
9856 9850 }
9857 9851
9858 9852 /*
9859 9853 * When all cnums are used up in a MMU, cnum will wrap around to the
9860 9854 * next generation and start from 2.
9861 9855 */
9862 9856 static void
9863 9857 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum)
9864 9858 {
9865 9859
9866 9860 /* caller must have disabled the preemption */
9867 9861 ASSERT(curthread->t_preempt >= 1);
9868 9862 ASSERT(mmu_ctxp != NULL);
9869 9863
9870 9864 /* acquire Per-MMU (PM) spin lock */
9871 9865 mutex_enter(&mmu_ctxp->mmu_lock);
9872 9866
9873 9867 /* re-check to see if wrap-around is needed */
9874 9868 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs)
9875 9869 goto done;
9876 9870
9877 9871 SFMMU_MMU_STAT(mmu_wrap_around);
9878 9872
9879 9873 /* update gnum */
9880 9874 ASSERT(mmu_ctxp->mmu_gnum != 0);
9881 9875 mmu_ctxp->mmu_gnum++;
9882 9876 if (mmu_ctxp->mmu_gnum == 0 ||
9883 9877 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) {
9884 9878 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.",
9885 9879 (void *)mmu_ctxp);
9886 9880 }
9887 9881
9888 9882 if (mmu_ctxp->mmu_ncpus > 1) {
9889 9883 cpuset_t cpuset;
9890 9884
9891 9885 membar_enter(); /* make sure updated gnum visible */
9892 9886
9893 9887 SFMMU_XCALL_STATS(NULL);
9894 9888
9895 9889 /* xcall to others on the same MMU to invalidate ctx */
9896 9890 cpuset = mmu_ctxp->mmu_cpuset;
9897 9891 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum);
9898 9892 CPUSET_DEL(cpuset, CPU->cpu_id);
9899 9893 CPUSET_AND(cpuset, cpu_ready_set);
9900 9894
9901 9895 /*
9902 9896 * Pass in INVALID_CONTEXT as the first parameter to
9903 9897 * sfmmu_raise_tsb_exception, which invalidates the context
9904 9898 * of any process running on the CPUs in the MMU.
9905 9899 */
9906 9900 xt_some(cpuset, sfmmu_raise_tsb_exception,
9907 9901 INVALID_CONTEXT, INVALID_CONTEXT);
9908 9902 xt_sync(cpuset);
9909 9903
9910 9904 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
9911 9905 }
9912 9906
9913 9907 if (sfmmu_getctx_sec() != INVALID_CONTEXT) {
9914 9908 sfmmu_setctx_sec(INVALID_CONTEXT);
9915 9909 sfmmu_clear_utsbinfo();
9916 9910 }
9917 9911
9918 9912 /*
9919 9913 	 * No xcall is needed here. On sun4u systems all CPUs in a context
9920 9914 	 * domain share a single physical MMU, so it is enough to flush the
9921 9915 	 * TLB on the local CPU. On sun4v systems we use one global context
9922 9916 	 * domain and flush all remote TLBs in the sfmmu_raise_tsb_exception
9923 9917 	 * handler. Note that vtag_flushall_uctxs() is called
9924 9918 	 * for Ultra II machines, where the equivalent flushall functionality
9925 9919 * is implemented in SW, and only user ctx TLB entries are flushed.
9926 9920 */
9927 9921 if (&vtag_flushall_uctxs != NULL) {
9928 9922 vtag_flushall_uctxs();
9929 9923 } else {
9930 9924 vtag_flushall();
9931 9925 }
9932 9926
9933 9927 	/* reset mmu cnum, skipping cnums 0 and 1 */
9934 9928 if (reset_cnum == B_TRUE)
9935 9929 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;
9936 9930
9937 9931 done:
9938 9932 mutex_exit(&mmu_ctxp->mmu_lock);
9939 9933 }
9940 9934
9941 9935
9942 9936 /*
9943 9937  * For a multi-threaded process, set the process context to INVALID_CONTEXT
9944 9938  * so that it faults and reloads the MMU state from TL=0. For a single-threaded
9945 9939  * process, we can just load the MMU state directly without having to
9946 9940  * set the context invalid. Caller must hold the hat lock since we don't
9947 9941 * acquire it here.
9948 9942 */
9949 9943 static void
9950 9944 sfmmu_sync_mmustate(sfmmu_t *sfmmup)
9951 9945 {
9952 9946 uint_t cnum;
9953 9947 uint_t pstate_save;
9954 9948
9955 9949 ASSERT(sfmmup != ksfmmup);
9956 9950 ASSERT(sfmmu_hat_lock_held(sfmmup));
9957 9951
9958 9952 kpreempt_disable();
9959 9953
9960 9954 /*
9961 9955 	 * We check whether the passed-in sfmmup is the same as the
9962 9956 	 * currently running proc. This is to make sure the current proc
9963 9957 * stays single-threaded if it already is.
9964 9958 */
9965 9959 if ((sfmmup == curthread->t_procp->p_as->a_hat) &&
9966 9960 (curthread->t_procp->p_lwpcnt == 1)) {
9967 9961 /* single-thread */
9968 9962 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum;
9969 9963 if (cnum != INVALID_CONTEXT) {
9970 9964 uint_t curcnum;
9971 9965 /*
9972 9966 			 * Disable interrupts to prevent a race condition
9973 9967 			 * with sfmmu_ctx_wrap_around ctx invalidation.
9974 9968 			 * In sun4v, ctx invalidation involves setting the
9975 9969 			 * TSB to NULL, hence interrupts should stay disabled
9976 9970 			 * until sfmmu_load_mmustate is completed.
9977 9971 */
9978 9972 pstate_save = sfmmu_disable_intrs();
9979 9973 curcnum = sfmmu_getctx_sec();
9980 9974 if (curcnum == cnum)
9981 9975 sfmmu_load_mmustate(sfmmup);
9982 9976 sfmmu_enable_intrs(pstate_save);
9983 9977 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT);
9984 9978 }
9985 9979 } else {
9986 9980 /*
9987 9981 * multi-thread
9988 9982 * or when sfmmup is not the same as the curproc.
9989 9983 */
9990 9984 sfmmu_invalidate_ctx(sfmmup);
9991 9985 }
9992 9986
9993 9987 kpreempt_enable();
9994 9988 }
9995 9989
9996 9990
9997 9991 /*
9998 9992 * Replace the specified TSB with a new TSB. This function gets called when
9999 9993 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the
10000 9994 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10001 9995 * (8K).
10002 9996 *
10003 9997 * Caller must hold the HAT lock, but should assume any tsb_info
10004 9998 * pointers it has are no longer valid after calling this function.
10005 9999 *
10006 10000 * Return values:
10007 10001 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints
10008 10002 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing
10009 10003 * something to this tsbinfo/TSB
10010 10004 * TSB_SUCCESS Operation succeeded
10011 10005 */
10012 10006 static tsb_replace_rc_t
10013 10007 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc,
10014 10008 hatlock_t *hatlockp, uint_t flags)
10015 10009 {
10016 10010 struct tsb_info *new_tsbinfo = NULL;
10017 10011 struct tsb_info *curtsb, *prevtsb;
10018 10012 uint_t tte_sz_mask;
10019 10013 int i;
10020 10014
10021 10015 ASSERT(sfmmup != ksfmmup);
10022 10016 ASSERT(sfmmup->sfmmu_ismhat == 0);
10023 10017 ASSERT(sfmmu_hat_lock_held(sfmmup));
10024 10018 ASSERT(szc <= tsb_max_growsize);
10025 10019
10026 10020 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY))
10027 10021 return (TSB_LOSTRACE);
10028 10022
10029 10023 /*
10030 10024 * Find the tsb_info ahead of this one in the list, and
10031 10025 * also make sure that the tsb_info passed in really
10032 10026 * exists!
10033 10027 */
10034 10028 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10035 10029 curtsb != old_tsbinfo && curtsb != NULL;
10036 10030 prevtsb = curtsb, curtsb = curtsb->tsb_next)
10037 10031 ;
10038 10032 ASSERT(curtsb != NULL);
10039 10033
10040 10034 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10041 10035 /*
10042 10036 * The process is swapped out, so just set the new size
10043 10037 * code. When it swaps back in, we'll allocate a new one
10044 10038 * of the new chosen size.
10045 10039 */
10046 10040 curtsb->tsb_szc = szc;
10047 10041 return (TSB_SUCCESS);
10048 10042 }
10049 10043 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY);
10050 10044
10051 10045 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask;
10052 10046
10053 10047 /*
10054 10048 * All initialization is done inside of sfmmu_tsbinfo_alloc().
10055 10049 * If we fail to allocate a TSB, exit.
10056 10050 *
10057 10051 	 * If the TSB grows to a new size > 4M from an old size < 4M,
10058 10052 	 * then try a 4M slab after the initial alloc fails.
10059 10053 	 *
10060 10054 	 * If swapping in a TSB of size > 4M, then try 4M after the
10061 10055 	 * initial alloc fails.
10062 10056 */
10063 10057 sfmmu_hat_exit(hatlockp);
10064 10058 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc,
10065 10059 tte_sz_mask, flags, sfmmup) &&
10066 10060 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) ||
10067 10061 (!(flags & TSB_SWAPIN) &&
10068 10062 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) ||
10069 10063 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE,
10070 10064 tte_sz_mask, flags, sfmmup))) {
10071 10065 (void) sfmmu_hat_enter(sfmmup);
10072 10066 if (!(flags & TSB_SWAPIN))
10073 10067 SFMMU_STAT(sf_tsb_resize_failures);
10074 10068 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10075 10069 return (TSB_ALLOCFAIL);
10076 10070 }
10077 10071 (void) sfmmu_hat_enter(sfmmup);
10078 10072
10079 10073 /*
10080 10074 * Re-check to make sure somebody else didn't muck with us while we
10081 10075 * didn't hold the HAT lock. If the process swapped out, fine, just
10082 10076 * exit; this can happen if we try to shrink the TSB from the context
10083 10077 * of another process (such as on an ISM unmap), though it is rare.
10084 10078 */
10085 10079 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
10086 10080 SFMMU_STAT(sf_tsb_resize_failures);
10087 10081 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10088 10082 sfmmu_hat_exit(hatlockp);
10089 10083 sfmmu_tsbinfo_free(new_tsbinfo);
10090 10084 (void) sfmmu_hat_enter(sfmmup);
10091 10085 return (TSB_LOSTRACE);
10092 10086 }
10093 10087
10094 10088 #ifdef DEBUG
10095 10089 /* Reverify that the tsb_info still exists.. for debugging only */
10096 10090 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb;
10097 10091 curtsb != old_tsbinfo && curtsb != NULL;
10098 10092 prevtsb = curtsb, curtsb = curtsb->tsb_next)
10099 10093 ;
10100 10094 ASSERT(curtsb != NULL);
10101 10095 #endif /* DEBUG */
10102 10096
10103 10097 /*
10104 10098 * Quiesce any CPUs running this process on their next TLB miss
10105 10099 * so they atomically see the new tsb_info. We temporarily set the
10106 10100 * context to invalid context so new threads that come on processor
10107 10101 * after we do the xcall to cpusran will also serialize behind the
10108 10102 * HAT lock on TLB miss and will see the new TSB. Since this short
10109 10103 * race with a new thread coming on processor is relatively rare,
10110 10104 * this synchronization mechanism should be cheaper than always
10111 10105 * pausing all CPUs for the duration of the setup, which is what
10112 10106 	 * the old implementation did. This is particularly true if we are
10113 10107 * copying a huge chunk of memory around during that window.
10114 10108 *
10115 10109 * The memory barriers are to make sure things stay consistent
10116 10110 * with resume() since it does not hold the HAT lock while
10117 10111 * walking the list of tsb_info structures.
10118 10112 */
10119 10113 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) {
10120 10114 /* The TSB is either growing or shrinking. */
10121 10115 sfmmu_invalidate_ctx(sfmmup);
10122 10116 } else {
10123 10117 /*
10124 10118 * It is illegal to swap in TSBs from a process other
10125 10119 * than a process being swapped in. This in turn
10126 10120 * implies we do not have a valid MMU context here
10127 10121 * since a process needs one to resolve translation
10128 10122 * misses.
10129 10123 */
10130 10124 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup);
10131 10125 }
10132 10126
10133 10127 #ifdef DEBUG
10134 10128 ASSERT(max_mmu_ctxdoms > 0);
10135 10129
10136 10130 /*
10137 10131 * Process should have INVALID_CONTEXT on all MMUs
10138 10132 */
10139 10133 for (i = 0; i < max_mmu_ctxdoms; i++) {
10140 10134
10141 10135 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT);
10142 10136 }
10143 10137 #endif
10144 10138
10145 10139 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next;
10146 10140 membar_stst(); /* strict ordering required */
10147 10141 if (prevtsb)
10148 10142 prevtsb->tsb_next = new_tsbinfo;
10149 10143 else
10150 10144 sfmmup->sfmmu_tsb = new_tsbinfo;
10151 10145 membar_enter(); /* make sure new TSB globally visible */
10152 10146
10153 10147 /*
10154 10148 * We need to migrate TSB entries from the old TSB to the new TSB
10155 10149 * if tsb_remap_ttes is set and the TSB is growing.
10156 10150 */
10157 10151 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW))
10158 10152 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo);
10159 10153
10160 10154 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY);
10161 10155
10162 10156 /*
10163 10157 * Drop the HAT lock to free our old tsb_info.
10164 10158 */
10165 10159 sfmmu_hat_exit(hatlockp);
10166 10160
10167 10161 if ((flags & TSB_GROW) == TSB_GROW) {
10168 10162 SFMMU_STAT(sf_tsb_grow);
10169 10163 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) {
10170 10164 SFMMU_STAT(sf_tsb_shrink);
10171 10165 }
10172 10166
10173 10167 sfmmu_tsbinfo_free(old_tsbinfo);
10174 10168
10175 10169 (void) sfmmu_hat_enter(sfmmup);
10176 10170 return (TSB_SUCCESS);
10177 10171 }
10178 10172
10179 10173 /*
10180 10174  * This function re-programs the hat pgsz array and invalidates the
10181 10175  * process' context, forcing the process to switch to another
10182 10176  * context on the next TLB miss and therefore start using the
10183 10177  * TLB that has been reprogrammed for the new page sizes.
10184 10178 */
10185 10179 void
10186 10180 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz)
10187 10181 {
10188 10182 int i;
10189 10183 hatlock_t *hatlockp = NULL;
10190 10184
10191 10185 hatlockp = sfmmu_hat_enter(sfmmup);
10192 10186 /* USIII+-IV+ optimization, requires hat lock */
10193 10187 if (tmp_pgsz) {
10194 10188 for (i = 0; i < mmu_page_sizes; i++)
10195 10189 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i];
10196 10190 }
10197 10191 SFMMU_STAT(sf_tlb_reprog_pgsz);
10198 10192
10199 10193 sfmmu_invalidate_ctx(sfmmup);
10200 10194
10201 10195 sfmmu_hat_exit(hatlockp);
10202 10196 }
10203 10197
10204 10198 /*
10205 10199  * The scd_rttecnt field in the SCD must be updated to account for the
10206 10200  * regions which it contains.
10207 10201 */
10208 10202 static void
10209 10203 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp)
10210 10204 {
10211 10205 uint_t rid;
10212 10206 uint_t i, j;
10213 10207 ulong_t w;
10214 10208 sf_region_t *rgnp;
10215 10209
10216 10210 ASSERT(srdp != NULL);
10217 10211
10218 10212 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
10219 10213 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
10220 10214 continue;
10221 10215 }
10222 10216
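		/*
		 * Walk the set bits of this bitmap word; bit j of word i
		 * encodes region id (i << BT_ULSHIFT) | j.
		 */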
10223 10217 j = 0;
10224 10218 while (w) {
10225 10219 if (!(w & 0x1)) {
10226 10220 j++;
10227 10221 w >>= 1;
10228 10222 continue;
10229 10223 }
10230 10224 rid = (i << BT_ULSHIFT) | j;
10231 10225 j++;
10232 10226 w >>= 1;
10233 10227
10234 10228 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
10235 10229 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
10236 10230 rgnp = srdp->srd_hmergnp[rid];
10237 10231 ASSERT(rgnp->rgn_refcnt > 0);
10238 10232 ASSERT(rgnp->rgn_id == rid);
10239 10233
10240 10234 scdp->scd_rttecnt[rgnp->rgn_pgszc] +=
10241 10235 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
10242 10236
10243 10237 /*
10244 10238 * Maintain the tsb0 inflation cnt for the regions
10245 10239 * in the SCD.
10246 10240 */
10247 10241 if (rgnp->rgn_pgszc >= TTE4M) {
10248 10242 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt +=
10249 10243 rgnp->rgn_size >>
10250 10244 (TTE_PAGE_SHIFT(TTE8K) + 2);
10251 10245 }
10252 10246 }
10253 10247 }
10254 10248 }
10255 10249
10256 10250 /*
10257 10251 * This function assumes that there are either four or six supported page
10258 10252 * sizes and at most two programmable TLBs, so we need to decide which
10259 10253 * page sizes are most important and then tell the MMU layer so it
10260 10254 * can adjust the TLB page sizes accordingly (if supported).
10261 10255 *
10262 10256 * If these assumptions change, this function will need to be
10263 10257 * updated to support whatever the new limits are.
10264 10258 *
10265 10259 * The growing flag is nonzero if we are growing the address space,
10266 10260 * and zero if it is shrinking. This allows us to decide whether
10267 10261 * to grow or shrink our TSB, depending upon available memory
10268 10262 * conditions.
10269 10263 */
10270 10264 static void
10271 10265 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing)
10272 10266 {
10273 10267 uint64_t ttecnt[MMU_PAGE_SIZES];
10274 10268 uint64_t tte8k_cnt, tte4m_cnt;
10275 10269 uint8_t i;
10276 10270 int sectsb_thresh;
10277 10271
10278 10272 /*
10279 10273 * Kernel threads, processes with small address spaces not using
10280 10274 * large pages, and dummy ISM HATs need not apply.
10281 10275 */
10282 10276 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL)
10283 10277 return;
10284 10278
10285 10279 if (!SFMMU_LGPGS_INUSE(sfmmup) &&
10286 10280 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor)
10287 10281 return;
10288 10282
10289 10283 for (i = 0; i < mmu_page_sizes; i++) {
10290 10284 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] +
10291 10285 sfmmup->sfmmu_ismttecnt[i];
10292 10286 }
10293 10287
10294 10288 /* Check pagesizes in use, and possibly reprogram DTLB. */
10295 10289 if (&mmu_check_page_sizes)
10296 10290 mmu_check_page_sizes(sfmmup, ttecnt);
10297 10291
10298 10292 /*
10299 10293 * Calculate the number of 8k ttes to represent the span of these
10300 10294 * pages.
10301 10295 */
10302 10296 tte8k_cnt = ttecnt[TTE8K] +
10303 10297 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) +
10304 10298 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT));
10305 10299 if (mmu_page_sizes == max_mmu_page_sizes) {
10306 10300 tte4m_cnt = ttecnt[TTE4M] +
10307 10301 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) +
10308 10302 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M));
10309 10303 } else {
10310 10304 tte4m_cnt = ttecnt[TTE4M];
10311 10305 }
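	/*
	 * For example, a 64K tte spans eight 8K pages and a 512K tte spans
	 * 64, so they add 8 and 64 respectively to tte8k_cnt above; likewise
	 * a 32M tte adds 8 and a 256M tte adds 64 to the 4M-equivalent count.
	 */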
10312 10306
10313 10307 /*
10314 10308 * Inflate tte8k_cnt to allow for region large page allocation failure.
10315 10309 */
10316 10310 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt;
10317 10311
10318 10312 /*
10319 10313 * Inflate TSB sizes by a factor of 2 if this process
10320 10314 * uses 4M text pages to minimize extra conflict misses
10321 10315 	 * in the first TSB, since without counting text pages the
10322 10316 	 * 8K TSB may become too small.
10323 10317 *
10324 10318 * Also double the size of the second TSB to minimize
10325 10319 * extra conflict misses due to competition between 4M text pages
10326 10320 * and data pages.
10327 10321 *
10328 10322 * We need to adjust the second TSB allocation threshold by the
10329 10323 * inflation factor, since there is no point in creating a second
10330 10324 * TSB when we know all the mappings can fit in the I/D TLBs.
10331 10325 */
10332 10326 sectsb_thresh = tsb_sectsb_threshold;
10333 10327 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) {
10334 10328 tte8k_cnt <<= 1;
10335 10329 tte4m_cnt <<= 1;
10336 10330 sectsb_thresh <<= 1;
10337 10331 }
10338 10332
10339 10333 /*
10340 10334 * Check to see if our TSB is the right size; we may need to
10341 10335 * grow or shrink it. If the process is small, our work is
10342 10336 * finished at this point.
10343 10337 */
10344 10338 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10345 10339 return;
10346 10340 }
10347 10341 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10348 10342 }
10349 10343
10350 10344 static void
10351 10345 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10352 10346 uint64_t tte4m_cnt, int sectsb_thresh)
10353 10347 {
10354 10348 int tsb_bits;
10355 10349 uint_t tsb_szc;
10356 10350 struct tsb_info *tsbinfop;
10357 10351 hatlock_t *hatlockp = NULL;
10358 10352
10359 10353 hatlockp = sfmmu_hat_enter(sfmmup);
10360 10354 ASSERT(hatlockp != NULL);
10361 10355 tsbinfop = sfmmup->sfmmu_tsb;
10362 10356 ASSERT(tsbinfop != NULL);
10363 10357
10364 10358 /*
10365 10359 * If we're growing, select the size based on RSS. If we're
10366 10360 * shrinking, leave some room so we don't have to turn around and
10367 10361 * grow again immediately.
10368 10362 */
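	/*
	 * Sizing the shrunken TSB for twice the current count provides some
	 * hysteresis, so a small RSS dip doesn't force an immediate re-grow
	 * right after the shrink.
	 */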
10369 10363 if (growing)
10370 10364 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
10371 10365 else
10372 10366 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1);
10373 10367
10374 10368 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10375 10369 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10376 10370 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10377 10371 hatlockp, TSB_SHRINK);
10378 10372 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) {
10379 10373 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc,
10380 10374 hatlockp, TSB_GROW);
10381 10375 }
10382 10376 tsbinfop = sfmmup->sfmmu_tsb;
10383 10377
10384 10378 /*
10385 10379 * With the TLB and first TSB out of the way, we need to see if
10386 10380 * we need a second TSB for 4M pages. If we managed to reprogram
10387 10381 * the TLB page sizes above, the process will start using this new
10388 10382 * TSB right away; otherwise, it will start using it on the next
10389 10383 * context switch. Either way, it's no big deal so there's no
10390 10384 * synchronization with the trap handlers here unless we grow the
10391 10385 * TSB (in which case it's required to prevent using the old one
10392 10386 * after it's freed). Note: second tsb is required for 32M/256M
10393 10387 * page sizes.
10394 10388 */
10395 10389 if (tte4m_cnt > sectsb_thresh) {
10396 10390 /*
10397 10391 * If we're growing, select the size based on RSS. If we're
10398 10392 * shrinking, leave some room so we don't have to turn
10399 10393 * around and grow again immediately.
10400 10394 */
10401 10395 if (growing)
10402 10396 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
10403 10397 else
10404 10398 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1);
10405 10399 if (tsbinfop->tsb_next == NULL) {
10406 10400 struct tsb_info *newtsb;
10407 10401 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)?
10408 10402 0 : TSB_ALLOC;
10409 10403
10410 10404 sfmmu_hat_exit(hatlockp);
10411 10405
10412 10406 /*
10413 10407 * Try to allocate a TSB for 4[32|256]M pages. If we
10414 10408 			 * can't get the size we want, retry with a minimum-sized
10415 10409 			 * TSB. If that still doesn't work, give up; we can
10416 10410 * still run without one.
10417 10411 */
10418 10412 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)?
10419 10413 TSB4M|TSB32M|TSB256M:TSB4M;
10420 10414 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits,
10421 10415 allocflags, sfmmup)) &&
10422 10416 (tsb_szc <= TSB_4M_SZCODE ||
10423 10417 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
10424 10418 tsb_bits, allocflags, sfmmup)) &&
10425 10419 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE,
10426 10420 tsb_bits, allocflags, sfmmup)) {
10427 10421 return;
10428 10422 }
10429 10423
10430 10424 hatlockp = sfmmu_hat_enter(sfmmup);
10431 10425
10432 10426 sfmmu_invalidate_ctx(sfmmup);
10433 10427
10434 10428 if (sfmmup->sfmmu_tsb->tsb_next == NULL) {
10435 10429 sfmmup->sfmmu_tsb->tsb_next = newtsb;
10436 10430 SFMMU_STAT(sf_tsb_sectsb_create);
10437 10431 sfmmu_hat_exit(hatlockp);
10438 10432 return;
10439 10433 } else {
10440 10434 /*
10441 10435 * It's annoying, but possible for us
10442 10436 			 * to get here... we dropped the HAT lock
10443 10437 * because of locking order in the kmem
10444 10438 * allocator, and while we were off getting
10445 10439 * our memory, some other thread decided to
10446 10440 * do us a favor and won the race to get a
10447 10441 * second TSB for this process. Sigh.
10448 10442 */
10449 10443 sfmmu_hat_exit(hatlockp);
10450 10444 sfmmu_tsbinfo_free(newtsb);
10451 10445 return;
10452 10446 }
10453 10447 }
10454 10448
10455 10449 /*
10456 10450 * We have a second TSB, see if it's big enough.
10457 10451 */
10458 10452 tsbinfop = tsbinfop->tsb_next;
10459 10453
10460 10454 /*
10461 10455 * Check to see if our second TSB is the right size;
10462 10456 * we may need to grow or shrink it.
10463 10457 * To prevent thrashing (e.g. growing the TSB on a
10464 10458 * subsequent map operation), only try to shrink if
10465 10459 * the TSB reach exceeds twice the virtual address
10466 10460 * space size.
10467 10461 */
10468 10462 if (!growing && (tsb_szc < tsbinfop->tsb_szc) &&
10469 10463 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) {
10470 10464 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10471 10465 tsb_szc, hatlockp, TSB_SHRINK);
10472 10466 } else if (growing && tsb_szc > tsbinfop->tsb_szc &&
10473 10467 TSB_OK_GROW()) {
10474 10468 (void) sfmmu_replace_tsb(sfmmup, tsbinfop,
10475 10469 tsb_szc, hatlockp, TSB_GROW);
10476 10470 }
10477 10471 }
10478 10472
10479 10473 sfmmu_hat_exit(hatlockp);
10480 10474 }
10481 10475
10482 10476 /*
10483 10477 * Free up a sfmmu
10484 10478  * Since the sfmmu is currently embedded in the hat struct, we simply zero
10485 10479  * out our fields and free up the ism map blk list, if any.
10486 10480 */
10487 10481 static void
10488 10482 sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10489 10483 {
10490 10484 ism_blk_t *blkp, *nx_blkp;
10491 10485 #ifdef DEBUG
10492 10486 ism_map_t *map;
10493 10487 int i;
10494 10488 #endif
10495 10489
10496 10490 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10497 10491 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10498 10492 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10499 10493 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10500 10494 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10501 10495 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10502 10496 ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10503 10497
10504 10498 sfmmup->sfmmu_free = 0;
10505 10499 sfmmup->sfmmu_ismhat = 0;
10506 10500
10507 10501 blkp = sfmmup->sfmmu_iblk;
10508 10502 sfmmup->sfmmu_iblk = NULL;
10509 10503
10510 10504 while (blkp) {
10511 10505 #ifdef DEBUG
10512 10506 map = blkp->iblk_maps;
10513 10507 for (i = 0; i < ISM_MAP_SLOTS; i++) {
10514 10508 ASSERT(map[i].imap_seg == 0);
10515 10509 ASSERT(map[i].imap_ismhat == NULL);
10516 10510 ASSERT(map[i].imap_ment == NULL);
10517 10511 }
10518 10512 #endif
10519 10513 nx_blkp = blkp->iblk_next;
10520 10514 blkp->iblk_next = NULL;
10521 10515 blkp->iblk_nextpa = (uint64_t)-1;
10522 10516 kmem_cache_free(ism_blk_cache, blkp);
10523 10517 blkp = nx_blkp;
10524 10518 }
10525 10519 }
10526 10520
10527 10521 /*
10528 10522  * Locking primitives accessed by HATLOCK macros
10529 10523 */
10530 10524
10531 10525 #define SFMMU_SPL_MTX (0x0)
10532 10526 #define SFMMU_ML_MTX (0x1)
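/*
 * SFMMU_SPL_MTX selects the per-page lock array (SPL_HASH) used by
 * sfmmu_page_enter(), and SFMMU_ML_MTX selects the mapping list lock
 * array (MLIST_HASH) used by sfmmu_mlist_enter(); see SFMMU_MLSPL_MTX below.
 */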
10533 10527
10534 10528 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \
10535 10529 SPL_HASH(pg) : MLIST_HASH(pg))
10536 10530
10537 10531 kmutex_t *
10538 10532 sfmmu_page_enter(struct page *pp)
10539 10533 {
10540 10534 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX));
10541 10535 }
10542 10536
10543 10537 void
10544 10538 sfmmu_page_exit(kmutex_t *spl)
10545 10539 {
10546 10540 mutex_exit(spl);
10547 10541 }
10548 10542
10549 10543 int
10550 10544 sfmmu_page_spl_held(struct page *pp)
10551 10545 {
10552 10546 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX));
10553 10547 }
10554 10548
10555 10549 kmutex_t *
10556 10550 sfmmu_mlist_enter(struct page *pp)
10557 10551 {
10558 10552 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX));
10559 10553 }
10560 10554
10561 10555 void
10562 10556 sfmmu_mlist_exit(kmutex_t *mml)
10563 10557 {
10564 10558 mutex_exit(mml);
10565 10559 }
10566 10560
10567 10561 int
10568 10562 sfmmu_mlist_held(struct page *pp)
10569 10563 {
10570 10564
10571 10565 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX));
10572 10566 }
10573 10567
10574 10568 /*
10575 10569  * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). For the
10576 10570  * sfmmu_mlist_enter() case the mml_table lock array is used, and for
10577 10571  * sfmmu_page_enter() the sfmmu_page_lock lock array is used.
10578 10572 *
10579 10573 * The lock is taken on a root page so that it protects an operation on all
10580 10574 * constituent pages of a large page pp belongs to.
10581 10575 *
10582 10576 * The routine takes a lock from the appropriate array. The lock is determined
10583 10577 * by hashing the root page. After taking the lock this routine checks if the
10584 10578  * root page has the same size code that was used to determine the root (i.e.
10585 10579  * that the root hasn't changed). If the root page has the expected p_szc
10586 10580  * field we have the right lock and it's returned to the caller. If root's
10587 10581  * p_szc decreased, we release the lock and retry from the beginning. This can
10588 10582 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc
10589 10583 * value and taking the lock. The number of retries due to p_szc decrease is
10590 10584 * limited by the maximum p_szc value. If p_szc is 0 we return the lock
10591 10585 * determined by hashing pp itself.
10592 10586 *
10593 10587 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also
10594 10588 * possible that p_szc can increase. To increase p_szc a thread has to lock
10595 10589 * all constituent pages EXCL and do hat_pageunload() on all of them. All the
10596 10590  * callers that don't hold a page locked recheck if the hmeblk through which pp
10597 10591  * was found still maps this pp. If it doesn't map it anymore, the returned lock
10598 10592 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of
10599 10593 * p_szc increase after taking the lock it returns this lock without further
10600 10594 * retries because in this case the caller doesn't care about which lock was
10601 10595 * taken. The caller will drop it right away.
10602 10596 *
10603 10597 * After the routine returns it's guaranteed that hat_page_demote() can't
10604 10598 * change p_szc field of any of constituent pages of a large page pp belongs
10605 10599 * to as long as pp was either locked at least SHARED prior to this call or
10606 10600 * the caller finds that hment that pointed to this pp still references this
10607 10601 * pp (this also assumes that the caller holds hme hash bucket lock so that
10608 10602 * the same pp can't be remapped into the same hmeblk after it was unmapped by
10609 10603 * hat_pageunload()).
10610 10604 */
10611 10605 static kmutex_t *
10612 10606 sfmmu_mlspl_enter(struct page *pp, int type)
10613 10607 {
10614 10608 kmutex_t *mtx;
10615 10609 uint_t prev_rszc = UINT_MAX;
10616 10610 page_t *rootpp;
10617 10611 uint_t szc;
10618 10612 uint_t rszc;
10619 10613 uint_t pszc = pp->p_szc;
10620 10614
10621 10615 ASSERT(pp != NULL);
10622 10616
10623 10617 again:
10624 10618 if (pszc == 0) {
10625 10619 mtx = SFMMU_MLSPL_MTX(type, pp);
10626 10620 mutex_enter(mtx);
10627 10621 return (mtx);
10628 10622 }
10629 10623
10630 10624 /* The lock lives in the root page */
10631 10625 rootpp = PP_GROUPLEADER(pp, pszc);
10632 10626 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10633 10627 mutex_enter(mtx);
10634 10628
10635 10629 /*
10636 10630 * Return mml in the following 3 cases:
10637 10631 *
10638 10632 * 1) If pp itself is root since if its p_szc decreased before we took
10639 10633 * the lock pp is still the root of smaller szc page. And if its p_szc
10640 10634 * increased it doesn't matter what lock we return (see comment in
10641 10635 * front of this routine).
10642 10636 *
10643 10637 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size
10644 10638 * large page we have the right lock since any previous potential
10645 10639 * hat_page_demote() is done demoting from greater than current root's
10646 10640 * p_szc because hat_page_demote() changes root's p_szc last. No
10647 10641 * further hat_page_demote() can start or be in progress since it
10648 10642 * would need the same lock we currently hold.
10649 10643 *
10650 10644 * 3) If rootpp's p_szc increased since previous iteration it doesn't
10651 10645 * matter what lock we return (see comment in front of this routine).
10652 10646 */
10653 10647 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc ||
10654 10648 rszc >= prev_rszc) {
10655 10649 return (mtx);
10656 10650 }
10657 10651
10658 10652 /*
10659 10653 * hat_page_demote() could have decreased root's p_szc.
10660 10654 * In this case pp's p_szc must also be smaller than pszc.
10661 10655 * Retry.
10662 10656 */
10663 10657 if (rszc < pszc) {
10664 10658 szc = pp->p_szc;
10665 10659 if (szc < pszc) {
10666 10660 mutex_exit(mtx);
10667 10661 pszc = szc;
10668 10662 goto again;
10669 10663 }
10670 10664 /*
10671 10665 * pp's p_szc increased after it was decreased.
10672 10666 * page cannot be mapped. Return current lock. The caller
10673 10667 * will drop it right away.
10674 10668 */
10675 10669 return (mtx);
10676 10670 }
10677 10671
10678 10672 /*
10679 10673 * root's p_szc is greater than pp's p_szc.
10680 10674 * hat_page_demote() is not done with all pages
10681 10675 * yet. Wait for it to complete.
10682 10676 */
10683 10677 mutex_exit(mtx);
10684 10678 rootpp = PP_GROUPLEADER(rootpp, rszc);
10685 10679 mtx = SFMMU_MLSPL_MTX(type, rootpp);
10686 10680 mutex_enter(mtx);
10687 10681 mutex_exit(mtx);
10688 10682 prev_rszc = rszc;
10689 10683 goto again;
10690 10684 }
10691 10685
10692 10686 static int
10693 10687 sfmmu_mlspl_held(struct page *pp, int type)
10694 10688 {
10695 10689 kmutex_t *mtx;
10696 10690
10697 10691 ASSERT(pp != NULL);
10698 10692 /* The lock lives in the root page */
10699 10693 pp = PP_PAGEROOT(pp);
10700 10694 ASSERT(pp != NULL);
10701 10695
10702 10696 mtx = SFMMU_MLSPL_MTX(type, pp);
10703 10697 return (MUTEX_HELD(mtx));
10704 10698 }
10705 10699
10706 10700 static uint_t
10707 10701 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical)
10708 10702 {
10709 10703 struct hme_blk *hblkp;
10710 10704
10711 10705
10712 10706 if (freehblkp != NULL) {
10713 10707 mutex_enter(&freehblkp_lock);
10714 10708 if (freehblkp != NULL) {
10715 10709 /*
10716 10710 			 * If the current thread owns hblk_reserve OR this is
10717 10711 			 * a critical request from sfmmu_hblk_steal(),
10718 10712 			 * let it succeed even if freehblkcnt is really low.
10719 10713 */
10720 10714 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) {
10721 10715 SFMMU_STAT(sf_get_free_throttle);
10722 10716 mutex_exit(&freehblkp_lock);
10723 10717 return (0);
10724 10718 }
10725 10719 freehblkcnt--;
10726 10720 *hmeblkpp = freehblkp;
10727 10721 hblkp = *hmeblkpp;
10728 10722 freehblkp = hblkp->hblk_next;
10729 10723 mutex_exit(&freehblkp_lock);
10730 10724 hblkp->hblk_next = NULL;
10731 10725 SFMMU_STAT(sf_get_free_success);
10732 10726
10733 10727 ASSERT(hblkp->hblk_hmecnt == 0);
10734 10728 ASSERT(hblkp->hblk_vcnt == 0);
10735 10729 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp));
10736 10730
10737 10731 return (1);
10738 10732 }
10739 10733 mutex_exit(&freehblkp_lock);
10740 10734 }
10741 10735
10742 10736 /* Check cpu hblk pending queues */
10743 10737 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) {
10744 10738 hblkp = *hmeblkpp;
10745 10739 hblkp->hblk_next = NULL;
10746 10740 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp);
10747 10741
10748 10742 ASSERT(hblkp->hblk_hmecnt == 0);
10749 10743 ASSERT(hblkp->hblk_vcnt == 0);
10750 10744
10751 10745 return (1);
10752 10746 }
10753 10747
10754 10748 SFMMU_STAT(sf_get_free_fail);
10755 10749 return (0);
10756 10750 }
10757 10751
10758 10752 static uint_t
10759 10753 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical)
10760 10754 {
10761 10755 struct hme_blk *hblkp;
10762 10756
10763 10757 ASSERT(hmeblkp->hblk_hmecnt == 0);
10764 10758 ASSERT(hmeblkp->hblk_vcnt == 0);
10765 10759 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
10766 10760
10767 10761 /*
10768 10762 * If the current thread is mapping into kernel space,
10769 10763 	 * let it succeed even if freehblkcnt is max
10770 10764 * so that it will avoid freeing it to kmem.
10771 10765 * This will prevent stack overflow due to
10772 10766 * possible recursion since kmem_cache_free()
10773 10767 * might require creation of a slab which
10774 10768 * in turn needs an hmeblk to map that slab;
10775 10769 * let's break this vicious chain at the first
10776 10770 * opportunity.
10777 10771 */
10778 10772 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10779 10773 mutex_enter(&freehblkp_lock);
10780 10774 if (freehblkcnt < HBLK_RESERVE_CNT || critical) {
10781 10775 SFMMU_STAT(sf_put_free_success);
10782 10776 freehblkcnt++;
10783 10777 hmeblkp->hblk_next = freehblkp;
10784 10778 freehblkp = hmeblkp;
10785 10779 mutex_exit(&freehblkp_lock);
10786 10780 return (1);
10787 10781 }
10788 10782 mutex_exit(&freehblkp_lock);
10789 10783 }
10790 10784
10791 10785 /*
10792 10786 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here
10793 10787 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and*
10794 10788 * we are not in the process of mapping into kernel space.
10795 10789 */
10796 10790 ASSERT(!critical);
10797 10791 while (freehblkcnt > HBLK_RESERVE_CNT) {
10798 10792 mutex_enter(&freehblkp_lock);
10799 10793 if (freehblkcnt > HBLK_RESERVE_CNT) {
10800 10794 freehblkcnt--;
10801 10795 hblkp = freehblkp;
10802 10796 freehblkp = hblkp->hblk_next;
10803 10797 mutex_exit(&freehblkp_lock);
10804 10798 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache);
10805 10799 kmem_cache_free(sfmmu8_cache, hblkp);
10806 10800 continue;
10807 10801 }
10808 10802 mutex_exit(&freehblkp_lock);
10809 10803 }
10810 10804 SFMMU_STAT(sf_put_free_fail);
10811 10805 return (0);
10812 10806 }
10813 10807
10814 10808 static void
10815 10809 sfmmu_hblk_swap(struct hme_blk *new)
10816 10810 {
10817 10811 struct hme_blk *old, *hblkp, *prev;
10818 10812 uint64_t newpa;
10819 10813 caddr_t base, vaddr, endaddr;
10820 10814 struct hmehash_bucket *hmebp;
10821 10815 struct sf_hment *osfhme, *nsfhme;
10822 10816 page_t *pp;
10823 10817 kmutex_t *pml;
10824 10818 tte_t tte;
10825 10819 struct hme_blk *list = NULL;
10826 10820
10827 10821 #ifdef DEBUG
10828 10822 hmeblk_tag hblktag;
10829 10823 struct hme_blk *found;
10830 10824 #endif
10831 10825 old = HBLK_RESERVE;
10832 10826 ASSERT(!old->hblk_shared);
10833 10827
10834 10828 /*
10835 10829 * save pa before bcopy clobbers it
10836 10830 */
10837 10831 newpa = new->hblk_nextpa;
10838 10832
10839 10833 base = (caddr_t)get_hblk_base(old);
10840 10834 endaddr = base + get_hblk_span(old);
10841 10835
10842 10836 /*
10843 10837 * acquire hash bucket lock.
10844 10838 */
10845 10839 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K,
10846 10840 SFMMU_INVALID_SHMERID);
10847 10841
10848 10842 /*
10849 10843 * copy contents from old to new
10850 10844 */
10851 10845 bcopy((void *)old, (void *)new, HME8BLK_SZ);
10852 10846
10853 10847 /*
10854 10848 * add new to hash chain
10855 10849 */
10856 10850 sfmmu_hblk_hash_add(hmebp, new, newpa);
10857 10851
10858 10852 /*
10859 10853 * search hash chain for hblk_reserve; this needs to be performed
10860 10854 * after adding new, otherwise prev won't correspond to the hblk which
10861 10855 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to
10862 10856 * remove old later.
10863 10857 */
10864 10858 for (prev = NULL,
10865 10859 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old;
10866 10860 prev = hblkp, hblkp = hblkp->hblk_next)
10867 10861 ;
10868 10862
10869 10863 if (hblkp != old)
10870 10864 panic("sfmmu_hblk_swap: hblk_reserve not found");
10871 10865
10872 10866 /*
10873 10867 * p_mapping list is still pointing to hments in hblk_reserve;
10874 10868 * fix up p_mapping list so that they point to hments in new.
10875 10869 *
10876 10870 * Since all these mappings are created by hblk_reserve_thread
10877 10871 * on the way and it's using at least one of the buffers from each of
10878 10872 * the newly minted slabs, there is no danger of any of these
10879 10873 * mappings getting unloaded by another thread.
10880 10874 *
10881 10875 * tsbmiss could only modify ref/mod bits of hments in old/new.
10882 10876 * Since all of these hments hold mappings established by segkmem
10883 10877 	 * and mappings in segkmem are set up with HAT_NOSYNC, ref/mod bits
10884 10878 * have no meaning for the mappings in hblk_reserve. hments in
10885 10879 * old and new are identical except for ref/mod bits.
10886 10880 */
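	/*
	 * For each valid 8K mapping in the span, take the page's mlist lock
	 * and move the hment from old to new (HME_SUB/HME_ADD) so the page's
	 * p_mapping list points into the new hblk.
	 */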
10887 10881 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) {
10888 10882
10889 10883 HBLKTOHME(osfhme, old, vaddr);
10890 10884 sfmmu_copytte(&osfhme->hme_tte, &tte);
10891 10885
10892 10886 if (TTE_IS_VALID(&tte)) {
10893 10887 if ((pp = osfhme->hme_page) == NULL)
10894 10888 panic("sfmmu_hblk_swap: page not mapped");
10895 10889
10896 10890 pml = sfmmu_mlist_enter(pp);
10897 10891
10898 10892 if (pp != osfhme->hme_page)
10899 10893 panic("sfmmu_hblk_swap: mapping changed");
10900 10894
10901 10895 HBLKTOHME(nsfhme, new, vaddr);
10902 10896
10903 10897 HME_ADD(nsfhme, pp);
10904 10898 HME_SUB(osfhme, pp);
10905 10899
10906 10900 sfmmu_mlist_exit(pml);
10907 10901 }
10908 10902 }
10909 10903
10910 10904 /*
10911 10905 * remove old from hash chain
10912 10906 */
10913 10907 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1);
10914 10908
10915 10909 #ifdef DEBUG
10916 10910
10917 10911 hblktag.htag_id = ksfmmup;
10918 10912 hblktag.htag_rid = SFMMU_INVALID_SHMERID;
10919 10913 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K));
10920 10914 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K);
10921 10915 HME_HASH_FAST_SEARCH(hmebp, hblktag, found);
10922 10916
10923 10917 if (found != new)
10924 10918 panic("sfmmu_hblk_swap: new hblk not found");
10925 10919 #endif
10926 10920
10927 10921 SFMMU_HASH_UNLOCK(hmebp);
10928 10922
10929 10923 /*
10930 10924 * Reset hblk_reserve
10931 10925 */
10932 10926 bzero((void *)old, HME8BLK_SZ);
10933 10927 old->hblk_nextpa = va_to_pa((caddr_t)old);
10934 10928 }
10935 10929
10936 10930 /*
10937 10931 * Grab the mlist mutex for both pages passed in.
10938 10932 *
10939 10933 * low and high will be returned as pointers to the mutexes for these pages.
10940 10934 * low refers to the mutex residing in the lower bin of the mlist hash, while
10941 10935 * high refers to the mutex residing in the higher bin of the mlist hash. This
10942 10936 * is due to the locking order restrictions on the same thread grabbing
10943 10937 * multiple mlist mutexes. The low lock must be acquired before the high lock.
10944 10938 *
10945 10939 * If both pages hash to the same mutex, only grab that single mutex, and
10946 10940  * high will be returned as NULL.
10947 10941 * If the pages hash to different bins in the hash, grab the lower addressed
10948 10942 * lock first and then the higher addressed lock in order to follow the locking
10949 10943 * rules involved with the same thread grabbing multiple mlist mutexes.
10950 10944 * low and high will both have non-NULL values.
10951 10945 */
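/*
 * A typical caller (hat_page_relocate()) pairs these as, roughly:
 *
 *	kmutex_t *low, *high;
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	... operate on both pages' mapping lists ...
 *	sfmmu_mlist_reloc_exit(low, high);
 */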
10952 10946 static void
10953 10947 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl,
10954 10948 kmutex_t **low, kmutex_t **high)
10955 10949 {
10956 10950 kmutex_t *mml_targ, *mml_repl;
10957 10951
10958 10952 /*
10959 10953 * no need to do the dance around szc as in sfmmu_mlist_enter()
10960 10954 * because this routine is only called by hat_page_relocate() and all
10961 10955 * targ and repl pages are already locked EXCL so szc can't change.
10962 10956 */
10963 10957
10964 10958 mml_targ = MLIST_HASH(PP_PAGEROOT(targ));
10965 10959 mml_repl = MLIST_HASH(PP_PAGEROOT(repl));
10966 10960
10967 10961 if (mml_targ == mml_repl) {
10968 10962 *low = mml_targ;
10969 10963 *high = NULL;
10970 10964 } else {
10971 10965 if (mml_targ < mml_repl) {
10972 10966 *low = mml_targ;
10973 10967 *high = mml_repl;
10974 10968 } else {
10975 10969 *low = mml_repl;
10976 10970 *high = mml_targ;
10977 10971 }
10978 10972 }
10979 10973
10980 10974 mutex_enter(*low);
10981 10975 if (*high)
10982 10976 mutex_enter(*high);
10983 10977 }
10984 10978
10985 10979 static void
10986 10980 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high)
10987 10981 {
10988 10982 if (high)
10989 10983 mutex_exit(high);
10990 10984 mutex_exit(low);
10991 10985 }
10992 10986
10993 10987 static hatlock_t *
10994 10988 sfmmu_hat_enter(sfmmu_t *sfmmup)
10995 10989 {
10996 10990 hatlock_t *hatlockp;
10997 10991
10998 10992 if (sfmmup != ksfmmup) {
10999 10993 hatlockp = TSB_HASH(sfmmup);
11000 10994 mutex_enter(HATLOCK_MUTEXP(hatlockp));
11001 10995 return (hatlockp);
11002 10996 }
11003 10997 return (NULL);
11004 10998 }
11005 10999
11006 11000 static hatlock_t *
11007 11001 sfmmu_hat_tryenter(sfmmu_t *sfmmup)
11008 11002 {
11009 11003 hatlock_t *hatlockp;
11010 11004
11011 11005 if (sfmmup != ksfmmup) {
11012 11006 hatlockp = TSB_HASH(sfmmup);
11013 11007 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0)
11014 11008 return (NULL);
11015 11009 return (hatlockp);
11016 11010 }
11017 11011 return (NULL);
11018 11012 }
11019 11013
11020 11014 static void
11021 11015 sfmmu_hat_exit(hatlock_t *hatlockp)
11022 11016 {
11023 11017 if (hatlockp != NULL)
11024 11018 mutex_exit(HATLOCK_MUTEXP(hatlockp));
11025 11019 }
11026 11020
11027 11021 static void
11028 11022 sfmmu_hat_lock_all(void)
11029 11023 {
11030 11024 int i;
11031 11025 for (i = 0; i < SFMMU_NUM_LOCK; i++)
11032 11026 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i]));
11033 11027 }
11034 11028
11035 11029 static void
11036 11030 sfmmu_hat_unlock_all(void)
11037 11031 {
11038 11032 int i;
11039 11033 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--)
11040 11034 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i]));
11041 11035 }
11042 11036
11043 11037 int
11044 11038 sfmmu_hat_lock_held(sfmmu_t *sfmmup)
11045 11039 {
11046 11040 ASSERT(sfmmup != ksfmmup);
11047 11041 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup))));
11048 11042 }
11049 11043
11050 11044 /*
11051 11045 * Locking primitives to provide consistency between ISM unmap
11052 11046 * and other operations. Since ISM unmap can take a long time, we
11053 11047  * use the HAT_ISMBUSY flag (protected by the hatlock) to avoid creating
11054 11048 * contention on the hatlock buckets while ISM segments are being
11055 11049 * unmapped. The tradeoff is that the flags don't prevent priority
11056 11050 * inversion from occurring, so we must request kernel priority in
11057 11051 * case we have to sleep to keep from getting buried while holding
11058 11052 * the HAT_ISMBUSY flag set, which in turn could block other kernel
11059 11053 * threads from running (for example, in sfmmu_uvatopfn()).
11060 11054 */
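/*
 * Callers bracket the ISM operation as, roughly:
 *
 *	sfmmu_ismhat_enter(sfmmup, 0);
 *	... map or unmap the ISM segment ...
 *	sfmmu_ismhat_exit(sfmmup, 0);
 *
 * passing 1 instead when the hat lock is already held.
 */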
11061 11055 static void
11062 11056 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
11063 11057 {
11064 11058 hatlock_t *hatlockp;
11065 11059
11066 11060 THREAD_KPRI_REQUEST();
11067 11061 if (!hatlock_held)
11068 11062 hatlockp = sfmmu_hat_enter(sfmmup);
11069 11063 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
11070 11064 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11071 11065 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
11072 11066 if (!hatlock_held)
11073 11067 sfmmu_hat_exit(hatlockp);
11074 11068 }
11075 11069
11076 11070 static void
11077 11071 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held)
11078 11072 {
11079 11073 hatlock_t *hatlockp;
11080 11074
11081 11075 if (!hatlock_held)
11082 11076 hatlockp = sfmmu_hat_enter(sfmmup);
11083 11077 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
11084 11078 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
11085 11079 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11086 11080 if (!hatlock_held)
11087 11081 sfmmu_hat_exit(hatlockp);
11088 11082 THREAD_KPRI_RELEASE();
11089 11083 }
11090 11084
11091 11085 /*
11092 11086 *
11093 11087 * Algorithm:
11094 11088 *
11095 11089 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
11096 11090 * hblks.
11097 11091 *
11098 11092 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
11099 11093 *
11100 11094 * (a) try to return an hblk from reserve pool of free hblks;
11101 11095 * (b) if the reserve pool is empty, acquire hblk_reserve_lock
11102 11096 * and return hblk_reserve.
11103 11097 *
11104 11098 * (3) call kmem_cache_alloc() to allocate hblk;
11105 11099 *
11106 11100 * (a) if hblk_reserve_lock is held by the current thread,
11107 11101 * atomically replace hblk_reserve by the hblk that is
11108 11102 * returned by kmem_cache_alloc; release hblk_reserve_lock
11109 11103 * and call kmem_cache_alloc() again.
11110 11104 * (b) if reserve pool is not full, add the hblk that is
11111 11105 * returned by kmem_cache_alloc to reserve pool and
11112 11106 * call kmem_cache_alloc again.
11113 11107 *
11114 11108 */
11115 11109 static struct hme_blk *
11116 11110 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
11117 11111 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
11118 11112 uint_t flags, uint_t rid)
11119 11113 {
11120 11114 struct hme_blk *hmeblkp = NULL;
11121 11115 struct hme_blk *newhblkp;
11122 11116 struct hme_blk *shw_hblkp = NULL;
11123 11117 struct kmem_cache *sfmmu_cache = NULL;
11124 11118 uint64_t hblkpa;
11125 11119 ulong_t index;
11126 11120 uint_t owner; /* set to 1 if using hblk_reserve */
11127 11121 uint_t forcefree;
11128 11122 int sleep;
11129 11123 sf_srd_t *srdp;
11130 11124 sf_region_t *rgnp;
11131 11125
11132 11126 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11133 11127 ASSERT(hblktag.htag_rid == rid);
11134 11128 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
11135 11129 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11136 11130 IS_P2ALIGNED(vaddr, TTEBYTES(size)));
11137 11131
11138 11132 /*
11139 11133 * If segkmem is not created yet, allocate from static hmeblks
11140 11134 * created at the end of startup_modules(). See the block comment
11141 11135 * in startup_modules() describing how we estimate the number of
11142 11136 * static hmeblks that will be needed during re-map.
11143 11137 */
11144 11138 if (!hblk_alloc_dynamic) {
11145 11139
11146 11140 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11147 11141
11148 11142 if (size == TTE8K) {
11149 11143 index = nucleus_hblk8.index;
11150 11144 if (index >= nucleus_hblk8.len) {
11151 11145 /*
11152 11146 * If we panic here, see startup_modules() to
11153 11147 * make sure that we are calculating the
11154 11148 * number of hblk8's that we need correctly.
11155 11149 */
11156 11150 prom_panic("no nucleus hblk8 to allocate");
11157 11151 }
11158 11152 hmeblkp =
11159 11153 (struct hme_blk *)&nucleus_hblk8.list[index];
11160 11154 nucleus_hblk8.index++;
11161 11155 SFMMU_STAT(sf_hblk8_nalloc);
11162 11156 } else {
11163 11157 index = nucleus_hblk1.index;
11164 11158 if (nucleus_hblk1.index >= nucleus_hblk1.len) {
11165 11159 /*
11166 11160 * If we panic here, see startup_modules().
11167 11161 * Most likely you need to update the
11168 11162 * calculation of the number of hblk1 elements
11169 11163 * that the kernel needs to boot.
11170 11164 */
11171 11165 prom_panic("no nucleus hblk1 to allocate");
11172 11166 }
11173 11167 hmeblkp =
11174 11168 (struct hme_blk *)&nucleus_hblk1.list[index];
11175 11169 nucleus_hblk1.index++;
11176 11170 SFMMU_STAT(sf_hblk1_nalloc);
11177 11171 }
11178 11172
11179 11173 goto hblk_init;
11180 11174 }
11181 11175
11182 11176 SFMMU_HASH_UNLOCK(hmebp);
11183 11177
11184 11178 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) {
11185 11179 if (mmu_page_sizes == max_mmu_page_sizes) {
11186 11180 if (size < TTE256M)
11187 11181 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11188 11182 size, flags);
11189 11183 } else {
11190 11184 if (size < TTE4M)
11191 11185 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr,
11192 11186 size, flags);
11193 11187 }
11194 11188 } else if (SFMMU_IS_SHMERID_VALID(rid)) {
11195 11189 /*
11196 11190 * Shared hmes use per region bitmaps in rgn_hmeflag
11197 11191 		 * Shared hmes use per-region bitmaps in rgn_hmeflag
11198 11192 		 * rather than shadow hmeblks to keep track of the
11199 11193 		 * mapping sizes which have been allocated for the region.
11200 11194 		 * Here we clean up old invalid hmeblks with this rid,
11201 11195 */
11202 11196 int ttesz;
11203 11197 caddr_t va;
11204 11198 caddr_t eva = vaddr + TTEBYTES(size);
11205 11199
11206 11200 ASSERT(sfmmup != KHATID);
11207 11201
11208 11202 srdp = sfmmup->sfmmu_srdp;
11209 11203 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11210 11204 rgnp = srdp->srd_hmergnp[rid];
11211 11205 ASSERT(rgnp != NULL && rgnp->rgn_id == rid);
11212 11206 ASSERT(rgnp->rgn_refcnt != 0);
11213 11207 ASSERT(size <= rgnp->rgn_pgszc);
11214 11208
11215 11209 ttesz = HBLK_MIN_TTESZ;
11216 11210 do {
11217 11211 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) {
11218 11212 continue;
11219 11213 }
11220 11214
11221 11215 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) {
11222 11216 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz);
11223 11217 } else if (ttesz < size) {
11224 11218 for (va = vaddr; va < eva;
11225 11219 va += TTEBYTES(ttesz)) {
11226 11220 sfmmu_cleanup_rhblk(srdp, va, rid,
11227 11221 ttesz);
11228 11222 }
11229 11223 }
11230 11224 } while (++ttesz <= rgnp->rgn_pgszc);
11231 11225 }
11232 11226
11233 11227 fill_hblk:
11234 11228 owner = (hblk_reserve_thread == curthread) ? 1 : 0;
11235 11229
11236 11230 if (owner && size == TTE8K) {
11237 11231
11238 11232 ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
11239 11233 /*
11240 11234 * We are really in a tight spot. We already own
11241 11235 * hblk_reserve and we need another hblk. In anticipation
11242 11236 * of this kind of scenario, we specifically set aside
11243 11237 * HBLK_RESERVE_MIN number of hblks to be used exclusively
11244 11238 		 * by the owner of hblk_reserve.
11245 11239 */
11246 11240 SFMMU_STAT(sf_hblk_recurse_cnt);
11247 11241
11248 11242 if (!sfmmu_get_free_hblk(&hmeblkp, 1))
11249 11243 panic("sfmmu_hblk_alloc: reserve list is empty");
11250 11244
11251 11245 goto hblk_verify;
11252 11246 }
11253 11247
11254 11248 ASSERT(!owner);
11255 11249
11256 11250 if ((flags & HAT_NO_KALLOC) == 0) {
11257 11251
11258 11252 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache);
11259 11253 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP);
11260 11254
11261 11255 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) {
11262 11256 hmeblkp = sfmmu_hblk_steal(size);
11263 11257 } else {
11264 11258 /*
11265 11259 * if we are the owner of hblk_reserve,
11266 11260 * swap hblk_reserve with hmeblkp and
11267 11261 * start a fresh life. Hope things go
11268 11262 * better this time.
11269 11263 */
11270 11264 if (hblk_reserve_thread == curthread) {
11271 11265 ASSERT(sfmmu_cache == sfmmu8_cache);
11272 11266 sfmmu_hblk_swap(hmeblkp);
11273 11267 hblk_reserve_thread = NULL;
11274 11268 mutex_exit(&hblk_reserve_lock);
11275 11269 goto fill_hblk;
11276 11270 }
11277 11271 /*
11278 11272 * let's donate this hblk to our reserve list if
11279 11273 * we are not mapping kernel range
11280 11274 */
11281 11275 if (size == TTE8K && sfmmup != KHATID) {
11282 11276 if (sfmmu_put_free_hblk(hmeblkp, 0))
11283 11277 goto fill_hblk;
11284 11278 }
11285 11279 }
11286 11280 } else {
11287 11281 /*
11288 11282 * We are here to map the slab in sfmmu8_cache; let's
11289 11283 * check if we could tap our reserve list; if successful,
11290 11284 		 * this will avoid the pain of going through sfmmu_hblk_swap
11291 11285 */
11292 11286 SFMMU_STAT(sf_hblk_slab_cnt);
11293 11287 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) {
11294 11288 /*
11295 11289 * let's start hblk_reserve dance
11296 11290 */
11297 11291 SFMMU_STAT(sf_hblk_reserve_cnt);
11298 11292 owner = 1;
11299 11293 mutex_enter(&hblk_reserve_lock);
11300 11294 hmeblkp = HBLK_RESERVE;
11301 11295 hblk_reserve_thread = curthread;
11302 11296 }
11303 11297 }
11304 11298
11305 11299 hblk_verify:
11306 11300 ASSERT(hmeblkp != NULL);
11307 11301 set_hblk_sz(hmeblkp, size);
11308 11302 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp));
11309 11303 SFMMU_HASH_LOCK(hmebp);
11310 11304 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11311 11305 if (newhblkp != NULL) {
11312 11306 SFMMU_HASH_UNLOCK(hmebp);
11313 11307 if (hmeblkp != HBLK_RESERVE) {
11314 11308 /*
11315 11309 * This is really tricky!
11316 11310 *
11317 11311 * vmem_alloc(vmem_seg_arena)
11318 11312 * vmem_alloc(vmem_internal_arena)
11319 11313 * segkmem_alloc(heap_arena)
11320 11314 * vmem_alloc(heap_arena)
11321 11315 * page_create()
11322 11316 * hat_memload()
11323 11317 * kmem_cache_free()
11324 11318 * kmem_cache_alloc()
11325 11319 * kmem_slab_create()
11326 11320 * vmem_alloc(kmem_internal_arena)
11327 11321 * segkmem_alloc(heap_arena)
11328 11322 * vmem_alloc(heap_arena)
11329 11323 * page_create()
11330 11324 * hat_memload()
11331 11325 * kmem_cache_free()
11332 11326 * ...
11333 11327 *
11334 11328 			 * Thus, hat_memload() could call kmem_cache_free
11335 11329 			 * enough times that we could easily
11336 11330 			 * hit the bottom of the stack or run out of the reserve
11337 11331 			 * list of vmem_seg structs. So, we must donate
11338 11332 * this hblk to reserve list if it's allocated
11339 11333 * from sfmmu8_cache *and* mapping kernel range.
11340 11334 * We don't need to worry about freeing hmeblk1's
11341 11335 * to kmem since they don't map any kmem slabs.
11342 11336 *
11343 11337 * Note: When segkmem supports largepages, we must
11344 11338 * free hmeblk1's to reserve list as well.
11345 11339 */
11346 11340 forcefree = (sfmmup == KHATID) ? 1 : 0;
11347 11341 if (size == TTE8K &&
11348 11342 sfmmu_put_free_hblk(hmeblkp, forcefree)) {
11349 11343 goto re_verify;
11350 11344 }
11351 11345 ASSERT(sfmmup != KHATID);
11352 11346 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp);
11353 11347 } else {
11354 11348 /*
11355 11349 * Hey! we don't need hblk_reserve any more.
11356 11350 */
11357 11351 ASSERT(owner);
11358 11352 hblk_reserve_thread = NULL;
11359 11353 mutex_exit(&hblk_reserve_lock);
11360 11354 owner = 0;
11361 11355 }
11362 11356 re_verify:
11363 11357 /*
11364 11358 * let's check if the goodies are still present
11365 11359 */
11366 11360 SFMMU_HASH_LOCK(hmebp);
11367 11361 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp);
11368 11362 if (newhblkp != NULL) {
11369 11363 /*
11370 11364 * return newhblkp if it's not hblk_reserve;
11371 11365 * if newhblkp is hblk_reserve, return it
11372 11366 * _only if_ we are the owner of hblk_reserve.
11373 11367 */
11374 11368 if (newhblkp != HBLK_RESERVE || owner) {
11375 11369 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) ||
11376 11370 newhblkp->hblk_shared);
11377 11371 ASSERT(SFMMU_IS_SHMERID_VALID(rid) ||
11378 11372 !newhblkp->hblk_shared);
11379 11373 return (newhblkp);
11380 11374 } else {
11381 11375 /*
11382 11376 * we just hit hblk_reserve in the hash and
11383 11377 * we are not the owner of that;
11384 11378 *
11385 11379 * block until hblk_reserve_thread completes
11386 11380 * swapping hblk_reserve and try the dance
11387 11381 * once again.
11388 11382 */
11389 11383 SFMMU_HASH_UNLOCK(hmebp);
11390 11384 mutex_enter(&hblk_reserve_lock);
11391 11385 mutex_exit(&hblk_reserve_lock);
11392 11386 SFMMU_STAT(sf_hblk_reserve_hit);
11393 11387 goto fill_hblk;
11394 11388 }
11395 11389 } else {
11396 11390 /*
11397 11391 * it's no more! try the dance once again.
11398 11392 */
11399 11393 SFMMU_HASH_UNLOCK(hmebp);
11400 11394 goto fill_hblk;
11401 11395 }
11402 11396 }
11403 11397
11404 11398 hblk_init:
11405 11399 if (SFMMU_IS_SHMERID_VALID(rid)) {
11406 11400 uint16_t tteflag = 0x1 <<
11407 11401 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size);
11408 11402
11409 11403 if (!(rgnp->rgn_hmeflags & tteflag)) {
11410 11404 atomic_or_16(&rgnp->rgn_hmeflags, tteflag);
11411 11405 }
11412 11406 hmeblkp->hblk_shared = 1;
11413 11407 } else {
11414 11408 hmeblkp->hblk_shared = 0;
11415 11409 }
11416 11410 set_hblk_sz(hmeblkp, size);
11417 11411 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11418 11412 hmeblkp->hblk_next = (struct hme_blk *)NULL;
11419 11413 hmeblkp->hblk_tag = hblktag;
11420 11414 hmeblkp->hblk_shadow = shw_hblkp;
11421 11415 hblkpa = hmeblkp->hblk_nextpa;
11422 11416 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
11423 11417
11424 11418 ASSERT(get_hblk_ttesz(hmeblkp) == size);
11425 11419 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size));
11426 11420 ASSERT(hmeblkp->hblk_hmecnt == 0);
11427 11421 ASSERT(hmeblkp->hblk_vcnt == 0);
11428 11422 ASSERT(hmeblkp->hblk_lckcnt == 0);
11429 11423 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp));
11430 11424 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa);
11431 11425 return (hmeblkp);
11432 11426 }
11433 11427
11434 11428 /*
11435 11429 * This function cleans up each hme_blk on the list, returning it to the reserve pool or its kmem cache.
11436 11430 */
11437 11431 /* ARGSUSED */
11438 11432 static void
11439 11433 sfmmu_hblk_free(struct hme_blk **listp)
11440 11434 {
11441 11435 struct hme_blk *hmeblkp, *next_hmeblkp;
11442 11436 int size;
11443 11437 uint_t critical;
11444 11438 uint64_t hblkpa;
11445 11439
11446 11440 ASSERT(*listp != NULL);
11447 11441
11448 11442 hmeblkp = *listp;
11449 11443 while (hmeblkp != NULL) {
11450 11444 next_hmeblkp = hmeblkp->hblk_next;
11451 11445 ASSERT(!hmeblkp->hblk_hmecnt);
11452 11446 ASSERT(!hmeblkp->hblk_vcnt);
11453 11447 ASSERT(!hmeblkp->hblk_lckcnt);
11454 11448 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve);
11455 11449 ASSERT(hmeblkp->hblk_shared == 0);
11456 11450 ASSERT(hmeblkp->hblk_shw_bit == 0);
11457 11451 ASSERT(hmeblkp->hblk_shadow == NULL);
11458 11452
11459 11453 hblkpa = va_to_pa((caddr_t)hmeblkp);
11460 11454 ASSERT(hblkpa != (uint64_t)-1);
11461 11455 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0;
11462 11456
11463 11457 size = get_hblk_ttesz(hmeblkp);
11464 11458 hmeblkp->hblk_next = NULL;
11465 11459 hmeblkp->hblk_nextpa = hblkpa;
11466 11460
11467 11461 if (hmeblkp->hblk_nuc_bit == 0) {
11468 11462
11469 11463 if (size != TTE8K ||
11470 11464 !sfmmu_put_free_hblk(hmeblkp, critical))
11471 11465 kmem_cache_free(get_hblk_cache(hmeblkp),
11472 11466 hmeblkp);
11473 11467 }
11474 11468 hmeblkp = next_hmeblkp;
11475 11469 }
11476 11470 }
11477 11471
11478 11472 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30
11479 11473 #define SFMMU_HBLK_STEAL_THRESHOLD 5
11480 11474
11481 11475 static uint_t sfmmu_hblk_steal_twice;
11482 11476 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count;
11483 11477
11484 11478 /*
11485 11479 * Steal a hmeblk from user or kernel hme hash lists.
11486 11480 * For an 8K tte, grab one from the reserve pool (freehblkp) before
11487 11481 * proceeding to steal; if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD
11488 11482 * attempts, tap into the critical reserve of freehblkp.
11489 11483 * Note: We keep looping in this routine until we find one.
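 * The search order below is: the per-cpu pending hblk queues; then, for 8K
 * hblks, the freehblkp reserve pool; then the user hash buckets starting at
 * uhmehash_steal_hand; and finally the kernel hash buckets.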
11490 11484 */
11491 11485 static struct hme_blk *
11492 11486 sfmmu_hblk_steal(int size)
11493 11487 {
11494 11488 static struct hmehash_bucket *uhmehash_steal_hand = NULL;
11495 11489 struct hmehash_bucket *hmebp;
11496 11490 struct hme_blk *hmeblkp = NULL, *pr_hblk;
11497 11491 uint64_t hblkpa;
11498 11492 int i;
11499 11493 uint_t loop_cnt = 0, critical;
11500 11494
11501 11495 for (;;) {
11502 11496 /* Check cpu hblk pending queues */
11503 11497 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) {
11504 11498 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp);
11505 11499 ASSERT(hmeblkp->hblk_hmecnt == 0);
11506 11500 ASSERT(hmeblkp->hblk_vcnt == 0);
11507 11501 return (hmeblkp);
11508 11502 }
11509 11503
11510 11504 if (size == TTE8K) {
11511 11505 critical =
11512 11506 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0;
11513 11507 if (sfmmu_get_free_hblk(&hmeblkp, critical))
11514 11508 return (hmeblkp);
11515 11509 }
11516 11510
11517 11511 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash :
11518 11512 uhmehash_steal_hand;
11519 11513 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]);
11520 11514
11521 11515 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ +
11522 11516 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) {
11523 11517 SFMMU_HASH_LOCK(hmebp);
11524 11518 hmeblkp = hmebp->hmeblkp;
11525 11519 hblkpa = hmebp->hmeh_nextpa;
11526 11520 pr_hblk = NULL;
11527 11521 while (hmeblkp) {
11528 11522 /*
11529 11523 * check if it is a hmeblk that is not locked
11530 11524 * and not shared. Skip shadow hmeblks with
11531 11525 * shadow_mask set, i.e. a non-zero valid count.
11532 11526 */
11533 11527 if ((get_hblk_ttesz(hmeblkp) == size) &&
11534 11528 (hmeblkp->hblk_shw_bit == 0 ||
11535 11529 hmeblkp->hblk_vcnt == 0) &&
11536 11530 (hmeblkp->hblk_lckcnt == 0)) {
11537 11531 /*
11538 11532 * There is a high probability that we
11539 11533 * will find a free one. Search some
11540 11534 * buckets for a free hmeblk first,
11541 11535 * before unloading a valid hmeblk.
11542 11536 */
11543 11537 if ((hmeblkp->hblk_vcnt == 0 &&
11544 11538 hmeblkp->hblk_hmecnt == 0) || (i >=
11545 11539 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) {
11546 11540 if (sfmmu_steal_this_hblk(hmebp,
11547 11541 hmeblkp, hblkpa, pr_hblk)) {
11548 11542 /*
11549 11543 * Hblk is unloaded
11550 11544 * successfully
11551 11545 */
11552 11546 break;
11553 11547 }
11554 11548 }
11555 11549 }
11556 11550 pr_hblk = hmeblkp;
11557 11551 hblkpa = hmeblkp->hblk_nextpa;
11558 11552 hmeblkp = hmeblkp->hblk_next;
11559 11553 }
11560 11554
11561 11555 SFMMU_HASH_UNLOCK(hmebp);
11562 11556 if (hmebp++ == &uhme_hash[UHMEHASH_SZ])
11563 11557 hmebp = uhme_hash;
11564 11558 }
11565 11559 uhmehash_steal_hand = hmebp;
11566 11560
11567 11561 if (hmeblkp != NULL)
11568 11562 break;
11569 11563
11570 11564 /*
11571 11565 * in the worst case, look for a free one in the kernel
11572 11566 * hash table.
11573 11567 */
11574 11568 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) {
11575 11569 SFMMU_HASH_LOCK(hmebp);
11576 11570 hmeblkp = hmebp->hmeblkp;
11577 11571 hblkpa = hmebp->hmeh_nextpa;
11578 11572 pr_hblk = NULL;
11579 11573 while (hmeblkp) {
11580 11574 /*
11581 11575 * check if it is a free hmeblk
11582 11576 */
11583 11577 if ((get_hblk_ttesz(hmeblkp) == size) &&
11584 11578 (hmeblkp->hblk_lckcnt == 0) &&
11585 11579 (hmeblkp->hblk_vcnt == 0) &&
11586 11580 (hmeblkp->hblk_hmecnt == 0)) {
11587 11581 if (sfmmu_steal_this_hblk(hmebp,
11588 11582 hmeblkp, hblkpa, pr_hblk)) {
11589 11583 break;
11590 11584 } else {
11591 11585 /*
11592 11586 * Cannot fail since we have
11593 11587 * hash lock.
11594 11588 */
11595 11589 panic("fail to steal?");
11596 11590 }
11597 11591 }
11598 11592
11599 11593 pr_hblk = hmeblkp;
11600 11594 hblkpa = hmeblkp->hblk_nextpa;
11601 11595 hmeblkp = hmeblkp->hblk_next;
11602 11596 }
11603 11597
11604 11598 SFMMU_HASH_UNLOCK(hmebp);
11605 11599 if (hmebp++ == &khme_hash[KHMEHASH_SZ])
11606 11600 hmebp = khme_hash;
11607 11601 }
11608 11602
11609 11603 if (hmeblkp != NULL)
11610 11604 break;
11611 11605 sfmmu_hblk_steal_twice++;
11612 11606 }
11613 11607 return (hmeblkp);
11614 11608 }
11615 11609
11616 11610 /*
11617 11611 * This routine does real work to prepare a hblk to be "stolen" by
11618 11612 * unloading the mappings, updating shadow counts ....
11619 11613 * It returns 1 if the block is ready to be reused (stolen), or 0
11620 11614 * if the block cannot be stolen yet because pageunload is still
11621 11615 * working on this hblk.
11622 11616 */
11623 11617 static int
11624 11618 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11625 11619 uint64_t hblkpa, struct hme_blk *pr_hblk)
11626 11620 {
11627 11621 int shw_size, vshift;
11628 11622 struct hme_blk *shw_hblkp;
11629 11623 caddr_t vaddr;
11630 11624 uint_t shw_mask, newshw_mask;
11631 11625 struct hme_blk *list = NULL;
11632 11626
11633 11627 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11634 11628
11635 11629 /*
11636 11630 * check if the hmeblk is free, unload if necessary
11637 11631 */
11638 11632 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11639 11633 sfmmu_t *sfmmup;
11640 11634 demap_range_t dmr;
11641 11635
11642 11636 sfmmup = hblktosfmmu(hmeblkp);
11643 11637 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) {
11644 11638 return (0);
11645 11639 }
11646 11640 DEMAP_RANGE_INIT(sfmmup, &dmr);
11647 11641 (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
11648 11642 (caddr_t)get_hblk_base(hmeblkp),
11649 11643 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD);
11650 11644 DEMAP_RANGE_FLUSH(&dmr);
11651 11645 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) {
11652 11646 /*
11653 11647 * Pageunload is working on the same hblk.
11654 11648 */
11655 11649 return (0);
11656 11650 }
11657 11651
11658 11652 sfmmu_hblk_steal_unload_count++;
11659 11653 }
11660 11654
11661 11655 ASSERT(hmeblkp->hblk_lckcnt == 0);
11662 11656 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0);
11663 11657
11664 11658 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1);
11665 11659 hmeblkp->hblk_nextpa = hblkpa;
11666 11660
11667 11661 shw_hblkp = hmeblkp->hblk_shadow;
11668 11662 if (shw_hblkp) {
11669 11663 ASSERT(!hmeblkp->hblk_shared);
11670 11664 shw_size = get_hblk_ttesz(shw_hblkp);
11671 11665 vaddr = (caddr_t)get_hblk_base(hmeblkp);
11672 11666 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
11673 11667 ASSERT(vshift < 8);
11674 11668 /*
11675 11669 * Atomically clear shadow mask bit
11676 11670 */
11677 11671 do {
11678 11672 shw_mask = shw_hblkp->hblk_shw_mask;
11679 11673 ASSERT(shw_mask & (1 << vshift));
11680 11674 newshw_mask = shw_mask & ~(1 << vshift);
11681 11675 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
11682 11676 shw_mask, newshw_mask);
11683 11677 } while (newshw_mask != shw_mask);
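		/*
		 * atomic_cas_32() returns the value previously found in
		 * hblk_shw_mask; the loop retries until that observed value
		 * equals the shw_mask our update was based on, i.e. until the
		 * cleared bit has actually been stored.
		 */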
11684 11678 hmeblkp->hblk_shadow = NULL;
11685 11679 }
11686 11680
11687 11681 /*
11688 11682 * Remove the shadow bit if we are stealing an unused shadow hmeblk.
11689 11683 * sfmmu_hblk_alloc needs it that way, and will set the shadow bit later
11690 11684 * if we are indeed allocating a shadow hmeblk.
11691 11685 */
11692 11686 hmeblkp->hblk_shw_bit = 0;
11693 11687
11694 11688 if (hmeblkp->hblk_shared) {
11695 11689 sf_srd_t *srdp;
11696 11690 sf_region_t *rgnp;
11697 11691 uint_t rid;
11698 11692
11699 11693 srdp = hblktosrd(hmeblkp);
11700 11694 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
11701 11695 rid = hmeblkp->hblk_tag.htag_rid;
11702 11696 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11703 11697 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11704 11698 rgnp = srdp->srd_hmergnp[rid];
11705 11699 ASSERT(rgnp != NULL);
11706 11700 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
11707 11701 hmeblkp->hblk_shared = 0;
11708 11702 }
11709 11703
11710 11704 sfmmu_hblk_steal_count++;
11711 11705 SFMMU_STAT(sf_steal_count);
11712 11706
11713 11707 return (1);
11714 11708 }
11715 11709
11716 11710 struct hme_blk *
11717 11711 sfmmu_hmetohblk(struct sf_hment *sfhme)
11718 11712 {
11719 11713 struct hme_blk *hmeblkp;
11720 11714 struct sf_hment *sfhme0;
11721 11715 struct hme_blk *hblk_dummy = 0;
11722 11716
11723 11717 /*
11724 11718 * No dummy sf_hments, please.
11725 11719 */
11726 11720 ASSERT(sfhme->hme_tte.ll != 0);
11727 11721
11728 11722 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum;
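	/*
	 * Recover the enclosing hme_blk from an embedded sf_hment: back up
	 * from this sf_hment to hblk_hme[0] using tte_hmenum, then subtract
	 * the offset of hblk_hme[0] within struct hme_blk. hblk_dummy is a
	 * NULL pointer used only to compute that offset (the usual
	 * offsetof idiom).
	 */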
11729 11723 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 -
11730 11724 (uintptr_t)&hblk_dummy->hblk_hme[0]);
11731 11725
11732 11726 return (hmeblkp);
11733 11727 }
11734 11728
11735 11729 /*
11736 11730 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag.
11737 11731 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using
11738 11732 * KM_SLEEP allocation.
11739 11733 *
11740 11734 * The caller must hold the hat lock on sfmmup.
11741 11735 */
11742 11736 static void
11743 11737 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11744 11738 {
11745 11739 struct tsb_info *tsbinfop, *next;
11746 11740 tsb_replace_rc_t rc;
11747 11741 boolean_t gotfirst = B_FALSE;
11748 11742
11749 11743 ASSERT(sfmmup != ksfmmup);
11750 11744 ASSERT(sfmmu_hat_lock_held(sfmmup));
11751 11745
11752 11746 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) {
11753 11747 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
11754 11748 }
11755 11749
11756 11750 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11757 11751 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN);
11758 11752 } else {
11759 11753 return;
11760 11754 }
11761 11755
11762 11756 ASSERT(sfmmup->sfmmu_tsb != NULL);
11763 11757
11764 11758 /*
11765 11759 * Loop over all tsbinfo's replacing them with ones that actually have
11766 11760 * a TSB. If any of the replacements ever fail, bail out of the loop.
11767 11761 */
11768 11762 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) {
11769 11763 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED);
11770 11764 next = tsbinfop->tsb_next;
11771 11765 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc,
11772 11766 hatlockp, TSB_SWAPIN);
11773 11767 if (rc != TSB_SUCCESS) {
11774 11768 break;
11775 11769 }
11776 11770 gotfirst = B_TRUE;
11777 11771 }
11778 11772
11779 11773 switch (rc) {
11780 11774 case TSB_SUCCESS:
11781 11775 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11782 11776 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11783 11777 return;
11784 11778 case TSB_LOSTRACE:
11785 11779 break;
11786 11780 case TSB_ALLOCFAIL:
11787 11781 break;
11788 11782 default:
11789 11783 panic("sfmmu_replace_tsb returned unrecognized failure code "
11790 11784 "%d", rc);
11791 11785 }
11792 11786
11793 11787 /*
11794 11788 * In this case, we failed to get one of our TSBs. If we failed to
11795 11789 * get the first TSB, get one of minimum size (8KB). Walk the list
11796 11790 * and throw away the tsbinfos, starting where the allocation failed;
11797 11791 * we can get by with just one TSB as long as we don't leave the
11798 11792 * SWAPPED tsbinfo structures lying around.
11799 11793 */
11800 11794 tsbinfop = sfmmup->sfmmu_tsb;
11801 11795 next = tsbinfop->tsb_next;
11802 11796 tsbinfop->tsb_next = NULL;
11803 11797
11804 11798 sfmmu_hat_exit(hatlockp);
11805 11799 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) {
11806 11800 next = tsbinfop->tsb_next;
11807 11801 sfmmu_tsbinfo_free(tsbinfop);
11808 11802 }
11809 11803 hatlockp = sfmmu_hat_enter(sfmmup);
11810 11804
11811 11805 /*
11812 11806 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K
11813 11807 * pages.
11814 11808 */
11815 11809 if (!gotfirst) {
11816 11810 tsbinfop = sfmmup->sfmmu_tsb;
11817 11811 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE,
11818 11812 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC);
11819 11813 ASSERT(rc == TSB_SUCCESS);
11820 11814 }
11821 11815
11822 11816 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN);
11823 11817 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
11824 11818 }
11825 11819
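/*
 * Return 1 if addr falls within one of the hme regions whose region ids are
 * set in bmw, the (possibly masked) bitmap word at index w of the srd's hme
 * region map; return 0 otherwise.
 */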
11826 11820 static int
11827 11821 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw)
11828 11822 {
11829 11823 ulong_t bix = 0;
11830 11824 uint_t rid;
11831 11825 sf_region_t *rgnp;
11832 11826
11833 11827 ASSERT(srdp != NULL);
11834 11828 ASSERT(srdp->srd_refcnt != 0);
11835 11829
11836 11830 w <<= BT_ULSHIFT;
11837 11831 while (bmw) {
11838 11832 if (!(bmw & 0x1)) {
11839 11833 bix++;
11840 11834 bmw >>= 1;
11841 11835 continue;
11842 11836 }
11843 11837 rid = w | bix;
11844 11838 rgnp = srdp->srd_hmergnp[rid];
11845 11839 ASSERT(rgnp->rgn_refcnt > 0);
11846 11840 ASSERT(rgnp->rgn_id == rid);
11847 11841 if (addr < rgnp->rgn_saddr ||
11848 11842 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) {
11849 11843 bix++;
11850 11844 bmw >>= 1;
11851 11845 } else {
11852 11846 return (1);
11853 11847 }
11854 11848 }
11855 11849 return (0);
11856 11850 }
11857 11851
11858 11852 /*
11859 11853 * Handle exceptions for low level tsb_handler.
11860 11854 *
11861 11855 * There are many scenarios that could land us here:
11862 11856 *
11863 11857 * If the context is invalid we land here. The context can be invalid
11864 11858 * for 3 reasons: 1) we couldn't allocate a new context and now need to
11865 11859 * perform a wrap-around operation in order to allocate a new context;
11866 11860 * 2) the context was invalidated to change pagesize programming; 3) the
11867 11861 * ISM or TSB configuration is changing for this process and we are
11868 11862 * forced in here to do a synchronization operation. If the context is
11869 11863 * valid we can be here from the window trap handler. In that case just
11870 11864 * call trap() to handle the fault.
11871 11865 *
11872 11866 * Note that the process will run in INVALID_CONTEXT before
11873 11867 * faulting into here and subsequently loading the MMU registers
11874 11868 * (including the TSB base register) associated with this process.
11875 11869 * For this reason, the trap handlers must all test for
11876 11870 * INVALID_CONTEXT before attempting to access any registers other
11877 11871 * than the context registers.
11878 11872 */
11879 11873 void
11880 11874 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype)
11881 11875 {
11882 11876 sfmmu_t *sfmmup, *shsfmmup;
11883 11877 uint_t ctxtype;
11884 11878 klwp_id_t lwp;
11885 11879 char lwp_save_state;
11886 11880 hatlock_t *hatlockp, *shatlockp;
11887 11881 struct tsb_info *tsbinfop;
11888 11882 struct tsbmiss *tsbmp;
11889 11883 sf_scd_t *scdp;
11890 11884
11891 11885 SFMMU_STAT(sf_tsb_exceptions);
11892 11886 SFMMU_MMU_STAT(mmu_tsb_exceptions);
11893 11887 sfmmup = astosfmmu(curthread->t_procp->p_as);
11894 11888 /*
11895 11889 * Note that on sun4u the tagaccess register contains the ctxnum,
11896 11890 * while sun4v passes the ctxtype in the tagaccess register.
11897 11891 */
11898 11892 ctxtype = tagaccess & TAGACC_CTX_MASK;
11899 11893
11900 11894 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT);
11901 11895 ASSERT(sfmmup->sfmmu_ismhat == 0);
11902 11896 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) ||
11903 11897 ctxtype == INVALID_CONTEXT);
11904 11898
11905 11899 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) {
11906 11900 /*
11907 11901 * We may land here because the shme bitmap and pagesize
11908 11902 * flags are updated lazily in the tsbmiss area on other cpus.
11909 11903 * If we detect here that the tsbmiss area is out of sync with
11910 11904 * the sfmmu, update it and retry the trapped instruction.
11911 11905 * Otherwise call trap().
11912 11906 */
11913 11907 int ret = 0;
11914 11908 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K);
11915 11909 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);
11916 11910
11917 11911 /*
11918 11912 * Must set lwp state to LWP_SYS before
11919 11913 * trying to acquire any adaptive lock
11920 11914 */
11921 11915 lwp = ttolwp(curthread);
11922 11916 ASSERT(lwp);
11923 11917 lwp_save_state = lwp->lwp_state;
11924 11918 lwp->lwp_state = LWP_SYS;
11925 11919
11926 11920 hatlockp = sfmmu_hat_enter(sfmmup);
11927 11921 kpreempt_disable();
11928 11922 tsbmp = &tsbmiss_area[CPU->cpu_id];
11929 11923 ASSERT(sfmmup == tsbmp->usfmmup);
11930 11924 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) &
11931 11925 ~tteflag_mask) ||
11932 11926 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) &
11933 11927 ~tteflag_mask)) {
11934 11928 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags;
11935 11929 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags;
11936 11930 ret = 1;
11937 11931 }
11938 11932 if (sfmmup->sfmmu_srdp != NULL) {
11939 11933 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap;
11940 11934 ulong_t *tm = tsbmp->shmermap;
11941 11935 ulong_t i;
11942 11936 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
11943 11937 ulong_t d = tm[i] ^ sm[i];
11944 11938 if (d) {
11945 11939 if (d & sm[i]) {
11946 11940 if (!ret && sfmmu_is_rgnva(
11947 11941 sfmmup->sfmmu_srdp,
11948 11942 addr, i, d & sm[i])) {
11949 11943 ret = 1;
11950 11944 }
11951 11945 }
11952 11946 tm[i] = sm[i];
11953 11947 }
11954 11948 }
11955 11949 }
11956 11950 kpreempt_enable();
11957 11951 sfmmu_hat_exit(hatlockp);
11958 11952 lwp->lwp_state = lwp_save_state;
11959 11953 if (ret) {
11960 11954 return;
11961 11955 }
11962 11956 } else if (ctxtype == INVALID_CONTEXT) {
11963 11957 /*
11964 11958 * First, make sure we come out of here with a valid ctx,
11965 11959 * since if we don't get one we'll simply loop on the
11966 11960 * faulting instruction.
11967 11961 *
11968 11962 * If the ISM mappings are changing, the TSB is being relocated,
11969 11963 * the process is being swapped, or the process is joining or
11970 11964 * leaving an SCD or shared regions, we serialize behind the
11971 11965 * controlling thread with the hat lock, sfmmu_flags and the
11972 11966 * sfmmu_tsb_cv condition variable.
11973 11967 */
11974 11968
11975 11969 /*
11976 11970 * Must set lwp state to LWP_SYS before
11977 11971 * trying to acquire any adaptive lock
11978 11972 */
11979 11973 lwp = ttolwp(curthread);
11980 11974 ASSERT(lwp);
11981 11975 lwp_save_state = lwp->lwp_state;
11982 11976 lwp->lwp_state = LWP_SYS;
11983 11977
11984 11978 hatlockp = sfmmu_hat_enter(sfmmup);
11985 11979 retry:
11986 11980 if ((scdp = sfmmup->sfmmu_scdp) != NULL) {
11987 11981 shsfmmup = scdp->scd_sfmmup;
11988 11982 ASSERT(shsfmmup != NULL);
11989 11983
11990 11984 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL;
11991 11985 tsbinfop = tsbinfop->tsb_next) {
11992 11986 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11993 11987 /* drop the private hat lock */
11994 11988 sfmmu_hat_exit(hatlockp);
11995 11989 /* acquire the shared hat lock */
11996 11990 shatlockp = sfmmu_hat_enter(shsfmmup);
11997 11991 /*
11998 11992 * recheck to see if anything changed
11999 11993 * after we drop the private hat lock.
12000 11994 */
12001 11995 if (sfmmup->sfmmu_scdp == scdp &&
12002 11996 shsfmmup == scdp->scd_sfmmup) {
12003 11997 sfmmu_tsb_chk_reloc(shsfmmup,
12004 11998 shatlockp);
12005 11999 }
12006 12000 sfmmu_hat_exit(shatlockp);
12007 12001 hatlockp = sfmmu_hat_enter(sfmmup);
12008 12002 goto retry;
12009 12003 }
12010 12004 }
12011 12005 }
12012 12006
12013 12007 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
12014 12008 tsbinfop = tsbinfop->tsb_next) {
12015 12009 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
12016 12010 cv_wait(&sfmmup->sfmmu_tsb_cv,
12017 12011 HATLOCK_MUTEXP(hatlockp));
12018 12012 goto retry;
12019 12013 }
12020 12014 }
12021 12015
12022 12016 /*
12023 12017 * Wait for ISM maps to be updated.
12024 12018 */
12025 12019 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12026 12020 cv_wait(&sfmmup->sfmmu_tsb_cv,
12027 12021 HATLOCK_MUTEXP(hatlockp));
12028 12022 goto retry;
12029 12023 }
12030 12024
12031 12025 /* Is this process joining an SCD? */
12032 12026 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12033 12027 /*
12034 12028 * Flush private TSB and setup shared TSB.
12035 12029 * sfmmu_finish_join_scd() does not drop the
12036 12030 * hat lock.
12037 12031 */
12038 12032 sfmmu_finish_join_scd(sfmmup);
12039 12033 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
12040 12034 }
12041 12035
12042 12036 /*
12043 12037 * If we're swapping in, get TSB(s). Note that we must do
12044 12038 * this before we get a ctx or load the MMU state. Once
12045 12039 * we swap in we have to recheck to make sure the TSB(s) and
12046 12040 * ISM mappings didn't change while we slept.
12047 12041 */
12048 12042 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
12049 12043 sfmmu_tsb_swapin(sfmmup, hatlockp);
12050 12044 goto retry;
12051 12045 }
12052 12046
12053 12047 sfmmu_get_ctx(sfmmup);
12054 12048
12055 12049 sfmmu_hat_exit(hatlockp);
12056 12050 /*
12057 12051 * Must restore lwp_state if not calling
12058 12052 * trap() for further processing. Restore
12059 12053 * it anyway.
12060 12054 */
12061 12055 lwp->lwp_state = lwp_save_state;
12062 12056 return;
12063 12057 }
12064 12058 trap(rp, (caddr_t)tagaccess, traptype, 0);
12065 12059 }
12066 12060
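/*
 * With the hat lock held, check whether any of sfmmup's tsbinfos is being
 * relocated (TSB_RELOC_FLAG). If so, block once on sfmmu_tsb_cv; the caller
 * is expected to re-examine the state after we return.
 */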
12067 12061 static void
12068 12062 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
12069 12063 {
12070 12064 struct tsb_info *tp;
12071 12065
12072 12066 ASSERT(sfmmu_hat_lock_held(sfmmup));
12073 12067
12074 12068 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
12075 12069 if (tp->tsb_flags & TSB_RELOC_FLAG) {
12076 12070 cv_wait(&sfmmup->sfmmu_tsb_cv,
12077 12071 HATLOCK_MUTEXP(hatlockp));
12078 12072 break;
12079 12073 }
12080 12074 }
12081 12075 }
12082 12076
12083 12077 /*
12084 12078 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
12085 12079 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page
12086 12080 * lock rather than spinning, to avoid send-mondo timeouts with
12087 12081 * interrupts enabled. When the lock is acquired it is immediately
12088 12082 * released and we return back to sfmmu_vatopfn just after
12089 12083 * the GET_TTE call.
12090 12084 */
12091 12085 void
12092 12086 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
12093 12087 {
12094 12088 struct page **pp;
12095 12089
12096 12090 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12097 12091 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
12098 12092 }
12099 12093
12100 12094 /*
12101 12095 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
12102 12096 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
12103 12097 * cross traps which cannot be handled while spinning in the
12104 12098 * trap handlers. Simply enter and exit the kpr_suspendlock spin
12105 12099 * mutex, which is held by the holder of the suspend bit, and then
12106 12100 * retry the trapped instruction after unwinding.
12107 12101 */
12108 12102 /*ARGSUSED*/
12109 12103 void
12110 12104 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
12111 12105 {
12112 12106 ASSERT(curthread != kreloc_thread);
12113 12107 mutex_enter(&kpr_suspendlock);
12114 12108 mutex_exit(&kpr_suspendlock);
12115 12109 }
12116 12110
12117 12111 /*
12118 12112 * This routine could be optimized to reduce the number of xcalls by flushing
12119 12113 * the entire TLB if the region reference count is above some threshold, but
12120 12114 * the tradeoff will depend on the size of the TLB. So for now, flush the
12121 12115 * specific page one context at a time.
12122 12116 *
12123 12117 * If uselocks is 0 then this is called after all cpus were captured and all
12124 12118 * the hat locks were taken. In this case don't take the region lock; rely on
12125 12119 * the order of the region list update operations in hat_join_region(),
12126 12120 * hat_leave_region() and hat_dup_region(). The ordering in those routines
12127 12121 * guarantees that the list is always forward-walkable and reaches active
12128 12122 * sfmmus regardless of where xc_attention() captures a cpu.
12129 12123 */
12130 12124 cpuset_t
12131 12125 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
12132 12126 struct hme_blk *hmeblkp, int uselocks)
12133 12127 {
12134 12128 sfmmu_t *sfmmup;
12135 12129 cpuset_t cpuset;
12136 12130 cpuset_t rcpuset;
12137 12131 hatlock_t *hatlockp;
12138 12132 uint_t rid = rgnp->rgn_id;
12139 12133 sf_rgn_link_t *rlink;
12140 12134 sf_scd_t *scdp;
12141 12135
12142 12136 ASSERT(hmeblkp->hblk_shared);
12143 12137 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
12144 12138 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
12145 12139
12146 12140 CPUSET_ZERO(rcpuset);
12147 12141 if (uselocks) {
12148 12142 mutex_enter(&rgnp->rgn_mutex);
12149 12143 }
12150 12144 sfmmup = rgnp->rgn_sfmmu_head;
12151 12145 while (sfmmup != NULL) {
12152 12146 if (uselocks) {
12153 12147 hatlockp = sfmmu_hat_enter(sfmmup);
12154 12148 }
12155 12149
12156 12150 /*
12157 12151 * When an SCD is created the SCD hat is linked on the sfmmu
12158 12152 * region lists for each hme region which is part of the
12159 12153 * SCD. If we find an SCD hat when walking these lists,
12160 12154 * we flush the shared TSBs; if we find a private hat
12161 12155 * which is part of an SCD, but where the region
12162 12156 * is not part of the SCD, we flush the private TSBs.
12163 12157 */
12164 12158 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12165 12159 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
12166 12160 scdp = sfmmup->sfmmu_scdp;
12167 12161 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
12168 12162 if (uselocks) {
12169 12163 sfmmu_hat_exit(hatlockp);
12170 12164 }
12171 12165 goto next;
12172 12166 }
12173 12167 }
12174 12168
12175 12169 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12176 12170
12177 12171 kpreempt_disable();
12178 12172 cpuset = sfmmup->sfmmu_cpusran;
12179 12173 CPUSET_AND(cpuset, cpu_ready_set);
12180 12174 CPUSET_DEL(cpuset, CPU->cpu_id);
12181 12175 SFMMU_XCALL_STATS(sfmmup);
12182 12176 xt_some(cpuset, vtag_flushpage_tl1,
12183 12177 (uint64_t)addr, (uint64_t)sfmmup);
12184 12178 vtag_flushpage(addr, (uint64_t)sfmmup);
12185 12179 if (uselocks) {
12186 12180 sfmmu_hat_exit(hatlockp);
12187 12181 }
12188 12182 kpreempt_enable();
12189 12183 CPUSET_OR(rcpuset, cpuset);
12190 12184
12191 12185 next:
12192 12186 /* LINTED: constant in conditional context */
12193 12187 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12194 12188 ASSERT(rlink != NULL);
12195 12189 sfmmup = rlink->next;
12196 12190 }
12197 12191 if (uselocks) {
12198 12192 mutex_exit(&rgnp->rgn_mutex);
12199 12193 }
12200 12194 return (rcpuset);
12201 12195 }
12202 12196
12203 12197 /*
12204 12198 * This routine takes an sfmmu pointer and the va for an address in an
12205 12199 * ISM region as input and returns the corresponding region id in ism_rid.
12206 12200 * The return value of 1 indicates that a region has been found and ism_rid
12207 12201 * is valid, otherwise 0 is returned.
12208 12202 */
12209 12203 static int
12210 12204 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12211 12205 {
12212 12206 ism_blk_t *ism_blkp;
12213 12207 int i;
12214 12208 ism_map_t *ism_map;
12215 12209 #ifdef DEBUG
12216 12210 struct hat *ism_hatid;
12217 12211 #endif
12218 12212 ASSERT(sfmmu_hat_lock_held(sfmmup));
12219 12213
12220 12214 ism_blkp = sfmmup->sfmmu_iblk;
12221 12215 while (ism_blkp != NULL) {
12222 12216 ism_map = ism_blkp->iblk_maps;
12223 12217 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12224 12218 if ((va >= ism_start(ism_map[i])) &&
12225 12219 (va < ism_end(ism_map[i]))) {
12226 12220
12227 12221 *ism_rid = ism_map[i].imap_rid;
12228 12222 #ifdef DEBUG
12229 12223 ism_hatid = ism_map[i].imap_ismhat;
12230 12224 ASSERT(ism_hatid == ism_sfmmup);
12231 12225 ASSERT(ism_hatid->sfmmu_ismhat);
12232 12226 #endif
12233 12227 return (1);
12234 12228 }
12235 12229 }
12236 12230 ism_blkp = ism_blkp->iblk_next;
12237 12231 }
12238 12232 return (0);
12239 12233 }
12240 12234
12241 12235 /*
12242 12236 * Special routine to flush out ism mappings: TSBs, TLBs and D-caches.
12243 12237 * This routine may be called with all cpus captured. Therefore, the
12244 12238 * caller is responsible for holding all locks and disabling kernel
12245 12239 * preemption.
12246 12240 */
12247 12241 /* ARGSUSED */
12248 12242 static void
12249 12243 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12250 12244 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12251 12245 {
12252 12246 cpuset_t cpuset;
12253 12247 caddr_t va;
12254 12248 ism_ment_t *ment;
12255 12249 sfmmu_t *sfmmup;
12256 12250 #ifdef VAC
12257 12251 int vcolor;
12258 12252 #endif
12259 12253
12260 12254 sf_scd_t *scdp;
12261 12255 uint_t ism_rid;
12262 12256
12263 12257 ASSERT(!hmeblkp->hblk_shared);
12264 12258 /*
12265 12259 * Walk the ism_hat's mapping list and flush the page
12266 12260 * from every hat sharing this ism_hat. This routine
12267 12261 * may be called while all cpus have been captured.
12268 12262 * Therefore we can't attempt to grab any locks. For now
12269 12263 * this means we will protect the ism mapping list under
12270 12264 * a single lock which will be grabbed by the caller.
12271 12265 * If hat_share/unshare scalability becomes a performance
12272 12266 * problem then we may need to re-think ism mapping list locking.
12273 12267 */
12274 12268 ASSERT(ism_sfmmup->sfmmu_ismhat);
12275 12269 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12276 12270 addr = addr - ISMID_STARTADDR;
12277 12271
12278 12272 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12279 12273
12280 12274 sfmmup = ment->iment_hat;
12281 12275
12282 12276 va = ment->iment_base_va;
12283 12277 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12284 12278
12285 12279 /*
12286 12280 * When an SCD is created the SCD hat is linked on the ism
12287 12281 * mapping lists for each ISM segment which is part of the
12288 12282 * SCD. If we find an SCD hat when walking these lists,
12289 12283 * we flush the shared TSBs; if we find a private hat
12290 12284 * which is part of an SCD, but where the region
12291 12285 * corresponding to this va is not part of the SCD, we
12292 12286 * flush the private TSBs.
12293 12287 */
12294 12288 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
12295 12289 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) &&
12296 12290 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
12297 12291 if (!find_ism_rid(sfmmup, ism_sfmmup, va,
12298 12292 &ism_rid)) {
12299 12293 cmn_err(CE_PANIC,
12300 12294 "can't find matching ISM rid!");
12301 12295 }
12302 12296
12303 12297 scdp = sfmmup->sfmmu_scdp;
12304 12298 if (SFMMU_IS_ISMRID_VALID(ism_rid) &&
12305 12299 SF_RGNMAP_TEST(scdp->scd_ismregion_map,
12306 12300 ism_rid)) {
12307 12301 continue;
12308 12302 }
12309 12303 }
12310 12304 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1);
12311 12305
12312 12306 cpuset = sfmmup->sfmmu_cpusran;
12313 12307 CPUSET_AND(cpuset, cpu_ready_set);
12314 12308 CPUSET_DEL(cpuset, CPU->cpu_id);
12315 12309 SFMMU_XCALL_STATS(sfmmup);
12316 12310 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va,
12317 12311 (uint64_t)sfmmup);
12318 12312 vtag_flushpage(va, (uint64_t)sfmmup);
12319 12313
12320 12314 #ifdef VAC
12321 12315 /*
12322 12316 * Flush D$
12323 12317 * When flushing D$ we must flush all
12324 12318 * cpus. See sfmmu_cache_flush().
12325 12319 */
12326 12320 if (cache_flush_flag == CACHE_FLUSH) {
12327 12321 cpuset = cpu_ready_set;
12328 12322 CPUSET_DEL(cpuset, CPU->cpu_id);
12329 12323
12330 12324 SFMMU_XCALL_STATS(sfmmup);
12331 12325 vcolor = addr_to_vcolor(va);
12332 12326 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12333 12327 vac_flushpage(pfnum, vcolor);
12334 12328 }
12335 12329 #endif /* VAC */
12336 12330 }
12337 12331 }
12338 12332
12339 12333 /*
12340 12334 * Demaps the TSB and CPU caches, and flushes all TLBs on all CPUs, for
12341 12335 * a particular virtual address and ctx. If noflush is set we do not
12342 12336 * flush the TLB/TSB. This function may or may not be called with the
12343 12337 * HAT lock held.
12344 12338 */
12345 12339 static void
12346 12340 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12347 12341 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12348 12342 int hat_lock_held)
12349 12343 {
12350 12344 #ifdef VAC
12351 12345 int vcolor;
12352 12346 #endif
12353 12347 cpuset_t cpuset;
12354 12348 hatlock_t *hatlockp;
12355 12349
12356 12350 ASSERT(!hmeblkp->hblk_shared);
12357 12351
12358 12352 #if defined(lint) && !defined(VAC)
12359 12353 pfnum = pfnum;
12360 12354 cpu_flag = cpu_flag;
12361 12355 cache_flush_flag = cache_flush_flag;
12362 12356 #endif
12363 12357
12364 12358 /*
12365 12359 * There is no longer a need to protect against ctx being
12366 12360 * stolen here since we don't store the ctx in the TSB anymore.
12367 12361 */
12368 12362 #ifdef VAC
12369 12363 vcolor = addr_to_vcolor(addr);
12370 12364 #endif
12371 12365
12372 12366 /*
12373 12367 * We must hold the hat lock during the flush of TLB,
12374 12368 * to avoid a race with sfmmu_invalidate_ctx(), where
12375 12369 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12376 12370 * causing TLB demap routine to skip flush on that MMU.
12377 12371 * If the context on a MMU has already been set to
12378 12372 * INVALID_CONTEXT, we just get an extra flush on
12379 12373 * that MMU.
12380 12374 */
12381 12375 if (!hat_lock_held && !tlb_noflush)
12382 12376 hatlockp = sfmmu_hat_enter(sfmmup);
12383 12377
12384 12378 kpreempt_disable();
12385 12379 if (!tlb_noflush) {
12386 12380 /*
12387 12381 * Flush the TSB and TLB.
12388 12382 */
12389 12383 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12390 12384
12391 12385 cpuset = sfmmup->sfmmu_cpusran;
12392 12386 CPUSET_AND(cpuset, cpu_ready_set);
12393 12387 CPUSET_DEL(cpuset, CPU->cpu_id);
12394 12388
12395 12389 SFMMU_XCALL_STATS(sfmmup);
12396 12390
12397 12391 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr,
12398 12392 (uint64_t)sfmmup);
12399 12393
12400 12394 vtag_flushpage(addr, (uint64_t)sfmmup);
12401 12395 }
12402 12396
12403 12397 if (!hat_lock_held && !tlb_noflush)
12404 12398 sfmmu_hat_exit(hatlockp);
12405 12399
12406 12400 #ifdef VAC
12407 12401 /*
12408 12402 * Flush the D$
12409 12403 *
12410 12404 * Even if the ctx is stolen, we need to flush the
12411 12405 * cache. Our ctx stealer only flushes the TLBs.
12412 12406 */
12413 12407 if (cache_flush_flag == CACHE_FLUSH) {
12414 12408 if (cpu_flag & FLUSH_ALL_CPUS) {
12415 12409 cpuset = cpu_ready_set;
12416 12410 } else {
12417 12411 cpuset = sfmmup->sfmmu_cpusran;
12418 12412 CPUSET_AND(cpuset, cpu_ready_set);
12419 12413 }
12420 12414 CPUSET_DEL(cpuset, CPU->cpu_id);
12421 12415 SFMMU_XCALL_STATS(sfmmup);
12422 12416 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12423 12417 vac_flushpage(pfnum, vcolor);
12424 12418 }
12425 12419 #endif /* VAC */
12426 12420 kpreempt_enable();
12427 12421 }
12428 12422
12429 12423 /*
12430 12424 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12431 12425 * address and ctx. If noflush is set we do not currently do anything.
12432 12426 * This function may or may not be called with the HAT lock held.
12433 12427 */
12434 12428 static void
12435 12429 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12436 12430 int tlb_noflush, int hat_lock_held)
12437 12431 {
12438 12432 cpuset_t cpuset;
12439 12433 hatlock_t *hatlockp;
12440 12434
12441 12435 ASSERT(!hmeblkp->hblk_shared);
12442 12436
12443 12437 /*
12444 12438 * If the process is exiting we have nothing to do.
12445 12439 */
12446 12440 if (tlb_noflush)
12447 12441 return;
12448 12442
12449 12443 /*
12450 12444 * Flush TSB.
12451 12445 */
12452 12446 if (!hat_lock_held)
12453 12447 hatlockp = sfmmu_hat_enter(sfmmup);
12454 12448 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
12455 12449
12456 12450 kpreempt_disable();
12457 12451
12458 12452 cpuset = sfmmup->sfmmu_cpusran;
12459 12453 CPUSET_AND(cpuset, cpu_ready_set);
12460 12454 CPUSET_DEL(cpuset, CPU->cpu_id);
12461 12455
12462 12456 SFMMU_XCALL_STATS(sfmmup);
12463 12457 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup);
12464 12458
12465 12459 vtag_flushpage(addr, (uint64_t)sfmmup);
12466 12460
12467 12461 if (!hat_lock_held)
12468 12462 sfmmu_hat_exit(hatlockp);
12469 12463
12470 12464 kpreempt_enable();
12471 12465
12472 12466 }
12473 12467
12474 12468 /*
12475 12469 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall
12476 12470 * call handler that can flush a range of pages to save on xcalls.
12477 12471 */
12478 12472 static int sfmmu_xcall_save;
12479 12473
12480 12474 /*
12481 12475 * This routine is never used for demapping addresses backed by SRD hmeblks.
12482 12476 */
12483 12477 static void
12484 12478 sfmmu_tlb_range_demap(demap_range_t *dmrp)
12485 12479 {
12486 12480 sfmmu_t *sfmmup = dmrp->dmr_sfmmup;
12487 12481 hatlock_t *hatlockp;
12488 12482 cpuset_t cpuset;
12489 12483 uint64_t sfmmu_pgcnt;
12490 12484 pgcnt_t pgcnt = 0;
12491 12485 int pgunload = 0;
12492 12486 int dirtypg = 0;
12493 12487 caddr_t addr = dmrp->dmr_addr;
12494 12488 caddr_t eaddr;
12495 12489 uint64_t bitvec = dmrp->dmr_bitvec;
12496 12490
12497 12491 ASSERT(bitvec & 1);
12498 12492
12499 12493 /*
12500 12494 * Flush TSB and calculate number of pages to flush.
12501 12495 */
12502 12496 while (bitvec != 0) {
12503 12497 dirtypg = 0;
12504 12498 /*
12505 12499 * Find the first page to flush and then count how many
12506 12500 * pages there are after it that also need to be flushed.
12507 12501 * This way the number of TSB flushes is minimized.
12508 12502 */
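		/*
		 * For example, with dmr_addr = A and bitvec = 0b1011 (pages
		 * A, A+8K and A+24K need flushing, A+16K does not), the first
		 * pass covers the TSB range [A, A+16K) and the second pass
		 * [A+24K, A+32K); pgcnt ends up 4 (the whole span) and
		 * pgunload ends up 3 (the flushed pages).
		 */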
12509 12503 while ((bitvec & 1) == 0) {
12510 12504 pgcnt++;
12511 12505 addr += MMU_PAGESIZE;
12512 12506 bitvec >>= 1;
12513 12507 }
12514 12508 while (bitvec & 1) {
12515 12509 dirtypg++;
12516 12510 bitvec >>= 1;
12517 12511 }
12518 12512 eaddr = addr + ptob(dirtypg);
12519 12513 hatlockp = sfmmu_hat_enter(sfmmup);
12520 12514 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K);
12521 12515 sfmmu_hat_exit(hatlockp);
12522 12516 pgunload += dirtypg;
12523 12517 addr = eaddr;
12524 12518 pgcnt += dirtypg;
12525 12519 }
12526 12520
12527 12521 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr);
12528 12522 if (sfmmup->sfmmu_free == 0) {
12529 12523 addr = dmrp->dmr_addr;
12530 12524 bitvec = dmrp->dmr_bitvec;
12531 12525
12532 12526 /*
12533 12527 * make sure it has SFMMU_PGCNT_SHIFT bits only,
12534 12528 * as it will be used to pack argument for xt_some
12535 12529 */
12536 12530 ASSERT((pgcnt > 0) &&
12537 12531 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT)));
12538 12532
12539 12533 /*
12540 12534 * Encode pgcnt as (pgcnt - 1), and pass (pgcnt - 1) in
12541 12535 * the low 6 bits of sfmmup. This is doable since pgcnt
12542 12536 * is always >= 1.
12543 12537 */
12544 12538 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK));
12545 12539 sfmmu_pgcnt = (uint64_t)sfmmup |
12546 12540 ((pgcnt - 1) & SFMMU_PGCNT_MASK);
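		/*
		 * The TL1 handler presumably unpacks this as, roughly:
		 *
		 *	pgcnt  = (arg & SFMMU_PGCNT_MASK) + 1;
		 *	sfmmup = (sfmmu_t *)(arg & ~SFMMU_PGCNT_MASK);
		 *
		 * (vtag_flush_pgcnt_tl1 is TL1 assembly; this is only an
		 * illustrative sketch, not its actual source.)
		 */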
12547 12541
12548 12542 /*
12549 12543 * We must hold the hat lock during the flush of TLB,
12550 12544 * to avoid a race with sfmmu_invalidate_ctx(), where
12551 12545 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT,
12552 12546 * causing TLB demap routine to skip flush on that MMU.
12553 12547 * If the context on a MMU has already been set to
12554 12548 * INVALID_CONTEXT, we just get an extra flush on
12555 12549 * that MMU.
12556 12550 */
12557 12551 hatlockp = sfmmu_hat_enter(sfmmup);
12558 12552 kpreempt_disable();
12559 12553
12560 12554 cpuset = sfmmup->sfmmu_cpusran;
12561 12555 CPUSET_AND(cpuset, cpu_ready_set);
12562 12556 CPUSET_DEL(cpuset, CPU->cpu_id);
12563 12557
12564 12558 SFMMU_XCALL_STATS(sfmmup);
12565 12559 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr,
12566 12560 sfmmu_pgcnt);
12567 12561
12568 12562 for (; bitvec != 0; bitvec >>= 1) {
12569 12563 if (bitvec & 1)
12570 12564 vtag_flushpage(addr, (uint64_t)sfmmup);
12571 12565 addr += MMU_PAGESIZE;
12572 12566 }
12573 12567 kpreempt_enable();
12574 12568 sfmmu_hat_exit(hatlockp);
12575 12569
12576 12570 sfmmu_xcall_save += (pgunload-1);
12577 12571 }
12578 12572 dmrp->dmr_bitvec = 0;
12579 12573 }
12580 12574
12581 12575 /*
12582 12576 * In cases where we need to synchronize with TLB/TSB miss trap
12583 12577 * handlers, _and_ need to flush the TLB, it's a lot easier to
12584 12578 * throw away the context from the process than to do a
12585 12579 * special song and dance to keep things consistent for the
12586 12580 * handlers.
12587 12581 *
12588 12582 * Since the process suddenly ends up without a context and our caller
12589 12583 * holds the hat lock, threads that fault after this function is called
12590 12584 * will pile up on the lock. We can then do whatever we need to
12591 12585 * atomically from the context of the caller. The first blocked thread
12592 12586 * to resume executing will get the process a new context, and the
12593 12587 * process will resume executing.
12594 12588 *
12595 12589 * One added advantage of this approach is that on MMUs that
12596 12590 * support a "flush all" operation, we will delay the flush until
12597 12591 * cnum wrap-around, and then flush the TLB one time. This
12598 12592 * is rather rare, so it's a lot less expensive than making 8000
12599 12593 * x-calls to flush the TLB 8000 times.
12600 12594 *
12601 12595 * A per-process (PP) lock is used to synchronize ctx allocations in
12602 12596 * resume() and ctx invalidations here.
12603 12597 */
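/*
 * The sequence below: with interrupts disabled and the PP lock held, mark
 * every per-MMU cnum of this hat INVALID_CONTEXT; then cross-call the other
 * CPUs in sfmmu_cpusran to raise a tsb exception; finally, if this CPU is
 * currently running with one of the invalidated contexts, set its secondary
 * context to INVALID_CONTEXT as well.
 */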
12604 12598 static void
12605 12599 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12606 12600 {
12607 12601 cpuset_t cpuset;
12608 12602 int cnum, currcnum;
12609 12603 mmu_ctx_t *mmu_ctxp;
12610 12604 int i;
12611 12605 uint_t pstate_save;
12612 12606
12613 12607 SFMMU_STAT(sf_ctx_inv);
12614 12608
12615 12609 ASSERT(sfmmu_hat_lock_held(sfmmup));
12616 12610 ASSERT(sfmmup != ksfmmup);
12617 12611
12618 12612 kpreempt_disable();
12619 12613
12620 12614 mmu_ctxp = CPU_MMU_CTXP(CPU);
12621 12615 ASSERT(mmu_ctxp);
12622 12616 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12623 12617 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12624 12618
12625 12619 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12626 12620
12627 12621 pstate_save = sfmmu_disable_intrs();
12628 12622
12629 12623 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12630 12624 /* set HAT cnum invalid across all context domains. */
12631 12625 for (i = 0; i < max_mmu_ctxdoms; i++) {
12632 12626
12633 12627 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12634 12628 if (cnum == INVALID_CONTEXT) {
12635 12629 continue;
12636 12630 }
12637 12631
12638 12632 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12639 12633 }
12640 12634 membar_enter(); /* make sure globally visible to all CPUs */
12641 12635 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12642 12636
12643 12637 sfmmu_enable_intrs(pstate_save);
12644 12638
12645 12639 cpuset = sfmmup->sfmmu_cpusran;
12646 12640 CPUSET_DEL(cpuset, CPU->cpu_id);
12647 12641 CPUSET_AND(cpuset, cpu_ready_set);
12648 12642 if (!CPUSET_ISNULL(cpuset)) {
12649 12643 SFMMU_XCALL_STATS(sfmmup);
12650 12644 xt_some(cpuset, sfmmu_raise_tsb_exception,
12651 12645 (uint64_t)sfmmup, INVALID_CONTEXT);
12652 12646 xt_sync(cpuset);
12653 12647 SFMMU_STAT(sf_tsb_raise_exception);
12654 12648 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12655 12649 }
12656 12650
12657 12651 /*
12658 12652 * If the hat to-be-invalidated is the same as the current
12659 12653 * process on local CPU we need to invalidate
12660 12654 * this CPU context as well.
12661 12655 */
12662 12656 if ((sfmmu_getctx_sec() == currcnum) &&
12663 12657 (currcnum != INVALID_CONTEXT)) {
12664 12658 /* sets shared context to INVALID too */
12665 12659 sfmmu_setctx_sec(INVALID_CONTEXT);
12666 12660 sfmmu_clear_utsbinfo();
12667 12661 }
12668 12662
12669 12663 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12670 12664
12671 12665 kpreempt_enable();
12672 12666
12673 12667 /*
12674 12668 * we hold the hat lock, so nobody should allocate a context
12675 12669 * for us yet
12676 12670 */
12677 12671 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12678 12672 }
12679 12673
12680 12674 #ifdef VAC
12681 12675 /*
12682 12676 * We need to flush the cache on all cpus. It is possible that
12683 12677 * a process referenced a page as cacheable but has since exited
12684 12678 * and cleared the mapping list. We still need to flush it but have
12685 12679 * no state, so flushing on all cpus is the only alternative.
12686 12680 */
12687 12681 void
12688 12682 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12689 12683 {
12690 12684 cpuset_t cpuset;
12691 12685
12692 12686 kpreempt_disable();
12693 12687 cpuset = cpu_ready_set;
12694 12688 CPUSET_DEL(cpuset, CPU->cpu_id);
12695 12689 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12696 12690 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12697 12691 xt_sync(cpuset);
12698 12692 vac_flushpage(pfnum, vcolor);
12699 12693 kpreempt_enable();
12700 12694 }
12701 12695
12702 12696 void
12703 12697 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12704 12698 {
12705 12699 cpuset_t cpuset;
12706 12700
12707 12701 ASSERT(vcolor >= 0);
12708 12702
12709 12703 kpreempt_disable();
12710 12704 cpuset = cpu_ready_set;
12711 12705 CPUSET_DEL(cpuset, CPU->cpu_id);
12712 12706 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12713 12707 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12714 12708 xt_sync(cpuset);
12715 12709 vac_flushcolor(vcolor, pfnum);
12716 12710 kpreempt_enable();
12717 12711 }
12718 12712 #endif /* VAC */
12719 12713
12720 12714 /*
12721 12715 * We need to prevent processes from accessing the TSB using a cached physical
12722 12716 * address. It's alright if they try to access the TSB via virtual address
12723 12717 * since they will just fault on that virtual address once the mapping has
12724 12718 * been suspended.
12725 12719 */
12726 12720 #pragma weak sendmondo_in_recover
12727 12721
12728 12722 /* ARGSUSED */
12729 12723 static int
12730 12724 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo)
12731 12725 {
12732 12726 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12733 12727 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12734 12728 hatlock_t *hatlockp;
12735 12729 sf_scd_t *scdp;
12736 12730
12737 12731 if (flags != HAT_PRESUSPEND)
12738 12732 return (0);
12739 12733
12740 12734 /*
12741 12735 * If the tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must
12742 12736 * be a shared hat; in that case set the SCD's tsbinfo flag.
12743 12737 * If the tsb is not shared, sfmmup is a private hat; set
12744 12738 * its private tsbinfo flag.
12745 12739 */
12746 12740 hatlockp = sfmmu_hat_enter(sfmmup);
12747 12741 tsbinfop->tsb_flags |= TSB_RELOC_FLAG;
12748 12742
12749 12743 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) {
12750 12744 sfmmu_tsb_inv_ctx(sfmmup);
12751 12745 sfmmu_hat_exit(hatlockp);
12752 12746 } else {
12753 12747 /* release lock on the shared hat */
12754 12748 sfmmu_hat_exit(hatlockp);
12755 12749 /* sfmmup is a shared hat */
12756 12750 ASSERT(sfmmup->sfmmu_scdhat);
12757 12751 scdp = sfmmup->sfmmu_scdp;
12758 12752 ASSERT(scdp != NULL);
12759 12753 /* get private hat from the scd list */
12760 12754 mutex_enter(&scdp->scd_mutex);
12761 12755 sfmmup = scdp->scd_sf_list;
12762 12756 while (sfmmup != NULL) {
12763 12757 hatlockp = sfmmu_hat_enter(sfmmup);
12764 12758 /*
12765 12759 * We do not call sfmmu_tsb_inv_ctx here because
12766 12760 * sendmondo_in_recover check is only needed for
12767 12761 * sun4u.
12768 12762 */
12769 12763 sfmmu_invalidate_ctx(sfmmup);
12770 12764 sfmmu_hat_exit(hatlockp);
12771 12765 sfmmup = sfmmup->sfmmu_scd_link.next;
12772 12766
12773 12767 }
12774 12768 mutex_exit(&scdp->scd_mutex);
12775 12769 }
12776 12770 return (0);
12777 12771 }
12778 12772
12779 12773 static void
12780 12774 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup)
12781 12775 {
12782 12776 extern uint32_t sendmondo_in_recover;
12783 12777
12784 12778 ASSERT(sfmmu_hat_lock_held(sfmmup));
12785 12779
12786 12780 /*
12787 12781 * For Cheetah+ Erratum 25:
12788 12782 * Wait for any active recovery to finish. We can't risk
12789 12783 * relocating the TSB of the thread running mondo_recover_proc()
12790 12784 * since, if we did that, we would deadlock. The scenario we are
12791 12785 * trying to avoid is as follows:
12792 12786 *
12793 12787 * THIS CPU RECOVER CPU
12794 12788 * -------- -----------
12795 12789 * Begins recovery, walking through TSB
12796 12790 * hat_pagesuspend() TSB TTE
12797 12791 * TLB miss on TSB TTE, spins at TL1
12798 12792 * xt_sync()
12799 12793 * send_mondo_timeout()
12800 12794 * mondo_recover_proc()
12801 12795 * ((deadlocked))
12802 12796 *
12803 12797 * The second half of the workaround is that mondo_recover_proc()
12804 12798 * checks to see if the tsb_info has the RELOC flag set, and if it
12805 12799 * does, it skips over that TSB without ever touching tsbinfop->tsb_va
12806 12800 * and hence avoiding the TLB miss that could result in a deadlock.
12807 12801 */
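	/*
	 * sendmondo_in_recover is declared with #pragma weak above; on
	 * configurations that never define it, the weak symbol resolves to
	 * address 0, &sendmondo_in_recover is NULL, and this wait is skipped.
	 */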
12808 12802 if (&sendmondo_in_recover) {
12809 12803 membar_enter(); /* make sure RELOC flag visible */
12810 12804 while (sendmondo_in_recover) {
12811 12805 drv_usecwait(1);
12812 12806 membar_consumer();
12813 12807 }
12814 12808 }
12815 12809
12816 12810 sfmmu_invalidate_ctx(sfmmup);
12817 12811 }
12818 12812
12819 12813 /* ARGSUSED */
12820 12814 static int
12821 12815 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12822 12816 void *tsbinfo, pfn_t newpfn)
12823 12817 {
12824 12818 hatlock_t *hatlockp;
12825 12819 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12826 12820 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12827 12821
12828 12822 if (flags != HAT_POSTUNSUSPEND)
12829 12823 return (0);
12830 12824
12831 12825 hatlockp = sfmmu_hat_enter(sfmmup);
12832 12826
12833 12827 SFMMU_STAT(sf_tsb_reloc);
12834 12828
12835 12829 /*
12836 12830 * The process may have swapped out while we were relocating one
12837 12831 * of its TSBs. If so, don't bother doing the setup since the
12838 12832 * process can't be using the memory anymore.
12839 12833 */
12840 12834 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) {
12841 12835 ASSERT(va == tsbinfop->tsb_va);
12842 12836 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn);
12843 12837
12844 12838 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) {
12845 12839 sfmmu_inv_tsb(tsbinfop->tsb_va,
12846 12840 TSB_BYTES(tsbinfop->tsb_szc));
12847 12841 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED;
12848 12842 }
12849 12843 }
12850 12844
12851 12845 membar_exit();
12852 12846 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG;
12853 12847 cv_broadcast(&sfmmup->sfmmu_tsb_cv);
12854 12848
12855 12849 sfmmu_hat_exit(hatlockp);
12856 12850
12857 12851 return (0);
12858 12852 }
12859 12853
12860 12854 /*
12861 12855 * Allocate and initialize a tsb_info structure. Note that we may or may not
12862 12856 * allocate a TSB here, depending on the flags passed in.
12863 12857 */
12864 12858 static int
12865 12859 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12866 12860 uint_t flags, sfmmu_t *sfmmup)
12867 12861 {
12868 12862 int err;
12869 12863
12870 12864 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12871 12865 sfmmu_tsbinfo_cache, KM_SLEEP);
12872 12866
12873 12867 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12874 12868 tsb_szc, flags, sfmmup)) != 0) {
12875 12869 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12876 12870 SFMMU_STAT(sf_tsb_allocfail);
12877 12871 *tsbinfopp = NULL;
12878 12872 return (err);
12879 12873 }
12880 12874 SFMMU_STAT(sf_tsb_alloc);
12881 12875
12882 12876 /*
12883 12877 * Bump the TSB size counters for this TSB size.
12884 12878 */
12885 12879 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++;
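	/*
	 * (The cast above treats sfmmu_tsbsize_stat as a flat array of int
	 * counters indexed by TSB size code.)
	 */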
12886 12880 return (0);
12887 12881 }
12888 12882
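/*
 * Free the TSB memory described by tsbinfo (the tsb_info structure itself is
 * freed by sfmmu_tsbinfo_free()). If the TSB came from relocatable kernel
 * memory, the relocation callback installed at allocation time is removed
 * first.
 */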
12889 12883 static void
12890 12884 sfmmu_tsb_free(struct tsb_info *tsbinfo)
12891 12885 {
12892 12886 caddr_t tsbva = tsbinfo->tsb_va;
12893 12887 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc);
12894 12888 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache;
12895 12889 vmem_t *vmp = tsbinfo->tsb_vmp;
12896 12890
12897 12891 /*
12898 12892 * If we allocated this TSB from relocatable kernel memory, then we
12899 12893 * need to uninstall the callback handler.
12900 12894 */
12901 12895 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) {
12902 12896 uintptr_t slab_mask;
12903 12897 caddr_t slab_vaddr;
12904 12898 page_t **ppl;
12905 12899 int ret;
12906 12900
12907 12901 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena);
12908 12902 if (tsb_size > MMU_PAGESIZE4M)
12909 12903 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
12910 12904 else
12911 12905 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
12912 12906 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask);
12913 12907
12914 12908 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE);
12915 12909 ASSERT(ret == 0);
12916 12910 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo,
12917 12911 0, NULL);
12918 12912 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE);
12919 12913 }
12920 12914
12921 12915 if (kmem_cachep != NULL) {
12922 12916 kmem_cache_free(kmem_cachep, tsbva);
12923 12917 } else {
12924 12918 vmem_xfree(vmp, (void *)tsbva, tsb_size);
12925 12919 }
12926 12920 tsbinfo->tsb_va = (caddr_t)0xbad00bad;
12927 12921 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size);
12928 12922 }
12929 12923
12930 12924 static void
12931 12925 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo)
12932 12926 {
12933 12927 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) {
12934 12928 sfmmu_tsb_free(tsbinfo);
12935 12929 }
12936 12930 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo);
12937 12931
12938 12932 }
12939 12933
12940 12934 /*
12941 12935 * Setup all the references to physical memory for this tsbinfo.
12942 12936 * The underlying page(s) must be locked.
12943 12937 */
12944 12938 static void
12945 12939 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn)
12946 12940 {
12947 12941 ASSERT(pfn != PFN_INVALID);
12948 12942 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va));
12949 12943
12950 12944 #ifndef sun4v
12951 12945 if (tsbinfo->tsb_szc == 0) {
12952 12946 sfmmu_memtte(&tsbinfo->tsb_tte, pfn,
12953 12947 PROT_WRITE|PROT_READ, TTE8K);
12954 12948 } else {
12955 12949 /*
12956 12950 * Round down PA and use a large mapping; the handlers will
12957 12951 * compute the TSB pointer at the correct offset into the
12958 12952 * big virtual page. NOTE: this assumes all TSBs larger
12959 12953 * than 8K must come from physically contiguous slabs of
12960 12954 * size tsb_slab_size.
12961 12955 */
12962 12956 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask,
12963 12957 PROT_WRITE|PROT_READ, tsb_slab_ttesz);
12964 12958 }
12965 12959 tsbinfo->tsb_pa = ptob(pfn);
12966 12960
12967 12961 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */
12968 12962 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */
12969 12963
12970 12964 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte));
12971 12965 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte));
12972 12966 #else /* sun4v */
12973 12967 tsbinfo->tsb_pa = ptob(pfn);
12974 12968 #endif /* sun4v */
12975 12969 }
12976 12970
12977 12971
12978 12972 /*
12979 12973 * Returns zero on success, ENOMEM if over the high water mark,
12980 12974 * or EAGAIN if the caller needs to retry with a smaller TSB
12981 12975 * size (or specify TSB_FORCEALLOC if the allocation can't fail).
12982 12976 *
12983 12977 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC
12984 12978 * is specified and the TSB requested is PAGESIZE, though it
12985 12979 * may sleep waiting for memory if sufficient memory is not
12986 12980 * available.
12987 12981 */
12988 12982 static int
12989 12983 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask,
12990 12984 int tsbcode, uint_t flags, sfmmu_t *sfmmup)
12991 12985 {
12992 12986 caddr_t vaddr = NULL;
12993 12987 caddr_t slab_vaddr;
12994 12988 uintptr_t slab_mask;
12995 12989 int tsbbytes = TSB_BYTES(tsbcode);
12996 12990 int lowmem = 0;
12997 12991 struct kmem_cache *kmem_cachep = NULL;
12998 12992 vmem_t *vmp = NULL;
12999 12993 lgrp_id_t lgrpid = LGRP_NONE;
13000 12994 pfn_t pfn;
13001 12995 uint_t cbflags = HAC_SLEEP;
13002 12996 page_t **pplist;
13003 12997 int ret;
13004 12998
13005 12999 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena);
13006 13000 if (tsbbytes > MMU_PAGESIZE4M)
13007 13001 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT;
13008 13002 else
13009 13003 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT;
13010 13004
13011 13005 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK))
13012 13006 flags |= TSB_ALLOC;
13013 13007
13014 13008 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE);
13015 13009
13016 13010 tsbinfo->tsb_sfmmu = sfmmup;
13017 13011
13018 13012 /*
13019 13013 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and
13020 13014 * return.
13021 13015 */
13022 13016 if ((flags & TSB_ALLOC) == 0) {
13023 13017 tsbinfo->tsb_szc = tsbcode;
13024 13018 tsbinfo->tsb_ttesz_mask = tteszmask;
13025 13019 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef;
13026 13020 tsbinfo->tsb_pa = -1;
13027 13021 tsbinfo->tsb_tte.ll = 0;
13028 13022 tsbinfo->tsb_next = NULL;
13029 13023 tsbinfo->tsb_flags = TSB_SWAPPED;
13030 13024 tsbinfo->tsb_cache = NULL;
13031 13025 tsbinfo->tsb_vmp = NULL;
13032 13026 return (0);
13033 13027 }
13034 13028
13035 13029 #ifdef DEBUG
13036 13030 /*
13037 13031 * For debugging:
13038 13032 * Randomly force allocation failures every tsb_alloc_mtbf
13039 13033 * tries if TSB_FORCEALLOC is not specified. This will
13040 13034 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if
13041 13035 * it is even, to allow testing of both failure paths...
13042 13036 */
13043 13037 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) &&
13044 13038 (tsb_alloc_count++ == tsb_alloc_mtbf)) {
13045 13039 tsb_alloc_count = 0;
13046 13040 tsb_alloc_fail_mtbf++;
13047 13041 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN);
13048 13042 }
13049 13043 #endif /* DEBUG */
13050 13044
13051 13045 /*
13052 13046 * Enforce high water mark if we are not doing a forced allocation
13053 13047 * and are not shrinking a process' TSB.
13054 13048 */
13055 13049 if ((flags & TSB_SHRINK) == 0 &&
13056 13050 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) {
13057 13051 if ((flags & TSB_FORCEALLOC) == 0)
13058 13052 return (ENOMEM);
13059 13053 lowmem = 1;
13060 13054 }
13061 13055
13062 13056 /*
13063 13057 * Allocate from the correct location based upon the size of the TSB
13064 13058 * compared to the base page size, and what memory conditions dictate.
13065 13059 * Note we always do nonblocking allocations from the TSB arena since
13066 13060 * we don't want memory fragmentation to cause processes to block
13067 13061 * indefinitely waiting for memory; until the kernel algorithms that
13068 13062 * coalesce large pages are improved this is our best option.
13069 13063 *
13070 13064 * Algorithm:
13071 13065 * If allocating a "large" TSB (>8K), allocate from the
13072 13066 * appropriate kmem_tsb_default_arena vmem arena
13073 13067 * else if low on memory or the TSB_FORCEALLOC flag is set or
13074 13068 * tsb_forceheap is set
13075 13069 * Allocate from kernel heap via sfmmu_tsb8k_cache with
13076 13070 * KM_SLEEP (never fails)
13077 13071 * else
13078 13072 * Allocate from appropriate sfmmu_tsb_cache with
13079 13073 * KM_NOSLEEP
13080 13074 * endif
13081 13075 */
13082 13076 if (tsb_lgrp_affinity)
13083 13077 lgrpid = lgrp_home_id(curthread);
13084 13078 if (lgrpid == LGRP_NONE)
13085 13079 lgrpid = 0; /* use lgrp of boot CPU */
13086 13080
13087 13081 if (tsbbytes > MMU_PAGESIZE) {
13088 13082 if (tsbbytes > MMU_PAGESIZE4M) {
13089 13083 vmp = kmem_bigtsb_default_arena[lgrpid];
13090 13084 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13091 13085 0, 0, NULL, NULL, VM_NOSLEEP);
13092 13086 } else {
13093 13087 vmp = kmem_tsb_default_arena[lgrpid];
13094 13088 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes,
13095 13089 0, 0, NULL, NULL, VM_NOSLEEP);
13096 13090 }
13097 13091 #ifdef DEBUG
13098 13092 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) {
13099 13093 #else /* !DEBUG */
13100 13094 } else if (lowmem || (flags & TSB_FORCEALLOC)) {
13101 13095 #endif /* DEBUG */
13102 13096 kmem_cachep = sfmmu_tsb8k_cache;
13103 13097 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP);
13104 13098 ASSERT(vaddr != NULL);
13105 13099 } else {
13106 13100 kmem_cachep = sfmmu_tsb_cache[lgrpid];
13107 13101 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
13108 13102 }
13109 13103
13110 13104 tsbinfo->tsb_cache = kmem_cachep;
13111 13105 tsbinfo->tsb_vmp = vmp;
13112 13106
13113 13107 if (vaddr == NULL) {
13114 13108 return (EAGAIN);
13115 13109 }
13116 13110
13117 13111 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes);
13118 13112 kmem_cachep = tsbinfo->tsb_cache;
13119 13113
13120 13114 /*
13121 13115 * If we are allocating from outside the cage, then we need to
13122 13116 * register a relocation callback handler. Note that for now
13123 13117 * since pseudo mappings always hang off of the slab's root page,
13124 13118 * we need only lock the first 8K of the TSB slab. This is a bit
13125 13119 * hacky but it is good for performance.
13126 13120 */
13127 13121 if (kmem_cachep != sfmmu_tsb8k_cache) {
13128 13122 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask);
13129 13123 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE);
13130 13124 ASSERT(ret == 0);
13131 13125 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes,
13132 13126 cbflags, (void *)tsbinfo, &pfn, NULL);
13133 13127
13134 13128 /*
13135 13129 * Need to free up resources if we could not successfully
13136 13130 * add the callback function and return an error condition.
13137 13131 */
13138 13132 if (ret != 0) {
13139 13133 if (kmem_cachep) {
13140 13134 kmem_cache_free(kmem_cachep, vaddr);
13141 13135 } else {
13142 13136 vmem_xfree(vmp, (void *)vaddr, tsbbytes);
13143 13137 }
13144 13138 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE,
13145 13139 S_WRITE);
13146 13140 return (EAGAIN);
13147 13141 }
13148 13142 } else {
13149 13143 /*
13150 13144 * Since allocation of 8K TSBs from heap is rare and occurs
13151 13145 * during memory pressure we allocate them from permanent
13152 13146 * memory rather than using callbacks to get the PFN.
13153 13147 */
13154 13148 pfn = hat_getpfnum(kas.a_hat, vaddr);
13155 13149 }
13156 13150
13157 13151 tsbinfo->tsb_va = vaddr;
13158 13152 tsbinfo->tsb_szc = tsbcode;
13159 13153 tsbinfo->tsb_ttesz_mask = tteszmask;
13160 13154 tsbinfo->tsb_next = NULL;
13161 13155 tsbinfo->tsb_flags = 0;
13162 13156
13163 13157 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn);
13164 13158
13165 13159 sfmmu_inv_tsb(vaddr, tsbbytes);
13166 13160
13167 13161 if (kmem_cachep != sfmmu_tsb8k_cache) {
13168 13162 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE);
13169 13163 }
13170 13164
13171 13165 return (0);
13172 13166 }
13173 13167
13174 13168 /*
13175 13169 * Initialize per cpu tsb and per cpu tsbmiss_area
13176 13170 */
13177 13171 void
13178 13172 sfmmu_init_tsbs(void)
13179 13173 {
13180 13174 int i;
13181 13175 struct tsbmiss *tsbmissp;
13182 13176 struct kpmtsbm *kpmtsbmp;
13183 13177 #ifndef sun4v
13184 13178 extern int dcache_line_mask;
13185 13179 #endif /* sun4v */
13186 13180 extern uint_t vac_colors;
13187 13181
13188 13182 /*
13189 13183 * Init. tsb miss area.
13190 13184 */
13191 13185 tsbmissp = tsbmiss_area;
13192 13186
13193 13187 for (i = 0; i < NCPU; tsbmissp++, i++) {
13194 13188 /*
13195 13189 * initialize the tsbmiss area.
13196 13190 * Do this for all possible CPUs as some may be added
13197 13191 * while the system is running. There is no cost to this.
13198 13192 */
13199 13193 tsbmissp->ksfmmup = ksfmmup;
13200 13194 #ifndef sun4v
13201 13195 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask;
13202 13196 #endif /* sun4v */
13203 13197 tsbmissp->khashstart =
13204 13198 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash);
13205 13199 tsbmissp->uhashstart =
13206 13200 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash);
13207 13201 tsbmissp->khashsz = khmehash_num;
13208 13202 tsbmissp->uhashsz = uhmehash_num;
13209 13203 }
13210 13204
13211 13205 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B',
13212 13206 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0);
13213 13207
13214 13208 if (kpm_enable == 0)
13215 13209 return;
13216 13210
13217 13211 /* -- Begin KPM specific init -- */
13218 13212
13219 13213 if (kpm_smallpages) {
13220 13214 /*
13221 13215 * If we're using base pagesize pages for seg_kpm
13222 13216 * mappings, we use the kernel TSB since we can't afford
13223 13217 * to allocate a second huge TSB for these mappings.
13224 13218 */
13225 13219 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13226 13220 kpm_tsbsz = ktsb_szcode;
13227 13221 kpmsm_tsbbase = kpm_tsbbase;
13228 13222 kpmsm_tsbsz = kpm_tsbsz;
13229 13223 } else {
13230 13224 /*
13231 13225 * In VAC conflict case, just put the entries in the
13232 13226 * kernel 8K indexed TSB for now so we can find them.
13233 13227 * This could really be changed in the future if we feel
13234 13228 * the need...
13235 13229 */
13236 13230 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base;
13237 13231 kpmsm_tsbsz = ktsb_szcode;
13238 13232 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base;
13239 13233 kpm_tsbsz = ktsb4m_szcode;
13240 13234 }
13241 13235
13242 13236 kpmtsbmp = kpmtsbm_area;
13243 13237 for (i = 0; i < NCPU; kpmtsbmp++, i++) {
13244 13238 /*
13245 13239 * Initialize the kpmtsbm area.
13246 13240 * Do this for all possible CPUs as some may be added
13247 13241 * while the system is running. There is no cost to this.
13248 13242 */
13249 13243 kpmtsbmp->vbase = kpm_vbase;
13250 13244 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors;
13251 13245 kpmtsbmp->sz_shift = kpm_size_shift;
13252 13246 kpmtsbmp->kpmp_shift = kpmp_shift;
13253 13247 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft;
13254 13248 if (kpm_smallpages == 0) {
13255 13249 kpmtsbmp->kpmp_table_sz = kpmp_table_sz;
13256 13250 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table);
13257 13251 } else {
13258 13252 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz;
13259 13253 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable);
13260 13254 }
13261 13255 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash);
13262 13256 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG;
13263 13257 #ifdef DEBUG
13264 13258 kpmtsbmp->flags |= (kpm_tsbmtl) ? KPMTSBM_TLTSBM_FLAG : 0;
13265 13259 #endif /* DEBUG */
13266 13260 if (ktsb_phys)
13267 13261 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG;
13268 13262 }
13269 13263
13270 13264 /* -- End KPM specific init -- */
13271 13265 }
13272 13266
13273 13267 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */
13274 13268 struct tsb_info ktsb_info[2];
13275 13269
13276 13270 /*
13277 13271 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup.
13278 13272 */
13279 13273 void
13280 13274 sfmmu_init_ktsbinfo()
13281 13275 {
13282 13276 ASSERT(ksfmmup != NULL);
13283 13277 ASSERT(ksfmmup->sfmmu_tsb == NULL);
13284 13278 /*
13285 13279 * Allocate tsbinfos for kernel and copy in data
13286 13280	 * to make debugging and sun4v setup easier.
13287 13281 */
13288 13282 ktsb_info[0].tsb_sfmmu = ksfmmup;
13289 13283 ktsb_info[0].tsb_szc = ktsb_szcode;
13290 13284 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K;
13291 13285 ktsb_info[0].tsb_va = ktsb_base;
13292 13286 ktsb_info[0].tsb_pa = ktsb_pbase;
13293 13287 ktsb_info[0].tsb_flags = 0;
13294 13288 ktsb_info[0].tsb_tte.ll = 0;
13295 13289 ktsb_info[0].tsb_cache = NULL;
13296 13290
13297 13291 ktsb_info[1].tsb_sfmmu = ksfmmup;
13298 13292 ktsb_info[1].tsb_szc = ktsb4m_szcode;
13299 13293 ktsb_info[1].tsb_ttesz_mask = TSB4M;
13300 13294 ktsb_info[1].tsb_va = ktsb4m_base;
13301 13295 ktsb_info[1].tsb_pa = ktsb4m_pbase;
13302 13296 ktsb_info[1].tsb_flags = 0;
13303 13297 ktsb_info[1].tsb_tte.ll = 0;
13304 13298 ktsb_info[1].tsb_cache = NULL;
13305 13299
13306 13300 /* Link them into ksfmmup. */
13307 13301 ktsb_info[0].tsb_next = &ktsb_info[1];
13308 13302 ktsb_info[1].tsb_next = NULL;
13309 13303 ksfmmup->sfmmu_tsb = &ktsb_info[0];
13310 13304
13311 13305 sfmmu_setup_tsbinfo(ksfmmup);
13312 13306 }
13313 13307
13314 13308 /*
13315 13309 * Cache the last value returned from va_to_pa(). If the VA specified
13316 13310 * in the current call to cached_va_to_pa() maps to the same page as the
13317 13311 * previous call to cached_va_to_pa(), then compute the PA using
13318 13312 * cached info, else call va_to_pa().
13319 13313 *
13320 13314 * Note: this function is neither MT-safe nor consistent in the presence
13321 13315 * of multiple, interleaved threads. This function was created to enable
13322 13316 * an optimization used during boot (at a point when there's only one thread
13323 13317 * executing on the "boot CPU", and before startup_vm() has been called).
13324 13318 */
13325 13319 static uint64_t
13326 13320 cached_va_to_pa(void *vaddr)
13327 13321 {
13328 13322 static uint64_t prev_vaddr_base = 0;
13329 13323 static uint64_t prev_pfn = 0;
13330 13324
13331 13325 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) {
13332 13326 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET));
13333 13327 } else {
13334 13328 uint64_t pa = va_to_pa(vaddr);
13335 13329
13336 13330 if (pa != ((uint64_t)-1)) {
13337 13331 /*
13338 13332 * Computed physical address is valid. Cache its
13339 13333 * related info for the next cached_va_to_pa() call.
13340 13334 */
13341 13335 prev_pfn = pa & MMU_PAGEMASK;
13342 13336 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK;
13343 13337 }
13344 13338
13345 13339 return (pa);
13346 13340 }
13347 13341 }
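/*
 * Editorial sketch (not part of the patch): cached_va_to_pa() pays off when
 * successive lookups fall within the same 8K page, which is exactly the
 * access pattern of the nucleus hblk carving loops below.  The base address
 * and offset here are hypothetical; only the first lookup per page should
 * reach va_to_pa().
 */
static void
cached_va_to_pa_sketch(caddr_t base)
{
	uint64_t pa0, pa1;

	/* assume base is 8K-aligned so both lookups hit the same page */
	pa0 = cached_va_to_pa(base);		/* miss: calls va_to_pa() */
	pa1 = cached_va_to_pa(base + 0x40);	/* same page: served from cache */
	ASSERT(pa1 == pa0 + 0x40);
}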
13348 13342
13349 13343 /*
13350 13344 * Carve up our nucleus hblk region. We may allocate more hblks than
13351 13345 * asked due to rounding errors but we are guaranteed to have at least
13352 13346 * enough space to allocate the requested number of hblk8's and hblk1's.
13353 13347 */
13354 13348 void
13355 13349 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1)
13356 13350 {
13357 13351 struct hme_blk *hmeblkp;
13358 13352 size_t hme8blk_sz, hme1blk_sz;
13359 13353 size_t i;
13360 13354 size_t hblk8_bound;
13361 13355 ulong_t j = 0, k = 0;
13362 13356
13363 13357 ASSERT(addr != NULL && size != 0);
13364 13358
13365 13359 /* Need to use proper structure alignment */
13366 13360 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t));
13367 13361 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t));
13368 13362
13369 13363 nucleus_hblk8.list = (void *)addr;
13370 13364 nucleus_hblk8.index = 0;
13371 13365
13372 13366 /*
13373 13367 * Use as much memory as possible for hblk8's since we
13374 13368 * expect all bop_alloc'ed memory to be allocated in 8k chunks.
13375 13369 * We need to hold back enough space for the hblk1's which
13376 13370 * we'll allocate next.
13377 13371 */
13378 13372 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz;
13379 13373 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) {
13380 13374 hmeblkp = (struct hme_blk *)addr;
13381 13375 addr += hme8blk_sz;
13382 13376 hmeblkp->hblk_nuc_bit = 1;
13383 13377 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13384 13378 }
13385 13379 nucleus_hblk8.len = j;
13386 13380 ASSERT(j >= nhblk8);
13387 13381 SFMMU_STAT_ADD(sf_hblk8_ncreate, j);
13388 13382
13389 13383 nucleus_hblk1.list = (void *)addr;
13390 13384 nucleus_hblk1.index = 0;
13391 13385 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) {
13392 13386 hmeblkp = (struct hme_blk *)addr;
13393 13387 addr += hme1blk_sz;
13394 13388 hmeblkp->hblk_nuc_bit = 1;
13395 13389 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp);
13396 13390 }
13397 13391 ASSERT(k >= nhblk1);
13398 13392 nucleus_hblk1.len = k;
13399 13393 SFMMU_STAT_ADD(sf_hblk1_ncreate, k);
13400 13394 }
13401 13395
13402 13396 /*
13403 13397 * This function is currently not supported on this platform. For what
13404 13398 * it's supposed to do, see hat.c and hat_srmmu.c
13405 13399 */
13406 13400 /* ARGSUSED */
13407 13401 faultcode_t
13408 13402 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13409 13403 uint_t flags)
13410 13404 {
13411 13405 ASSERT(hat->sfmmu_xhat_provider == NULL);
13412 13406 return (FC_NOSUPPORT);
13413 13407 }
13414 13408
13415 13409 /*
13416 13410  * Searches the mapping list of the page for a mapping of the same size. If not
13417 13411 * found the corresponding bit is cleared in the p_index field. When large
13418 13412 * pages are more prevalent in the system, we can maintain the mapping list
13419 13413 * in order and we don't have to traverse the list each time. Just check the
13420 13414 * next and prev entries, and if both are of different size, we clear the bit.
13421 13415 */
13422 13416 static void
13423 13417 sfmmu_rm_large_mappings(page_t *pp, int ttesz)
13424 13418 {
13425 13419 struct sf_hment *sfhmep;
13426 13420 struct hme_blk *hmeblkp;
13427 13421 int index;
13428 13422 pgcnt_t npgs;
13429 13423
13430 13424 ASSERT(ttesz > TTE8K);
13431 13425
13432 13426 ASSERT(sfmmu_mlist_held(pp));
13433 13427
13434 13428 ASSERT(PP_ISMAPPED_LARGE(pp));
13435 13429
13436 13430 /*
13437 13431	 * Traverse the mapping list looking for another mapping of the same size,
13438 13432	 * since we only want to clear the index field if all mappings of
13439 13433	 * that size are gone.
13440 13434 */
13441 13435
13442 13436 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13443 13437 if (IS_PAHME(sfhmep))
13444 13438 continue;
13445 13439 hmeblkp = sfmmu_hmetohblk(sfhmep);
13446 13440 if (hmeblkp->hblk_xhat_bit)
13447 13441 continue;
13448 13442 if (hme_size(sfhmep) == ttesz) {
13449 13443 /*
13450 13444 * another mapping of the same size. don't clear index.
13451 13445 */
13452 13446 return;
13453 13447 }
13454 13448 }
13455 13449
13456 13450 /*
13457 13451 * Clear the p_index bit for large page.
13458 13452 */
13459 13453 index = PAGESZ_TO_INDEX(ttesz);
13460 13454 npgs = TTEPAGES(ttesz);
13461 13455 while (npgs-- > 0) {
13462 13456 ASSERT(pp->p_index & index);
13463 13457 pp->p_index &= ~index;
13464 13458 pp = PP_PAGENEXT(pp);
13465 13459 }
13466 13460 }
13467 13461
13468 13462 /*
13469 13463 * return supported features
13470 13464 */
13471 13465 /* ARGSUSED */
13472 13466 int
13473 13467 hat_supported(enum hat_features feature, void *arg)
13474 13468 {
13475 13469 switch (feature) {
13476 13470 case HAT_SHARED_PT:
13477 13471 case HAT_DYNAMIC_ISM_UNMAP:
13478 13472 case HAT_VMODSORT:
13479 13473 return (1);
13480 13474 case HAT_SHARED_REGIONS:
13481 13475 if (shctx_on)
13482 13476 return (1);
13483 13477 else
13484 13478 return (0);
13485 13479 default:
13486 13480 return (0);
13487 13481 }
13488 13482 }
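/*
 * Editorial sketch (not part of the patch): callers typically probe a
 * feature before relying on it.  The snippet below only illustrates the
 * calling convention of hat_supported(); the wrapper itself is made up.
 */
static int
uses_vmodsort_sketch(void)
{
	/* the second argument is unused for these simple feature queries */
	return (hat_supported(HAT_VMODSORT, NULL));
}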
13489 13483
13490 13484 void
13491 13485 hat_enter(struct hat *hat)
13492 13486 {
13493 13487 hatlock_t *hatlockp;
13494 13488
13495 13489 if (hat != ksfmmup) {
13496 13490 hatlockp = TSB_HASH(hat);
13497 13491 mutex_enter(HATLOCK_MUTEXP(hatlockp));
13498 13492 }
13499 13493 }
13500 13494
13501 13495 void
13502 13496 hat_exit(struct hat *hat)
13503 13497 {
13504 13498 hatlock_t *hatlockp;
13505 13499
13506 13500 if (hat != ksfmmup) {
13507 13501 hatlockp = TSB_HASH(hat);
13508 13502 mutex_exit(HATLOCK_MUTEXP(hatlockp));
13509 13503 }
13510 13504 }
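/*
 * Editorial sketch (not part of the patch): hat_enter()/hat_exit() bracket
 * updates to a user hat's state; the kernel hat (ksfmmup) is deliberately
 * skipped by both routines, so the same call sequence is safe for either.
 * The body of the critical section here is hypothetical.
 */
static void
hat_update_sketch(struct hat *hat)
{
	hat_enter(hat);
	/* ... modify hat-private state under the hashed hat mutex ... */
	hat_exit(hat);
}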
13511 13505
13512 13506 /*ARGSUSED*/
13513 13507 void
13514 13508 hat_reserve(struct as *as, caddr_t addr, size_t len)
13515 13509 {
13516 13510 }
13517 13511
13518 13512 static void
13519 13513 hat_kstat_init(void)
13520 13514 {
13521 13515 kstat_t *ksp;
13522 13516
13523 13517 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat",
13524 13518 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat),
13525 13519 KSTAT_FLAG_VIRTUAL);
13526 13520 if (ksp) {
13527 13521 ksp->ks_data = (void *) &sfmmu_global_stat;
13528 13522 kstat_install(ksp);
13529 13523 }
13530 13524 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat",
13531 13525 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat),
13532 13526 KSTAT_FLAG_VIRTUAL);
13533 13527 if (ksp) {
13534 13528 ksp->ks_data = (void *) &sfmmu_tsbsize_stat;
13535 13529 kstat_install(ksp);
13536 13530 }
13537 13531 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat",
13538 13532 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU,
13539 13533 KSTAT_FLAG_WRITABLE);
13540 13534 if (ksp) {
13541 13535 ksp->ks_update = sfmmu_kstat_percpu_update;
13542 13536 kstat_install(ksp);
13543 13537 }
13544 13538 }
13545 13539
13546 13540 /* ARGSUSED */
13547 13541 static int
13548 13542 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw)
13549 13543 {
13550 13544 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data;
13551 13545 struct tsbmiss *tsbm = tsbmiss_area;
13552 13546 struct kpmtsbm *kpmtsbm = kpmtsbm_area;
13553 13547 int i;
13554 13548
13555 13549 ASSERT(cpu_kstat);
13556 13550 if (rw == KSTAT_READ) {
13557 13551 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) {
13558 13552 cpu_kstat->sf_itlb_misses = 0;
13559 13553 cpu_kstat->sf_dtlb_misses = 0;
13560 13554 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses -
13561 13555 tsbm->uprot_traps;
13562 13556 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses +
13563 13557 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps;
13564 13558 cpu_kstat->sf_tsb_hits = 0;
13565 13559 cpu_kstat->sf_umod_faults = tsbm->uprot_traps;
13566 13560 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps;
13567 13561 }
13568 13562 } else {
13569 13563 /* KSTAT_WRITE is used to clear stats */
13570 13564 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) {
13571 13565 tsbm->utsb_misses = 0;
13572 13566 tsbm->ktsb_misses = 0;
13573 13567 tsbm->uprot_traps = 0;
13574 13568 tsbm->kprot_traps = 0;
13575 13569 kpmtsbm->kpm_dtlb_misses = 0;
13576 13570 kpmtsbm->kpm_tsb_misses = 0;
13577 13571 }
13578 13572 }
13579 13573 return (0);
13580 13574 }
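/*
 * Editorial note (not part of the patch), using hypothetical counter values:
 * if a CPU has recorded utsb_misses = 1000, uprot_traps = 200,
 * ktsb_misses = 500, kpm_tsb_misses = 50 and kprot_traps = 30, the read
 * path above reports
 *
 *	sf_utsb_misses = 1000 - 200    = 800
 *	sf_ktsb_misses = 500 + 50 - 30 = 520
 *	sf_umod_faults = 200
 *	sf_kmod_faults = 30
 *
 * i.e. protection traps are folded out of the miss counts and reported
 * separately as mod faults.
 */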
13581 13575
13582 13576 #ifdef DEBUG
13583 13577
13584 13578 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU];
13585 13579
13586 13580 /*
13587 13581 * A tte checker. *orig_old is the value we read before cas.
13588 13582 * *cur is the value returned by cas.
13589 13583 * *new is the desired value when we do the cas.
13590 13584 *
13591 13585 * *hmeblkp is currently unused.
13592 13586 */
13593 13587
13594 13588 /* ARGSUSED */
13595 13589 void
13596 13590 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp)
13597 13591 {
13598 13592 pfn_t i, j, k;
13599 13593 int cpuid = CPU->cpu_id;
13600 13594
13601 13595 gorig[cpuid] = orig_old;
13602 13596 gcur[cpuid] = cur;
13603 13597 gnew[cpuid] = new;
13604 13598
13605 13599 #ifdef lint
13606 13600 hmeblkp = hmeblkp;
13607 13601 #endif
13608 13602
13609 13603 if (TTE_IS_VALID(orig_old)) {
13610 13604 if (TTE_IS_VALID(cur)) {
13611 13605 i = TTE_TO_TTEPFN(orig_old);
13612 13606 j = TTE_TO_TTEPFN(cur);
13613 13607 k = TTE_TO_TTEPFN(new);
13614 13608 if (i != j) {
13615 13609 /* remap error? */
13616 13610 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j);
13617 13611 }
13618 13612
13619 13613 if (i != k) {
13620 13614 /* remap error? */
13621 13615 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k);
13622 13616 }
13623 13617 } else {
13624 13618 if (TTE_IS_VALID(new)) {
13625 13619 panic("chk_tte: invalid cur? ");
13626 13620 }
13627 13621
13628 13622 i = TTE_TO_TTEPFN(orig_old);
13629 13623 k = TTE_TO_TTEPFN(new);
13630 13624 if (i != k) {
13631 13625 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k);
13632 13626 }
13633 13627 }
13634 13628 } else {
13635 13629 if (TTE_IS_VALID(cur)) {
13636 13630 j = TTE_TO_TTEPFN(cur);
13637 13631 if (TTE_IS_VALID(new)) {
13638 13632 k = TTE_TO_TTEPFN(new);
13639 13633 if (j != k) {
13640 13634 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx",
13641 13635 j, k);
13642 13636 }
13643 13637 } else {
13644 13638 panic("chk_tte: why here?");
13645 13639 }
13646 13640 } else {
13647 13641 if (!TTE_IS_VALID(new)) {
13648 13642 panic("chk_tte: why here2 ?");
13649 13643 }
13650 13644 }
13651 13645 }
13652 13646 }
13653 13647
13654 13648 #endif /* DEBUG */
13655 13649
13656 13650 extern void prefetch_tsbe_read(struct tsbe *);
13657 13651 extern void prefetch_tsbe_write(struct tsbe *);
13658 13652
13659 13653
13660 13654 /*
13661 13655 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives
13662 13656 * us optimal performance on Cheetah+. You can only have 8 outstanding
13663 13657 * prefetches at any one time, so we opted for 7 read prefetches and 1 write
13664 13658 * prefetch to make the most utilization of the prefetch capability.
13665 13659  * prefetch to make the best use of the prefetch capability.
13666 13660 #define TSBE_PREFETCH_STRIDE (7)
13667 13661
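/*
 * Editorial note (not part of the patch): with 16-byte TSB entries and a
 * 64-byte P$ line there are 64 / 16 = 4 entries per line, so the copy loop
 * below issues one read prefetch every 4 entries.  It also stops prefetching
 * 4 * (TSBE_PREFETCH_STRIDE + 1) = 32 entries before the end of the old TSB
 * (the last_prefetch bound), so no prefetch is issued past the buffer.
 */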
13668 13662 void
13669 13663 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo)
13670 13664 {
13671 13665 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc);
13672 13666 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc);
13673 13667 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc);
13674 13668 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc);
13675 13669 struct tsbe *old;
13676 13670 struct tsbe *new;
13677 13671 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va;
13678 13672 uint64_t va;
13679 13673 int new_offset;
13680 13674 int i;
13681 13675 int vpshift;
13682 13676 int last_prefetch;
13683 13677
13684 13678 if (old_bytes == new_bytes) {
13685 13679 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes);
13686 13680 } else {
13687 13681
13688 13682 /*
13689 13683 * A TSBE is 16 bytes which means there are four TSBE's per
13690 13684 * P$ line (64 bytes), thus every 4 TSBE's we prefetch.
13691 13685 */
13692 13686 old = (struct tsbe *)old_tsbinfo->tsb_va;
13693 13687 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1));
13694 13688 for (i = 0; i < old_entries; i++, old++) {
13695 13689 if (((i & (4-1)) == 0) && (i < last_prefetch))
13696 13690 prefetch_tsbe_read(old);
13697 13691 if (!old->tte_tag.tag_invalid) {
13698 13692 /*
13699 13693 * We have a valid TTE to remap. Check the
13700 13694 * size. We won't remap 64K or 512K TTEs
13701 13695 * because they span more than one TSB entry
13702 13696 * and are indexed using an 8K virt. page.
13703 13697 * Ditto for 32M and 256M TTEs.
13704 13698 */
13705 13699 if (TTE_CSZ(&old->tte_data) == TTE64K ||
13706 13700 TTE_CSZ(&old->tte_data) == TTE512K)
13707 13701 continue;
13708 13702 if (mmu_page_sizes == max_mmu_page_sizes) {
13709 13703 if (TTE_CSZ(&old->tte_data) == TTE32M ||
13710 13704 TTE_CSZ(&old->tte_data) == TTE256M)
13711 13705 continue;
13712 13706 }
13713 13707
13714 13708 /* clear the lower 22 bits of the va */
13715 13709 va = *(uint64_t *)old << 22;
13716 13710 /* turn va into a virtual pfn */
13717 13711 va >>= 22 - TSB_START_SIZE;
13718 13712 /*
13719 13713 * or in bits from the offset in the tsb
13720 13714 * to get the real virtual pfn. These
13721 13715 * correspond to bits [21:13] in the va
13722 13716 */
13723 13717 vpshift =
13724 13718 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) &
13725 13719 0x1ff;
13726 13720 va |= (i << vpshift);
13727 13721 va >>= vpshift;
13728 13722 new_offset = va & (new_entries - 1);
13729 13723 new = new_base + new_offset;
13730 13724 prefetch_tsbe_write(new);
13731 13725 *new = *old;
13732 13726 }
13733 13727 }
13734 13728 }
13735 13729 }
13736 13730
13737 13731 /*
13738 13732 * unused in sfmmu
13739 13733 */
13740 13734 void
13741 13735 hat_dump(void)
13742 13736 {
13743 13737 }
13744 13738
13745 13739 /*
13746 13740 * Called when a thread is exiting and we have switched to the kernel address
13747 13741 * space. Perform the same VM initialization resume() uses when switching
13748 13742 * processes.
13749 13743 *
13750 13744 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but
13751 13745 * we call it anyway in case the semantics change in the future.
13752 13746 */
13753 13747 /*ARGSUSED*/
13754 13748 void
13755 13749 hat_thread_exit(kthread_t *thd)
13756 13750 {
13757 13751 uint_t pgsz_cnum;
13758 13752 uint_t pstate_save;
13759 13753
13760 13754 ASSERT(thd->t_procp->p_as == &kas);
13761 13755
13762 13756 pgsz_cnum = KCONTEXT;
13763 13757 #ifdef sun4u
13764 13758 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT);
13765 13759 #endif
13766 13760
13767 13761 /*
13768 13762 * Note that sfmmu_load_mmustate() is currently a no-op for
13769 13763 * kernel threads. We need to disable interrupts here,
13770 13764 * simply because otherwise sfmmu_load_mmustate() would panic
13771 13765 * if the caller does not disable interrupts.
13772 13766 */
13773 13767 pstate_save = sfmmu_disable_intrs();
13774 13768
13775 13769 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */
13776 13770 sfmmu_setctx_sec(pgsz_cnum);
13777 13771 sfmmu_load_mmustate(ksfmmup);
13778 13772 sfmmu_enable_intrs(pstate_save);
13779 13773 }
13780 13774
13781 13775
13782 13776 /*
13783 13777 * SRD support
13784 13778 */
13785 13779 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \
13786 13780 (((uintptr_t)(vp)) >> 11)) & \
13787 13781 srd_hashmask)
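/*
 * Editorial sketch (not part of the patch): SRD_HASH_FUNCTION() folds two
 * shifted copies of the vnode pointer together before masking, so vnodes
 * that differ only in their low allocation bits still spread across the
 * srd_buckets array.  The expanded, equivalent form:
 */
static uint_t
srd_hash_sketch(vnode_t *evp)
{
	uintptr_t v = (uintptr_t)evp;

	return ((uint_t)(((v >> 4) ^ (v >> 11)) & srd_hashmask));
}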
13788 13782
13789 13783 /*
13790 13784 * Attach the process to the srd struct associated with the exec vnode
13791 13785 * from which the process is started.
13792 13786 */
13793 13787 void
13794 13788 hat_join_srd(struct hat *sfmmup, vnode_t *evp)
13795 13789 {
13796 13790 uint_t hash = SRD_HASH_FUNCTION(evp);
13797 13791 sf_srd_t *srdp;
13798 13792 sf_srd_t *newsrdp;
13799 13793
13800 13794 ASSERT(sfmmup != ksfmmup);
13801 13795 ASSERT(sfmmup->sfmmu_srdp == NULL);
13802 13796
13803 13797 if (!shctx_on) {
13804 13798 return;
13805 13799 }
13806 13800
13807 13801 VN_HOLD(evp);
13808 13802
13809 13803 if (srd_buckets[hash].srdb_srdp != NULL) {
13810 13804 mutex_enter(&srd_buckets[hash].srdb_lock);
13811 13805 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13812 13806 srdp = srdp->srd_hash) {
13813 13807 if (srdp->srd_evp == evp) {
13814 13808 ASSERT(srdp->srd_refcnt >= 0);
13815 13809 sfmmup->sfmmu_srdp = srdp;
13816 13810 atomic_inc_32(
13817 13811 (volatile uint_t *)&srdp->srd_refcnt);
13818 13812 mutex_exit(&srd_buckets[hash].srdb_lock);
13819 13813 return;
13820 13814 }
13821 13815 }
13822 13816 mutex_exit(&srd_buckets[hash].srdb_lock);
13823 13817 }
13824 13818 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP);
13825 13819 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0);
13826 13820
13827 13821 newsrdp->srd_evp = evp;
13828 13822 newsrdp->srd_refcnt = 1;
13829 13823 newsrdp->srd_hmergnfree = NULL;
13830 13824 newsrdp->srd_ismrgnfree = NULL;
13831 13825
13832 13826 mutex_enter(&srd_buckets[hash].srdb_lock);
13833 13827 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL;
13834 13828 srdp = srdp->srd_hash) {
13835 13829 if (srdp->srd_evp == evp) {
13836 13830 ASSERT(srdp->srd_refcnt >= 0);
13837 13831 sfmmup->sfmmu_srdp = srdp;
13838 13832 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt);
13839 13833 mutex_exit(&srd_buckets[hash].srdb_lock);
13840 13834 kmem_cache_free(srd_cache, newsrdp);
13841 13835 return;
13842 13836 }
13843 13837 }
13844 13838 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp;
13845 13839 srd_buckets[hash].srdb_srdp = newsrdp;
13846 13840 sfmmup->sfmmu_srdp = newsrdp;
13847 13841
13848 13842 mutex_exit(&srd_buckets[hash].srdb_lock);
13849 13843
13850 13844 }
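/*
 * Editorial note (not part of the patch): hat_join_srd() above illustrates
 * the "search, allocate outside the lock, re-check, insert" pattern.  The
 * first pass over srd_buckets[hash] runs under srdb_lock; if no srd is
 * found, the lock is dropped before kmem_cache_alloc(..., KM_SLEEP) so the
 * bucket lock is never held across a sleeping allocation.  After reacquiring
 * the lock, the chain is searched again: a racing thread's srd is reused
 * (and the fresh allocation freed), otherwise the new srd is linked at the
 * head of the chain.  The same drop-and-recheck structure appears again in
 * hat_join_region() below.
 */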
13851 13845
13852 13846 static void
13853 13847 sfmmu_leave_srd(sfmmu_t *sfmmup)
13854 13848 {
13855 13849 vnode_t *evp;
13856 13850 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13857 13851 uint_t hash;
13858 13852 sf_srd_t **prev_srdpp;
13859 13853 sf_region_t *rgnp;
13860 13854 sf_region_t *nrgnp;
13861 13855 #ifdef DEBUG
13862 13856 int rgns = 0;
13863 13857 #endif
13864 13858 int i;
13865 13859
13866 13860 ASSERT(sfmmup != ksfmmup);
13867 13861 ASSERT(srdp != NULL);
13868 13862 ASSERT(srdp->srd_refcnt > 0);
13869 13863 ASSERT(sfmmup->sfmmu_scdp == NULL);
13870 13864 ASSERT(sfmmup->sfmmu_free == 1);
13871 13865
13872 13866 sfmmup->sfmmu_srdp = NULL;
13873 13867 evp = srdp->srd_evp;
13874 13868 ASSERT(evp != NULL);
13875 13869 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) {
13876 13870 VN_RELE(evp);
13877 13871 return;
13878 13872 }
13879 13873
13880 13874 hash = SRD_HASH_FUNCTION(evp);
13881 13875 mutex_enter(&srd_buckets[hash].srdb_lock);
13882 13876 for (prev_srdpp = &srd_buckets[hash].srdb_srdp;
13883 13877 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) {
13884 13878 if (srdp->srd_evp == evp) {
13885 13879 break;
13886 13880 }
13887 13881 }
13888 13882 if (srdp == NULL || srdp->srd_refcnt) {
13889 13883 mutex_exit(&srd_buckets[hash].srdb_lock);
13890 13884 VN_RELE(evp);
13891 13885 return;
13892 13886 }
13893 13887 *prev_srdpp = srdp->srd_hash;
13894 13888 mutex_exit(&srd_buckets[hash].srdb_lock);
13895 13889
13896 13890 ASSERT(srdp->srd_refcnt == 0);
13897 13891 VN_RELE(evp);
13898 13892
13899 13893 #ifdef DEBUG
13900 13894 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) {
13901 13895 ASSERT(srdp->srd_rgnhash[i] == NULL);
13902 13896 }
13903 13897 #endif /* DEBUG */
13904 13898
13905 13899 	/* free each hme region in the srd */
13906 13900 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) {
13907 13901 nrgnp = rgnp->rgn_next;
13908 13902 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid);
13909 13903 ASSERT(rgnp->rgn_refcnt == 0);
13910 13904 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13911 13905 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13912 13906 ASSERT(rgnp->rgn_hmeflags == 0);
13913 13907 ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp);
13914 13908 #ifdef DEBUG
13915 13909 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13916 13910 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13917 13911 }
13918 13912 rgns++;
13919 13913 #endif /* DEBUG */
13920 13914 kmem_cache_free(region_cache, rgnp);
13921 13915 }
13922 13916 ASSERT(rgns == srdp->srd_next_hmerid);
13923 13917
13924 13918 #ifdef DEBUG
13925 13919 rgns = 0;
13926 13920 #endif
13927 13921 	/* free each ism region in the srd */
13928 13922 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) {
13929 13923 nrgnp = rgnp->rgn_next;
13930 13924 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid);
13931 13925 ASSERT(rgnp->rgn_refcnt == 0);
13932 13926 ASSERT(rgnp->rgn_sfmmu_head == NULL);
13933 13927 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
13934 13928 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp);
13935 13929 #ifdef DEBUG
13936 13930 for (i = 0; i < MMU_PAGE_SIZES; i++) {
13937 13931 ASSERT(rgnp->rgn_ttecnt[i] == 0);
13938 13932 }
13939 13933 rgns++;
13940 13934 #endif /* DEBUG */
13941 13935 kmem_cache_free(region_cache, rgnp);
13942 13936 }
13943 13937 ASSERT(rgns == srdp->srd_next_ismrid);
13944 13938 ASSERT(srdp->srd_ismbusyrgns == 0);
13945 13939 ASSERT(srdp->srd_hmebusyrgns == 0);
13946 13940
13947 13941 srdp->srd_next_ismrid = 0;
13948 13942 srdp->srd_next_hmerid = 0;
13949 13943
13950 13944 bzero((void *)srdp->srd_ismrgnp,
13951 13945 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS);
13952 13946 bzero((void *)srdp->srd_hmergnp,
13953 13947 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS);
13954 13948
13955 13949 ASSERT(srdp->srd_scdp == NULL);
13956 13950 kmem_cache_free(srd_cache, srdp);
13957 13951 }
13958 13952
13959 13953 /* ARGSUSED */
13960 13954 static int
13961 13955 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags)
13962 13956 {
13963 13957 sf_srd_t *srdp = (sf_srd_t *)buf;
13964 13958 bzero(buf, sizeof (*srdp));
13965 13959
13966 13960 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL);
13967 13961 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL);
13968 13962 return (0);
13969 13963 }
13970 13964
13971 13965 /* ARGSUSED */
13972 13966 static void
13973 13967 sfmmu_srdcache_destructor(void *buf, void *cdrarg)
13974 13968 {
13975 13969 sf_srd_t *srdp = (sf_srd_t *)buf;
13976 13970
13977 13971 mutex_destroy(&srdp->srd_mutex);
13978 13972 mutex_destroy(&srdp->srd_scd_mutex);
13979 13973 }
13980 13974
13981 13975 /*
13982 13976 * The caller makes sure hat_join_region()/hat_leave_region() can't be called
13983 13977 * at the same time for the same process and address range. This is ensured by
13984 13978 * the fact that address space is locked as writer when a process joins the
13985 13979 * regions. Therefore there's no need to hold an srd lock during the entire
13986 13980 * execution of hat_join_region()/hat_leave_region().
13987 13981 */
13988 13982
13989 13983 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \
13990 13984 (((uintptr_t)(obj)) >> 11)) & \
13991 13985 srd_rgn_hashmask)
13992 13986 /*
13993 13987 * This routine implements the shared context functionality required when
13994 13988 * attaching a segment to an address space. It must be called from
13995 13989 * hat_share() for D(ISM) segments and from segvn_create() for segments
13996 13990 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13997 13991 * which is saved in the private segment data for hme segments and
13998 13992 * the ism_map structure for ism segments.
13999 13993 */
14000 13994 hat_region_cookie_t
14001 13995 hat_join_region(struct hat *sfmmup,
14002 13996 caddr_t r_saddr,
14003 13997 size_t r_size,
14004 13998 void *r_obj,
14005 13999 u_offset_t r_objoff,
14006 14000 uchar_t r_perm,
14007 14001 uchar_t r_pgszc,
14008 14002 hat_rgn_cb_func_t r_cb_function,
14009 14003 uint_t flags)
14010 14004 {
14011 14005 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14012 14006 uint_t rhash;
14013 14007 uint_t rid;
14014 14008 hatlock_t *hatlockp;
14015 14009 sf_region_t *rgnp;
14016 14010 sf_region_t *new_rgnp = NULL;
14017 14011 int i;
14018 14012 uint16_t *nextidp;
14019 14013 sf_region_t **freelistp;
14020 14014 int maxids;
14021 14015 sf_region_t **rarrp;
14022 14016 uint16_t *busyrgnsp;
14023 14017 ulong_t rttecnt;
14024 14018 uchar_t tteflag;
14025 14019 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 14020 int text = (r_type == HAT_REGION_TEXT);
14027 14021
14028 14022 if (srdp == NULL || r_size == 0) {
14029 14023 return (HAT_INVALID_REGION_COOKIE);
14030 14024 }
14031 14025
14032 14026 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 14027 ASSERT(sfmmup != ksfmmup);
14034 - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14028 + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
14035 14029 ASSERT(srdp->srd_refcnt > 0);
14036 14030 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 14031 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 14032 ASSERT(r_pgszc < mmu_page_sizes);
14039 14033 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 14034 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 14035 panic("hat_join_region: region addr or size is not aligned\n");
14042 14036 }
14043 14037
14044 14038
14045 14039 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14046 14040 SFMMU_REGION_HME;
14047 14041 /*
14048 14042 * Currently only support shared hmes for the read only main text
14049 14043 * region.
14050 14044 */
14051 14045 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) ||
14052 14046 (r_perm & PROT_WRITE))) {
14053 14047 return (HAT_INVALID_REGION_COOKIE);
14054 14048 }
14055 14049
14056 14050 rhash = RGN_HASH_FUNCTION(r_obj);
14057 14051
14058 14052 if (r_type == SFMMU_REGION_ISM) {
14059 14053 nextidp = &srdp->srd_next_ismrid;
14060 14054 freelistp = &srdp->srd_ismrgnfree;
14061 14055 maxids = SFMMU_MAX_ISM_REGIONS;
14062 14056 rarrp = srdp->srd_ismrgnp;
14063 14057 busyrgnsp = &srdp->srd_ismbusyrgns;
14064 14058 } else {
14065 14059 nextidp = &srdp->srd_next_hmerid;
14066 14060 freelistp = &srdp->srd_hmergnfree;
14067 14061 maxids = SFMMU_MAX_HME_REGIONS;
14068 14062 rarrp = srdp->srd_hmergnp;
14069 14063 busyrgnsp = &srdp->srd_hmebusyrgns;
14070 14064 }
14071 14065
14072 14066 mutex_enter(&srdp->srd_mutex);
14073 14067
14074 14068 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14075 14069 rgnp = rgnp->rgn_hash) {
14076 14070 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size &&
14077 14071 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff &&
14078 14072 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) {
14079 14073 break;
14080 14074 }
14081 14075 }
14082 14076
14083 14077 rfound:
14084 14078 if (rgnp != NULL) {
14085 14079 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14086 14080 ASSERT(rgnp->rgn_cb_function == r_cb_function);
14087 14081 ASSERT(rgnp->rgn_refcnt >= 0);
14088 14082 rid = rgnp->rgn_id;
14089 14083 ASSERT(rid < maxids);
14090 14084 ASSERT(rarrp[rid] == rgnp);
14091 14085 ASSERT(rid < *nextidp);
14092 14086 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14093 14087 mutex_exit(&srdp->srd_mutex);
14094 14088 if (new_rgnp != NULL) {
14095 14089 kmem_cache_free(region_cache, new_rgnp);
14096 14090 }
14097 14091 if (r_type == SFMMU_REGION_HME) {
14098 14092 int myjoin =
14099 14093 (sfmmup == astosfmmu(curthread->t_procp->p_as));
14100 14094
14101 14095 sfmmu_link_to_hmeregion(sfmmup, rgnp);
14102 14096 /*
14103 14097 * bitmap should be updated after linking sfmmu on
14104 14098 * region list so that pageunload() doesn't skip
14105 14099 * TSB/TLB flush. As soon as bitmap is updated another
14106 14100 * thread in this process can already start accessing
14107 14101 * this region.
14108 14102 */
14109 14103 /*
14110 14104 * Normally ttecnt accounting is done as part of
14111 14105 * pagefault handling. But a process may not take any
14112 14106 * pagefaults on shared hmeblks created by some other
14113 14107 * process. To compensate for this assume that the
14114 14108 * entire region will end up faulted in using
14115 14109 * the region's pagesize.
14116 14110 *
14117 14111 */
14118 14112 if (r_pgszc > TTE8K) {
14119 14113 tteflag = 1 << r_pgszc;
14120 14114 if (disable_large_pages & tteflag) {
14121 14115 tteflag = 0;
14122 14116 }
14123 14117 } else {
14124 14118 tteflag = 0;
14125 14119 }
14126 14120 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) {
14127 14121 hatlockp = sfmmu_hat_enter(sfmmup);
14128 14122 sfmmup->sfmmu_rtteflags |= tteflag;
14129 14123 sfmmu_hat_exit(hatlockp);
14130 14124 }
14131 14125 hatlockp = sfmmu_hat_enter(sfmmup);
14132 14126
14133 14127 /*
14134 14128 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M
14135 14129 * region to allow for large page allocation failure.
14136 14130 */
14137 14131 if (r_pgszc >= TTE4M) {
14138 14132 sfmmup->sfmmu_tsb0_4minflcnt +=
14139 14133 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14140 14134 }
14141 14135
14142 14136 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14143 14137 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14144 14138 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14145 14139 rttecnt);
14146 14140
14147 14141 if (text && r_pgszc >= TTE4M &&
14148 14142 (tteflag || ((disable_large_pages >> TTE4M) &
14149 14143 ((1 << (r_pgszc - TTE4M + 1)) - 1))) &&
14150 14144 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) {
14151 14145 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG);
14152 14146 }
14153 14147
14154 14148 sfmmu_hat_exit(hatlockp);
14155 14149 /*
14156 14150 * On Panther we need to make sure TLB is programmed
14157 14151 * to accept 32M/256M pages. Call
14158 14152 * sfmmu_check_page_sizes() now to make sure TLB is
14159 14153 * setup before making hmeregions visible to other
14160 14154 * threads.
14161 14155 */
14162 14156 sfmmu_check_page_sizes(sfmmup, 1);
14163 14157 hatlockp = sfmmu_hat_enter(sfmmup);
14164 14158 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14165 14159
14166 14160 /*
14167 14161 * if context is invalid tsb miss exception code will
14168 14162 * call sfmmu_check_page_sizes() and update tsbmiss
14169 14163 * area later.
14170 14164 */
14171 14165 kpreempt_disable();
14172 14166 if (myjoin &&
14173 14167 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum
14174 14168 != INVALID_CONTEXT)) {
14175 14169 struct tsbmiss *tsbmp;
14176 14170
14177 14171 tsbmp = &tsbmiss_area[CPU->cpu_id];
14178 14172 ASSERT(sfmmup == tsbmp->usfmmup);
14179 14173 BT_SET(tsbmp->shmermap, rid);
14180 14174 if (r_pgszc > TTE64K) {
14181 14175 tsbmp->uhat_rtteflags |= tteflag;
14182 14176 }
14183 14177
14184 14178 }
14185 14179 kpreempt_enable();
14186 14180
14187 14181 sfmmu_hat_exit(hatlockp);
14188 14182 ASSERT((hat_region_cookie_t)((uint64_t)rid) !=
14189 14183 HAT_INVALID_REGION_COOKIE);
14190 14184 } else {
14191 14185 hatlockp = sfmmu_hat_enter(sfmmup);
14192 14186 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid);
14193 14187 sfmmu_hat_exit(hatlockp);
14194 14188 }
14195 14189 ASSERT(rid < maxids);
14196 14190
14197 14191 if (r_type == SFMMU_REGION_ISM) {
14198 14192 sfmmu_find_scd(sfmmup);
14199 14193 }
14200 14194 return ((hat_region_cookie_t)((uint64_t)rid));
14201 14195 }
14202 14196
14203 14197 ASSERT(new_rgnp == NULL);
14204 14198
14205 14199 if (*busyrgnsp >= maxids) {
14206 14200 mutex_exit(&srdp->srd_mutex);
14207 14201 return (HAT_INVALID_REGION_COOKIE);
14208 14202 }
14209 14203
14210 14204 ASSERT(MUTEX_HELD(&srdp->srd_mutex));
14211 14205 if (*freelistp != NULL) {
14212 14206 rgnp = *freelistp;
14213 14207 *freelistp = rgnp->rgn_next;
14214 14208 ASSERT(rgnp->rgn_id < *nextidp);
14215 14209 ASSERT(rgnp->rgn_id < maxids);
14216 14210 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE);
14217 14211 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK)
14218 14212 == r_type);
14219 14213 ASSERT(rarrp[rgnp->rgn_id] == rgnp);
14220 14214 ASSERT(rgnp->rgn_hmeflags == 0);
14221 14215 } else {
14222 14216 /*
14223 14217 * release local locks before memory allocation.
14224 14218 */
14225 14219 mutex_exit(&srdp->srd_mutex);
14226 14220
14227 14221 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP);
14228 14222
14229 14223 mutex_enter(&srdp->srd_mutex);
14230 14224 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL;
14231 14225 rgnp = rgnp->rgn_hash) {
14232 14226 if (rgnp->rgn_saddr == r_saddr &&
14233 14227 rgnp->rgn_size == r_size &&
14234 14228 rgnp->rgn_obj == r_obj &&
14235 14229 rgnp->rgn_objoff == r_objoff &&
14236 14230 rgnp->rgn_perm == r_perm &&
14237 14231 rgnp->rgn_pgszc == r_pgszc) {
14238 14232 break;
14239 14233 }
14240 14234 }
14241 14235 if (rgnp != NULL) {
14242 14236 goto rfound;
14243 14237 }
14244 14238
14245 14239 if (*nextidp >= maxids) {
14246 14240 mutex_exit(&srdp->srd_mutex);
14247 14241 goto fail;
14248 14242 }
14249 14243 rgnp = new_rgnp;
14250 14244 new_rgnp = NULL;
14251 14245 rgnp->rgn_id = (*nextidp)++;
14252 14246 ASSERT(rgnp->rgn_id < maxids);
14253 14247 ASSERT(rarrp[rgnp->rgn_id] == NULL);
14254 14248 rarrp[rgnp->rgn_id] = rgnp;
14255 14249 }
14256 14250
14257 14251 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14258 14252 ASSERT(rgnp->rgn_hmeflags == 0);
14259 14253 #ifdef DEBUG
14260 14254 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14261 14255 ASSERT(rgnp->rgn_ttecnt[i] == 0);
14262 14256 }
14263 14257 #endif
14264 14258 rgnp->rgn_saddr = r_saddr;
14265 14259 rgnp->rgn_size = r_size;
14266 14260 rgnp->rgn_obj = r_obj;
14267 14261 rgnp->rgn_objoff = r_objoff;
14268 14262 rgnp->rgn_perm = r_perm;
14269 14263 rgnp->rgn_pgszc = r_pgszc;
14270 14264 rgnp->rgn_flags = r_type;
14271 14265 rgnp->rgn_refcnt = 0;
14272 14266 rgnp->rgn_cb_function = r_cb_function;
14273 14267 rgnp->rgn_hash = srdp->srd_rgnhash[rhash];
14274 14268 srdp->srd_rgnhash[rhash] = rgnp;
14275 14269 (*busyrgnsp)++;
14276 14270 ASSERT(*busyrgnsp <= maxids);
14277 14271 goto rfound;
14278 14272
14279 14273 fail:
14280 14274 ASSERT(new_rgnp != NULL);
14281 14275 kmem_cache_free(region_cache, new_rgnp);
14282 14276 return (HAT_INVALID_REGION_COOKIE);
14283 14277 }
14284 14278
14285 14279 /*
14286 14280 * This function implements the shared context functionality required
14287 14281 * when detaching a segment from an address space. It must be called
14288 14282 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(),
14289 14283 * for segments with a valid region_cookie.
14290 14284 * It will also be called from all seg_vn routines which change a
14291 14285 * segment's attributes such as segvn_setprot(), segvn_setpagesize(),
14292 14286 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault
14293 14287 * from segvn_fault().
14294 14288 */
14295 14289 void
14296 14290 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags)
14297 14291 {
14298 14292 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14299 14293 sf_scd_t *scdp;
14300 14294 uint_t rhash;
14301 14295 uint_t rid = (uint_t)((uint64_t)rcookie);
14302 14296 hatlock_t *hatlockp = NULL;
14303 14297 sf_region_t *rgnp;
14304 14298 sf_region_t **prev_rgnpp;
14305 14299 sf_region_t *cur_rgnp;
14306 14300 void *r_obj;
14307 14301 int i;
14308 14302 caddr_t r_saddr;
14309 14303 caddr_t r_eaddr;
14310 14304 size_t r_size;
14311 14305 uchar_t r_pgszc;
14312 14306 uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14313 14307
14314 14308 ASSERT(sfmmup != ksfmmup);
14315 14309 ASSERT(srdp != NULL);
14316 14310 ASSERT(srdp->srd_refcnt > 0);
14317 14311 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14318 14312 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14319 14313 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL);
14320 14314
14321 14315 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM :
14322 14316 SFMMU_REGION_HME;
14323 14317
14324 14318 if (r_type == SFMMU_REGION_ISM) {
14325 14319 ASSERT(SFMMU_IS_ISMRID_VALID(rid));
14326 14320 ASSERT(rid < SFMMU_MAX_ISM_REGIONS);
14327 14321 rgnp = srdp->srd_ismrgnp[rid];
14328 14322 } else {
14329 14323 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 14324 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 14325 rgnp = srdp->srd_hmergnp[rid];
14332 14326 }
14333 14327 ASSERT(rgnp != NULL);
14334 14328 ASSERT(rgnp->rgn_id == rid);
14335 14329 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 14330 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337 - ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14331 + ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
14338 14332
14339 14333 ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340 14334 if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341 14335 xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342 14336 rgnp->rgn_size, 0, NULL);
14343 14337 }
14344 14338
14345 14339 if (sfmmup->sfmmu_free) {
14346 14340 ulong_t rttecnt;
14347 14341 r_pgszc = rgnp->rgn_pgszc;
14348 14342 r_size = rgnp->rgn_size;
14349 14343
14350 14344 ASSERT(sfmmup->sfmmu_scdp == NULL);
14351 14345 if (r_type == SFMMU_REGION_ISM) {
14352 14346 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14353 14347 } else {
14354 14348 /* update shme rgns ttecnt in sfmmu_ttecnt */
14355 14349 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14356 14350 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14357 14351
14358 14352 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc],
14359 14353 -rttecnt);
14360 14354
14361 14355 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14362 14356 }
14363 14357 } else if (r_type == SFMMU_REGION_ISM) {
14364 14358 hatlockp = sfmmu_hat_enter(sfmmup);
14365 14359 ASSERT(rid < srdp->srd_next_ismrid);
14366 14360 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14367 14361 scdp = sfmmup->sfmmu_scdp;
14368 14362 if (scdp != NULL &&
14369 14363 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) {
14370 14364 sfmmu_leave_scd(sfmmup, r_type);
14371 14365 ASSERT(sfmmu_hat_lock_held(sfmmup));
14372 14366 }
14373 14367 sfmmu_hat_exit(hatlockp);
14374 14368 } else {
14375 14369 ulong_t rttecnt;
14376 14370 r_pgszc = rgnp->rgn_pgszc;
14377 14371 r_saddr = rgnp->rgn_saddr;
14378 14372 r_size = rgnp->rgn_size;
14379 14373 r_eaddr = r_saddr + r_size;
14380 14374
14381 14375 ASSERT(r_type == SFMMU_REGION_HME);
14382 14376 hatlockp = sfmmu_hat_enter(sfmmup);
14383 14377 ASSERT(rid < srdp->srd_next_hmerid);
14384 14378 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid);
14385 14379
14386 14380 /*
14387 14381 * If region is part of an SCD call sfmmu_leave_scd().
14388 14382 * Otherwise if process is not exiting and has valid context
14389 14383 * just drop the context on the floor to lose stale TLB
14390 14384 * entries and force the update of tsb miss area to reflect
14391 14385 * the new region map. After that clean our TSB entries.
14392 14386 */
14393 14387 scdp = sfmmup->sfmmu_scdp;
14394 14388 if (scdp != NULL &&
14395 14389 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
14396 14390 sfmmu_leave_scd(sfmmup, r_type);
14397 14391 ASSERT(sfmmu_hat_lock_held(sfmmup));
14398 14392 }
14399 14393 sfmmu_invalidate_ctx(sfmmup);
14400 14394
14401 14395 i = TTE8K;
14402 14396 while (i < mmu_page_sizes) {
14403 14397 if (rgnp->rgn_ttecnt[i] != 0) {
14404 14398 sfmmu_unload_tsb_range(sfmmup, r_saddr,
14405 14399 r_eaddr, i);
14406 14400 if (i < TTE4M) {
14407 14401 i = TTE4M;
14408 14402 continue;
14409 14403 } else {
14410 14404 break;
14411 14405 }
14412 14406 }
14413 14407 i++;
14414 14408 }
14415 14409 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. */
14416 14410 if (r_pgszc >= TTE4M) {
14417 14411 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14418 14412 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
14419 14413 rttecnt);
14420 14414 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt;
14421 14415 }
14422 14416
14423 14417 /* update shme rgns ttecnt in sfmmu_ttecnt */
14424 14418 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc);
14425 14419 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt);
14426 14420 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt);
14427 14421
14428 14422 sfmmu_hat_exit(hatlockp);
14429 14423 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) {
14430 14424 /* sfmmup left the scd, grow private tsb */
14431 14425 sfmmu_check_page_sizes(sfmmup, 1);
14432 14426 } else {
14433 14427 sfmmu_check_page_sizes(sfmmup, 0);
14434 14428 }
14435 14429 }
14436 14430
14437 14431 if (r_type == SFMMU_REGION_HME) {
14438 14432 sfmmu_unlink_from_hmeregion(sfmmup, rgnp);
14439 14433 }
14440 14434
14441 14435 r_obj = rgnp->rgn_obj;
14442 14436 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) {
14443 14437 return;
14444 14438 }
14445 14439
14446 14440 /*
14447 14441 * looks like nobody uses this region anymore. Free it.
14448 14442 */
14449 14443 rhash = RGN_HASH_FUNCTION(r_obj);
14450 14444 mutex_enter(&srdp->srd_mutex);
14451 14445 for (prev_rgnpp = &srdp->srd_rgnhash[rhash];
14452 14446 (cur_rgnp = *prev_rgnpp) != NULL;
14453 14447 prev_rgnpp = &cur_rgnp->rgn_hash) {
14454 14448 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) {
14455 14449 break;
14456 14450 }
14457 14451 }
14458 14452
14459 14453 if (cur_rgnp == NULL) {
14460 14454 mutex_exit(&srdp->srd_mutex);
14461 14455 return;
14462 14456 }
14463 14457
14464 14458 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14465 14459 *prev_rgnpp = rgnp->rgn_hash;
14466 14460 if (r_type == SFMMU_REGION_ISM) {
14467 14461 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14468 14462 ASSERT(rid < srdp->srd_next_ismrid);
14469 14463 rgnp->rgn_next = srdp->srd_ismrgnfree;
14470 14464 srdp->srd_ismrgnfree = rgnp;
14471 14465 ASSERT(srdp->srd_ismbusyrgns > 0);
14472 14466 srdp->srd_ismbusyrgns--;
14473 14467 mutex_exit(&srdp->srd_mutex);
14474 14468 return;
14475 14469 }
14476 14470 mutex_exit(&srdp->srd_mutex);
14477 14471
14478 14472 /*
14479 14473 * Destroy region's hmeblks.
14480 14474 */
14481 14475 sfmmu_unload_hmeregion(srdp, rgnp);
14482 14476
14483 14477 rgnp->rgn_hmeflags = 0;
14484 14478
14485 14479 ASSERT(rgnp->rgn_sfmmu_head == NULL);
14486 14480 ASSERT(rgnp->rgn_id == rid);
14487 14481 for (i = 0; i < MMU_PAGE_SIZES; i++) {
14488 14482 rgnp->rgn_ttecnt[i] = 0;
14489 14483 }
14490 14484 rgnp->rgn_flags |= SFMMU_REGION_FREE;
14491 14485 mutex_enter(&srdp->srd_mutex);
14492 14486 ASSERT(rid < srdp->srd_next_hmerid);
14493 14487 rgnp->rgn_next = srdp->srd_hmergnfree;
14494 14488 srdp->srd_hmergnfree = rgnp;
14495 14489 ASSERT(srdp->srd_hmebusyrgns > 0);
14496 14490 srdp->srd_hmebusyrgns--;
14497 14491 mutex_exit(&srdp->srd_mutex);
14498 14492 }
14499 14493
14500 14494 /*
14501 14495 * For now only called for hmeblk regions and not for ISM regions.
14502 14496 */
14503 14497 void
14504 14498 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie)
14505 14499 {
14506 14500 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14507 14501 uint_t rid = (uint_t)((uint64_t)rcookie);
14508 14502 sf_region_t *rgnp;
14509 14503 sf_rgn_link_t *rlink;
14510 14504 sf_rgn_link_t *hrlink;
14511 14505 ulong_t rttecnt;
14512 14506
14513 14507 ASSERT(sfmmup != ksfmmup);
14514 14508 ASSERT(srdp != NULL);
14515 14509 ASSERT(srdp->srd_refcnt > 0);
14516 14510
14517 14511 ASSERT(rid < srdp->srd_next_hmerid);
14518 14512 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14519 14513 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14520 14514
14521 14515 rgnp = srdp->srd_hmergnp[rid];
14522 14516 ASSERT(rgnp->rgn_refcnt > 0);
14523 14517 ASSERT(rgnp->rgn_id == rid);
14524 14518 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME);
14525 14519 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14526 14520
14527 14521 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt);
14528 14522
14529 14523 /* LINTED: constant in conditional context */
14530 14524 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0);
14531 14525 ASSERT(rlink != NULL);
14532 14526 mutex_enter(&rgnp->rgn_mutex);
14533 14527 ASSERT(rgnp->rgn_sfmmu_head != NULL);
14534 14528 /* LINTED: constant in conditional context */
14535 14529 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0);
14536 14530 ASSERT(hrlink != NULL);
14537 14531 ASSERT(hrlink->prev == NULL);
14538 14532 rlink->next = rgnp->rgn_sfmmu_head;
14539 14533 rlink->prev = NULL;
14540 14534 hrlink->prev = sfmmup;
14541 14535 /*
14542 14536 * make sure rlink's next field is correct
14543 14537 * before making this link visible.
14544 14538 */
14545 14539 membar_stst();
14546 14540 rgnp->rgn_sfmmu_head = sfmmup;
14547 14541 mutex_exit(&rgnp->rgn_mutex);
14548 14542
14549 14543 /* update sfmmu_ttecnt with the shme rgn ttecnt */
14550 14544 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc);
14551 14545 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt);
14552 14546 /* update tsb0 inflation count */
14553 14547 if (rgnp->rgn_pgszc >= TTE4M) {
14554 14548 sfmmup->sfmmu_tsb0_4minflcnt +=
14555 14549 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2);
14556 14550 }
14557 14551 /*
14558 14552 * Update regionid bitmask without hat lock since no other thread
14559 14553 * can update this region bitmask right now.
14560 14554 */
14561 14555 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid);
14562 14556 }
14563 14557
14564 14558 /* ARGSUSED */
14565 14559 static int
14566 14560 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags)
14567 14561 {
14568 14562 sf_region_t *rgnp = (sf_region_t *)buf;
14569 14563 bzero(buf, sizeof (*rgnp));
14570 14564
14571 14565 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL);
14572 14566
14573 14567 return (0);
14574 14568 }
14575 14569
14576 14570 /* ARGSUSED */
14577 14571 static void
14578 14572 sfmmu_rgncache_destructor(void *buf, void *cdrarg)
14579 14573 {
14580 14574 sf_region_t *rgnp = (sf_region_t *)buf;
14581 14575 mutex_destroy(&rgnp->rgn_mutex);
14582 14576 }
14583 14577
14584 14578 static int
14585 14579 sfrgnmap_isnull(sf_region_map_t *map)
14586 14580 {
14587 14581 int i;
14588 14582
14589 14583 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14590 14584 if (map->bitmap[i] != 0) {
14591 14585 return (0);
14592 14586 }
14593 14587 }
14594 14588 return (1);
14595 14589 }
14596 14590
14597 14591 static int
14598 14592 sfhmergnmap_isnull(sf_hmeregion_map_t *map)
14599 14593 {
14600 14594 int i;
14601 14595
14602 14596 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) {
14603 14597 if (map->bitmap[i] != 0) {
14604 14598 return (0);
14605 14599 }
14606 14600 }
14607 14601 return (1);
14608 14602 }
14609 14603
14610 14604 #ifdef DEBUG
14611 14605 static void
14612 14606 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist)
14613 14607 {
14614 14608 sfmmu_t *sp;
14615 14609 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
14616 14610
14617 14611 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) {
14618 14612 ASSERT(srdp == sp->sfmmu_srdp);
14619 14613 if (sp == sfmmup) {
14620 14614 if (onlist) {
14621 14615 return;
14622 14616 } else {
 14623  14617 				panic("shctx: sfmmu 0x%p found on scd "
14624 14618 "list 0x%p", (void *)sfmmup,
14625 14619 (void *)*headp);
14626 14620 }
14627 14621 }
14628 14622 }
14629 14623 if (onlist) {
14630 14624 panic("shctx: sfmmu 0x%p not found on scd list 0x%p",
14631 14625 (void *)sfmmup, (void *)*headp);
14632 14626 } else {
14633 14627 return;
14634 14628 }
14635 14629 }
14636 14630 #else /* DEBUG */
14637 14631 #define check_scd_sfmmu_list(headp, sfmmup, onlist)
14638 14632 #endif /* DEBUG */
14639 14633
14640 14634 /*
14641 14635 * Removes an sfmmu from the SCD sfmmu list.
14642 14636 */
14643 14637 static void
14644 14638 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14645 14639 {
14646 14640 ASSERT(sfmmup->sfmmu_srdp != NULL);
14647 14641 check_scd_sfmmu_list(headp, sfmmup, 1);
14648 14642 if (sfmmup->sfmmu_scd_link.prev != NULL) {
14649 14643 ASSERT(*headp != sfmmup);
14650 14644 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next =
14651 14645 sfmmup->sfmmu_scd_link.next;
14652 14646 } else {
14653 14647 ASSERT(*headp == sfmmup);
14654 14648 *headp = sfmmup->sfmmu_scd_link.next;
14655 14649 }
14656 14650 if (sfmmup->sfmmu_scd_link.next != NULL) {
14657 14651 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev =
14658 14652 sfmmup->sfmmu_scd_link.prev;
14659 14653 }
14660 14654 }
14661 14655
14662 14656
14663 14657 /*
14664 14658 * Adds an sfmmu to the start of the queue.
14665 14659 */
14666 14660 static void
14667 14661 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup)
14668 14662 {
14669 14663 check_scd_sfmmu_list(headp, sfmmup, 0);
14670 14664 sfmmup->sfmmu_scd_link.prev = NULL;
14671 14665 sfmmup->sfmmu_scd_link.next = *headp;
14672 14666 if (*headp != NULL)
14673 14667 (*headp)->sfmmu_scd_link.prev = sfmmup;
14674 14668 *headp = sfmmup;
14675 14669 }
14676 14670
14677 14671 /*
14678 14672 * Remove an scd from the start of the queue.
14679 14673 */
14680 14674 static void
14681 14675 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp)
14682 14676 {
14683 14677 if (scdp->scd_prev != NULL) {
14684 14678 ASSERT(*headp != scdp);
14685 14679 scdp->scd_prev->scd_next = scdp->scd_next;
14686 14680 } else {
14687 14681 ASSERT(*headp == scdp);
14688 14682 *headp = scdp->scd_next;
14689 14683 }
14690 14684
14691 14685 if (scdp->scd_next != NULL) {
14692 14686 scdp->scd_next->scd_prev = scdp->scd_prev;
14693 14687 }
14694 14688 }
14695 14689
14696 14690 /*
14697 14691 * Add an scd to the start of the queue.
14698 14692 */
14699 14693 static void
14700 14694 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp)
14701 14695 {
14702 14696 scdp->scd_prev = NULL;
14703 14697 scdp->scd_next = *headp;
14704 14698 if (*headp != NULL) {
14705 14699 (*headp)->scd_prev = scdp;
14706 14700 }
14707 14701 *headp = scdp;
14708 14702 }
14709 14703
14710 14704 static int
14711 14705 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp)
14712 14706 {
14713 14707 uint_t rid;
14714 14708 uint_t i;
14715 14709 uint_t j;
14716 14710 ulong_t w;
14717 14711 sf_region_t *rgnp;
14718 14712 ulong_t tte8k_cnt = 0;
14719 14713 ulong_t tte4m_cnt = 0;
14720 14714 uint_t tsb_szc;
14721 14715 sfmmu_t *scsfmmup = scdp->scd_sfmmup;
14722 14716 sfmmu_t *ism_hatid;
14723 14717 struct tsb_info *newtsb;
14724 14718 int szc;
14725 14719
14726 14720 ASSERT(srdp != NULL);
14727 14721
14728 14722 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14729 14723 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14730 14724 continue;
14731 14725 }
14732 14726 j = 0;
14733 14727 while (w) {
14734 14728 if (!(w & 0x1)) {
14735 14729 j++;
14736 14730 w >>= 1;
14737 14731 continue;
14738 14732 }
14739 14733 rid = (i << BT_ULSHIFT) | j;
14740 14734 j++;
14741 14735 w >>= 1;
14742 14736
14743 14737 if (rid < SFMMU_MAX_HME_REGIONS) {
14744 14738 rgnp = srdp->srd_hmergnp[rid];
14745 14739 ASSERT(rgnp->rgn_id == rid);
14746 14740 ASSERT(rgnp->rgn_refcnt > 0);
14747 14741
14748 14742 if (rgnp->rgn_pgszc < TTE4M) {
14749 14743 tte8k_cnt += rgnp->rgn_size >>
14750 14744 TTE_PAGE_SHIFT(TTE8K);
14751 14745 } else {
14752 14746 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14753 14747 tte4m_cnt += rgnp->rgn_size >>
14754 14748 TTE_PAGE_SHIFT(TTE4M);
14755 14749 /*
14756 14750 * Inflate SCD tsb0 by preallocating
14757 14751 * 1/4 8k ttecnt for 4M regions to
14758 14752 * allow for lgpg alloc failure.
14759 14753 */
14760 14754 tte8k_cnt += rgnp->rgn_size >>
14761 14755 (TTE_PAGE_SHIFT(TTE8K) + 2);
14762 14756 }
14763 14757 } else {
14764 14758 rid -= SFMMU_MAX_HME_REGIONS;
14765 14759 rgnp = srdp->srd_ismrgnp[rid];
14766 14760 ASSERT(rgnp->rgn_id == rid);
14767 14761 ASSERT(rgnp->rgn_refcnt > 0);
14768 14762
14769 14763 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14770 14764 ASSERT(ism_hatid->sfmmu_ismhat);
14771 14765
14772 14766 for (szc = 0; szc < TTE4M; szc++) {
14773 14767 tte8k_cnt +=
14774 14768 ism_hatid->sfmmu_ttecnt[szc] <<
14775 14769 TTE_BSZS_SHIFT(szc);
14776 14770 }
14777 14771
14778 14772 ASSERT(rgnp->rgn_pgszc >= TTE4M);
14779 14773 if (rgnp->rgn_pgszc >= TTE4M) {
14780 14774 tte4m_cnt += rgnp->rgn_size >>
14781 14775 TTE_PAGE_SHIFT(TTE4M);
14782 14776 }
14783 14777 }
14784 14778 }
14785 14779 }
14786 14780
14787 14781 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt);
14788 14782
14789 14783 /* Allocate both the SCD TSBs here. */
14790 14784 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14791 14785 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) &&
14792 14786 (tsb_szc <= TSB_4M_SZCODE ||
14793 14787 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb,
14794 14788 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K,
14795 14789 TSB_ALLOC, scsfmmup))) {
14796 14790
14797 14791 SFMMU_STAT(sf_scd_1sttsb_allocfail);
14798 14792 return (TSB_ALLOCFAIL);
14799 14793 } else {
14800 14794 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX;
14801 14795
14802 14796 if (tte4m_cnt) {
14803 14797 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt);
14804 14798 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc,
14805 14799 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) &&
14806 14800 (tsb_szc <= TSB_4M_SZCODE ||
14807 14801 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE,
14808 14802 TSB4M|TSB32M|TSB256M,
14809 14803 TSB_ALLOC, scsfmmup))) {
14810 14804 /*
14811 14805 * If we fail to allocate the 2nd shared tsb,
14812 14806 * just free the 1st tsb, return failure.
14813 14807 */
14814 14808 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb);
14815 14809 SFMMU_STAT(sf_scd_2ndtsb_allocfail);
14816 14810 return (TSB_ALLOCFAIL);
14817 14811 } else {
14818 14812 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL);
14819 14813 newtsb->tsb_flags |= TSB_SHAREDCTX;
14820 14814 scsfmmup->sfmmu_tsb->tsb_next = newtsb;
14821 14815 SFMMU_STAT(sf_scd_2ndtsb_alloc);
14822 14816 }
14823 14817 }
14824 14818 SFMMU_STAT(sf_scd_1sttsb_alloc);
14825 14819 }
14826 14820 return (TSB_SUCCESS);
14827 14821 }
14828 14822
14829 14823 static void
14830 14824 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu)
14831 14825 {
14832 14826 while (scd_sfmmu->sfmmu_tsb != NULL) {
14833 14827 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next;
14834 14828 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb);
14835 14829 scd_sfmmu->sfmmu_tsb = next;
14836 14830 }
14837 14831 }
14838 14832
14839 14833 /*
14840 14834 * Link the sfmmu onto the hme region list.
14841 14835 */
14842 14836 void
14843 14837 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14844 14838 {
14845 14839 uint_t rid;
14846 14840 sf_rgn_link_t *rlink;
14847 14841 sfmmu_t *head;
14848 14842 sf_rgn_link_t *hrlink;
14849 14843
14850 14844 rid = rgnp->rgn_id;
14851 14845 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14852 14846
14853 14847 /* LINTED: constant in conditional context */
14854 14848 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1);
14855 14849 ASSERT(rlink != NULL);
14856 14850 mutex_enter(&rgnp->rgn_mutex);
14857 14851 if ((head = rgnp->rgn_sfmmu_head) == NULL) {
14858 14852 rlink->next = NULL;
14859 14853 rlink->prev = NULL;
14860 14854 /*
14861 14855 * make sure rlink's next field is NULL
14862 14856 * before making this link visible.
14863 14857 */
14864 14858 membar_stst();
14865 14859 rgnp->rgn_sfmmu_head = sfmmup;
14866 14860 } else {
14867 14861 /* LINTED: constant in conditional context */
14868 14862 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0);
14869 14863 ASSERT(hrlink != NULL);
14870 14864 ASSERT(hrlink->prev == NULL);
14871 14865 rlink->next = head;
14872 14866 rlink->prev = NULL;
14873 14867 hrlink->prev = sfmmup;
14874 14868 /*
14875 14869 * make sure rlink's next field is correct
14876 14870 * before making this link visible.
14877 14871 */
14878 14872 membar_stst();
14879 14873 rgnp->rgn_sfmmu_head = sfmmup;
14880 14874 }
14881 14875 mutex_exit(&rgnp->rgn_mutex);
14882 14876 }
14883 14877
14884 14878 /*
14885 14879 * Unlink the sfmmu from the hme region list.
14886 14880 */
14887 14881 void
14888 14882 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp)
14889 14883 {
14890 14884 uint_t rid;
14891 14885 sf_rgn_link_t *rlink;
14892 14886
14893 14887 rid = rgnp->rgn_id;
14894 14888 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14895 14889
14896 14890 /* LINTED: constant in conditional context */
14897 14891 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
14898 14892 ASSERT(rlink != NULL);
14899 14893 mutex_enter(&rgnp->rgn_mutex);
14900 14894 if (rgnp->rgn_sfmmu_head == sfmmup) {
14901 14895 sfmmu_t *next = rlink->next;
14902 14896 rgnp->rgn_sfmmu_head = next;
14903 14897 /*
14904 14898 * if we are stopped by xc_attention() after this
14905 14899 * point the forward link walking in
14906 14900 * sfmmu_rgntlb_demap() will work correctly since the
14907 14901 * head correctly points to the next element.
14908 14902 */
14909 14903 membar_stst();
14910 14904 rlink->next = NULL;
14911 14905 ASSERT(rlink->prev == NULL);
14912 14906 if (next != NULL) {
14913 14907 sf_rgn_link_t *nrlink;
14914 14908 /* LINTED: constant in conditional context */
14915 14909 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14916 14910 ASSERT(nrlink != NULL);
14917 14911 ASSERT(nrlink->prev == sfmmup);
14918 14912 nrlink->prev = NULL;
14919 14913 }
14920 14914 } else {
14921 14915 sfmmu_t *next = rlink->next;
14922 14916 sfmmu_t *prev = rlink->prev;
14923 14917 sf_rgn_link_t *prlink;
14924 14918
14925 14919 ASSERT(prev != NULL);
14926 14920 /* LINTED: constant in conditional context */
14927 14921 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0);
14928 14922 ASSERT(prlink != NULL);
14929 14923 ASSERT(prlink->next == sfmmup);
14930 14924 prlink->next = next;
14931 14925 /*
14932 14926 * if we are stopped by xc_attention()
14933 14927 * after this point the forward link walking
14934 14928 * will work correctly since the prev element
14935 14929 * correctly points to the next element.
14936 14930 */
14937 14931 membar_stst();
14938 14932 rlink->next = NULL;
14939 14933 rlink->prev = NULL;
14940 14934 if (next != NULL) {
14941 14935 sf_rgn_link_t *nrlink;
14942 14936 /* LINTED: constant in conditional context */
14943 14937 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0);
14944 14938 ASSERT(nrlink != NULL);
14945 14939 ASSERT(nrlink->prev == sfmmup);
14946 14940 nrlink->prev = prev;
14947 14941 }
14948 14942 }
14949 14943 mutex_exit(&rgnp->rgn_mutex);
14950 14944 }
14951 14945
14952 14946 /*
14953 14947 * Link scd sfmmu onto ism or hme region list for each region in the
14954 14948 * scd region map.
14955 14949 */
14956 14950 void
14957 14951 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp)
14958 14952 {
14959 14953 uint_t rid;
14960 14954 uint_t i;
14961 14955 uint_t j;
14962 14956 ulong_t w;
14963 14957 sf_region_t *rgnp;
14964 14958 sfmmu_t *scsfmmup;
14965 14959
14966 14960 scsfmmup = scdp->scd_sfmmup;
14967 14961 ASSERT(scsfmmup->sfmmu_scdhat);
14968 14962 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
14969 14963 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
14970 14964 continue;
14971 14965 }
14972 14966 j = 0;
14973 14967 while (w) {
14974 14968 if (!(w & 0x1)) {
14975 14969 j++;
14976 14970 w >>= 1;
14977 14971 continue;
14978 14972 }
14979 14973 rid = (i << BT_ULSHIFT) | j;
14980 14974 j++;
14981 14975 w >>= 1;
14982 14976
14983 14977 if (rid < SFMMU_MAX_HME_REGIONS) {
14984 14978 rgnp = srdp->srd_hmergnp[rid];
14985 14979 ASSERT(rgnp->rgn_id == rid);
14986 14980 ASSERT(rgnp->rgn_refcnt > 0);
14987 14981 sfmmu_link_to_hmeregion(scsfmmup, rgnp);
14988 14982 } else {
14989 14983 sfmmu_t *ism_hatid = NULL;
14990 14984 ism_ment_t *ism_ment;
14991 14985 rid -= SFMMU_MAX_HME_REGIONS;
14992 14986 rgnp = srdp->srd_ismrgnp[rid];
14993 14987 ASSERT(rgnp->rgn_id == rid);
14994 14988 ASSERT(rgnp->rgn_refcnt > 0);
14995 14989
14996 14990 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
14997 14991 ASSERT(ism_hatid->sfmmu_ismhat);
14998 14992 ism_ment = &scdp->scd_ism_links[rid];
14999 14993 ism_ment->iment_hat = scsfmmup;
15000 14994 ism_ment->iment_base_va = rgnp->rgn_saddr;
15001 14995 mutex_enter(&ism_mlist_lock);
15002 14996 iment_add(ism_ment, ism_hatid);
15003 14997 mutex_exit(&ism_mlist_lock);
15004 14998
15005 14999 }
15006 15000 }
15007 15001 }
15008 15002 }
15009 15003 /*
15010 15004 * Unlink scd sfmmu from ism or hme region list for each region in the
15011 15005 * scd region map.
15012 15006 */
15013 15007 void
15014 15008 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp)
15015 15009 {
15016 15010 uint_t rid;
15017 15011 uint_t i;
15018 15012 uint_t j;
15019 15013 ulong_t w;
15020 15014 sf_region_t *rgnp;
15021 15015 sfmmu_t *scsfmmup;
15022 15016
15023 15017 scsfmmup = scdp->scd_sfmmup;
15024 15018 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) {
15025 15019 if ((w = scdp->scd_region_map.bitmap[i]) == 0) {
15026 15020 continue;
15027 15021 }
15028 15022 j = 0;
15029 15023 while (w) {
15030 15024 if (!(w & 0x1)) {
15031 15025 j++;
15032 15026 w >>= 1;
15033 15027 continue;
15034 15028 }
15035 15029 rid = (i << BT_ULSHIFT) | j;
15036 15030 j++;
15037 15031 w >>= 1;
15038 15032
15039 15033 if (rid < SFMMU_MAX_HME_REGIONS) {
15040 15034 rgnp = srdp->srd_hmergnp[rid];
15041 15035 ASSERT(rgnp->rgn_id == rid);
15042 15036 ASSERT(rgnp->rgn_refcnt > 0);
15043 15037 sfmmu_unlink_from_hmeregion(scsfmmup,
15044 15038 rgnp);
15045 15039
15046 15040 } else {
15047 15041 sfmmu_t *ism_hatid = NULL;
15048 15042 ism_ment_t *ism_ment;
15049 15043 rid -= SFMMU_MAX_HME_REGIONS;
15050 15044 rgnp = srdp->srd_ismrgnp[rid];
15051 15045 ASSERT(rgnp->rgn_id == rid);
15052 15046 ASSERT(rgnp->rgn_refcnt > 0);
15053 15047
15054 15048 ism_hatid = (sfmmu_t *)rgnp->rgn_obj;
15055 15049 ASSERT(ism_hatid->sfmmu_ismhat);
15056 15050 ism_ment = &scdp->scd_ism_links[rid];
15057 15051 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup);
15058 15052 ASSERT(ism_ment->iment_base_va ==
15059 15053 rgnp->rgn_saddr);
15060 15054 mutex_enter(&ism_mlist_lock);
15061 15055 iment_sub(ism_ment, ism_hatid);
15062 15056 mutex_exit(&ism_mlist_lock);
15063 15057
15064 15058 }
15065 15059 }
15066 15060 }
15067 15061 }
15068 15062 /*
 15069  15063  * Allocates and initialises a new SCD structure. This is called with
15070 15064 * the srd_scd_mutex held and returns with the reference count
15071 15065 * initialised to 1.
15072 15066 */
15073 15067 static sf_scd_t *
15074 15068 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map)
15075 15069 {
15076 15070 sf_scd_t *new_scdp;
15077 15071 sfmmu_t *scsfmmup;
15078 15072 int i;
15079 15073
15080 15074 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex));
15081 15075 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP);
15082 15076
15083 15077 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP);
15084 15078 new_scdp->scd_sfmmup = scsfmmup;
15085 15079 scsfmmup->sfmmu_srdp = srdp;
15086 15080 scsfmmup->sfmmu_scdp = new_scdp;
15087 15081 scsfmmup->sfmmu_tsb0_4minflcnt = 0;
15088 15082 scsfmmup->sfmmu_scdhat = 1;
15089 15083 CPUSET_ALL(scsfmmup->sfmmu_cpusran);
15090 15084 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
15091 15085
15092 15086 ASSERT(max_mmu_ctxdoms > 0);
15093 15087 for (i = 0; i < max_mmu_ctxdoms; i++) {
15094 15088 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
15095 15089 scsfmmup->sfmmu_ctxs[i].gnum = 0;
15096 15090 }
15097 15091
15098 15092 for (i = 0; i < MMU_PAGE_SIZES; i++) {
15099 15093 new_scdp->scd_rttecnt[i] = 0;
15100 15094 }
15101 15095
15102 15096 new_scdp->scd_region_map = *new_map;
15103 15097 new_scdp->scd_refcnt = 1;
15104 15098 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) {
15105 15099 kmem_cache_free(scd_cache, new_scdp);
15106 15100 kmem_cache_free(sfmmuid_cache, scsfmmup);
15107 15101 return (NULL);
15108 15102 }
15109 15103 if (&mmu_init_scd) {
15110 15104 mmu_init_scd(new_scdp);
15111 15105 }
15112 15106 return (new_scdp);
15113 15107 }
15114 15108
15115 15109 /*
15116 15110 * The first phase of a process joining an SCD. The hat structure is
15117 15111 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set
15118 15112 * and a cross-call with context invalidation is used to cause the
15119 15113 * remaining work to be carried out in the sfmmu_tsbmiss_exception()
15120 15114 * routine.
15121 15115 */
15122 15116 static void
15123 15117 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup)
15124 15118 {
15125 15119 hatlock_t *hatlockp;
15126 15120 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15127 15121 int i;
15128 15122 sf_scd_t *old_scdp;
15129 15123
15130 15124 ASSERT(srdp != NULL);
15131 15125 ASSERT(scdp != NULL);
15132 15126 ASSERT(scdp->scd_refcnt > 0);
15133 - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15127 + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15134 15128
15135 15129 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) {
15136 15130 ASSERT(old_scdp != scdp);
15137 15131
15138 15132 mutex_enter(&old_scdp->scd_mutex);
15139 15133 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup);
15140 15134 mutex_exit(&old_scdp->scd_mutex);
15141 15135 /*
15142 15136 * sfmmup leaves the old scd. Update sfmmu_ttecnt to
15143 15137 * include the shme rgn ttecnt for rgns that
15144 15138 * were in the old SCD
15145 15139 */
15146 15140 for (i = 0; i < mmu_page_sizes; i++) {
15147 15141 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15148 15142 old_scdp->scd_rttecnt[i]);
15149 15143 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15150 15144 sfmmup->sfmmu_scdrttecnt[i]);
15151 15145 }
15152 15146 }
15153 15147
15154 15148 /*
15155 15149 * Move sfmmu to the scd lists.
15156 15150 */
15157 15151 mutex_enter(&scdp->scd_mutex);
15158 15152 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup);
15159 15153 mutex_exit(&scdp->scd_mutex);
15160 15154 SF_SCD_INCR_REF(scdp);
15161 15155
15162 15156 hatlockp = sfmmu_hat_enter(sfmmup);
15163 15157 /*
15164 15158 * For a multi-thread process, we must stop
15165 15159 * all the other threads before joining the scd.
15166 15160 */
15167 15161
15168 15162 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD);
15169 15163
15170 15164 sfmmu_invalidate_ctx(sfmmup);
15171 15165 sfmmup->sfmmu_scdp = scdp;
15172 15166
15173 15167 /*
15174 15168 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update
15175 15169 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD.
15176 15170 */
15177 15171 for (i = 0; i < mmu_page_sizes; i++) {
15178 15172 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i];
15179 15173 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]);
15180 15174 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15181 15175 -sfmmup->sfmmu_scdrttecnt[i]);
15182 15176 }
15183 15177 /* update tsb0 inflation count */
15184 15178 if (old_scdp != NULL) {
15185 15179 sfmmup->sfmmu_tsb0_4minflcnt +=
15186 15180 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15187 15181 }
15188 15182 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >=
15189 15183 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt);
15190 15184 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15191 15185
15192 15186 sfmmu_hat_exit(hatlockp);
15193 15187
15194 15188 if (old_scdp != NULL) {
15195 15189 SF_SCD_DECR_REF(srdp, old_scdp);
15196 15190 }
15197 15191
15198 15192 }
15199 15193
15200 15194 /*
15201 15195 * This routine is called by a process to become part of an SCD. It is called
15202 15196 * from sfmmu_tsbmiss_exception() once most of the initial work has been
15203 15197 * done by sfmmu_join_scd(). This routine must not drop the hat lock.
15204 15198 */
15205 15199 static void
15206 15200 sfmmu_finish_join_scd(sfmmu_t *sfmmup)
15207 15201 {
15208 15202 struct tsb_info *tsbinfop;
15209 15203
15210 15204 ASSERT(sfmmu_hat_lock_held(sfmmup));
15211 15205 ASSERT(sfmmup->sfmmu_scdp != NULL);
15212 15206 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD));
15213 15207 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15214 15208 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID));
15215 15209
15216 15210 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
15217 15211 tsbinfop = tsbinfop->tsb_next) {
15218 15212 if (tsbinfop->tsb_flags & TSB_SWAPPED) {
15219 15213 continue;
15220 15214 }
15221 15215 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG));
15222 15216
15223 15217 sfmmu_inv_tsb(tsbinfop->tsb_va,
15224 15218 TSB_BYTES(tsbinfop->tsb_szc));
15225 15219 }
15226 15220
15227 15221 /* Set HAT_CTX1_FLAG for all SCD ISMs */
15228 15222 sfmmu_ism_hatflags(sfmmup, 1);
15229 15223
15230 15224 SFMMU_STAT(sf_join_scd);
15231 15225 }
15232 15226
15233 15227 /*
15234 15228 * This routine is called in order to check if there is an SCD which matches
 15235  15229  * the process's region map; if not, a new SCD may be created.
15236 15230 */
15237 15231 static void
15238 15232 sfmmu_find_scd(sfmmu_t *sfmmup)
15239 15233 {
15240 15234 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15241 15235 sf_scd_t *scdp, *new_scdp;
15242 15236 int ret;
15243 15237
15244 15238 ASSERT(srdp != NULL);
15245 - ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15239 + ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as));
15246 15240
15247 15241 mutex_enter(&srdp->srd_scd_mutex);
15248 15242 for (scdp = srdp->srd_scdp; scdp != NULL;
15249 15243 scdp = scdp->scd_next) {
15250 15244 SF_RGNMAP_EQUAL(&scdp->scd_region_map,
15251 15245 &sfmmup->sfmmu_region_map, ret);
15252 15246 if (ret == 1) {
15253 15247 SF_SCD_INCR_REF(scdp);
15254 15248 mutex_exit(&srdp->srd_scd_mutex);
15255 15249 sfmmu_join_scd(scdp, sfmmup);
15256 15250 ASSERT(scdp->scd_refcnt >= 2);
15257 15251 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt);
15258 15252 return;
15259 15253 } else {
15260 15254 /*
15261 15255 * If the sfmmu region map is a subset of the scd
15262 15256 * region map, then the assumption is that this process
15263 15257 * will continue attaching to ISM segments until the
15264 15258 * region maps are equal.
15265 15259 */
15266 15260 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map,
15267 15261 &sfmmup->sfmmu_region_map, ret);
15268 15262 if (ret == 1) {
15269 15263 mutex_exit(&srdp->srd_scd_mutex);
15270 15264 return;
15271 15265 }
15272 15266 }
15273 15267 }
15274 15268
15275 15269 ASSERT(scdp == NULL);
15276 15270 /*
15277 15271 * No matching SCD has been found, create a new one.
15278 15272 */
15279 15273 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) ==
15280 15274 NULL) {
15281 15275 mutex_exit(&srdp->srd_scd_mutex);
15282 15276 return;
15283 15277 }
15284 15278
15285 15279 /*
15286 15280 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd.
15287 15281 */
15288 15282
15289 15283 /* Set scd_rttecnt for shme rgns in SCD */
15290 15284 sfmmu_set_scd_rttecnt(srdp, new_scdp);
15291 15285
15292 15286 /*
15293 15287 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists.
15294 15288 */
15295 15289 sfmmu_link_scd_to_regions(srdp, new_scdp);
15296 15290 sfmmu_add_scd(&srdp->srd_scdp, new_scdp);
15297 15291 SFMMU_STAT_ADD(sf_create_scd, 1);
15298 15292
15299 15293 mutex_exit(&srdp->srd_scd_mutex);
15300 15294 sfmmu_join_scd(new_scdp, sfmmup);
15301 15295 ASSERT(new_scdp->scd_refcnt >= 2);
15302 15296 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt);
15303 15297 }
15304 15298
15305 15299 /*
15306 15300 * This routine is called by a process to remove itself from an SCD. It is
 15307  15301  * either called when the process has detached from a segment or from
15308 15302 * hat_free_start() as a result of calling exit.
15309 15303 */
15310 15304 static void
15311 15305 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type)
15312 15306 {
15313 15307 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15314 15308 sf_srd_t *srdp = sfmmup->sfmmu_srdp;
15315 15309 hatlock_t *hatlockp = TSB_HASH(sfmmup);
15316 15310 int i;
15317 15311
15318 15312 ASSERT(scdp != NULL);
15319 15313 ASSERT(srdp != NULL);
15320 15314
15321 15315 if (sfmmup->sfmmu_free) {
15322 15316 /*
15323 15317 * If the process is part of an SCD the sfmmu is unlinked
15324 15318 * from scd_sf_list.
15325 15319 */
15326 15320 mutex_enter(&scdp->scd_mutex);
15327 15321 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15328 15322 mutex_exit(&scdp->scd_mutex);
15329 15323 /*
15330 15324 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15331 15325 * are about to leave the SCD
15332 15326 */
15333 15327 for (i = 0; i < mmu_page_sizes; i++) {
15334 15328 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15335 15329 scdp->scd_rttecnt[i]);
15336 15330 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15337 15331 sfmmup->sfmmu_scdrttecnt[i]);
15338 15332 sfmmup->sfmmu_scdrttecnt[i] = 0;
15339 15333 }
15340 15334 sfmmup->sfmmu_scdp = NULL;
15341 15335
15342 15336 SF_SCD_DECR_REF(srdp, scdp);
15343 15337 return;
15344 15338 }
15345 15339
15346 15340 ASSERT(r_type != SFMMU_REGION_ISM ||
15347 15341 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15348 15342 ASSERT(scdp->scd_refcnt);
15349 15343 ASSERT(!sfmmup->sfmmu_free);
15350 15344 ASSERT(sfmmu_hat_lock_held(sfmmup));
15351 - ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
15345 + ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as));
15352 15346
15353 15347 /*
15354 15348 * Wait for ISM maps to be updated.
15355 15349 */
15356 15350 if (r_type != SFMMU_REGION_ISM) {
15357 15351 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) &&
15358 15352 sfmmup->sfmmu_scdp != NULL) {
15359 15353 cv_wait(&sfmmup->sfmmu_tsb_cv,
15360 15354 HATLOCK_MUTEXP(hatlockp));
15361 15355 }
15362 15356
15363 15357 if (sfmmup->sfmmu_scdp == NULL) {
15364 15358 sfmmu_hat_exit(hatlockp);
15365 15359 return;
15366 15360 }
15367 15361 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
15368 15362 }
15369 15363
15370 15364 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
15371 15365 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
15372 15366 /*
15373 15367 * Since HAT_JOIN_SCD was set our context
15374 15368 * is still invalid.
15375 15369 */
15376 15370 } else {
15377 15371 /*
15378 15372 * For a multi-thread process, we must stop
15379 15373 * all the other threads before leaving the scd.
15380 15374 */
15381 15375
15382 15376 sfmmu_invalidate_ctx(sfmmup);
15383 15377 }
15384 15378
15385 15379 /* Clear all the rid's for ISM, delete flags, etc */
15386 15380 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
15387 15381 sfmmu_ism_hatflags(sfmmup, 0);
15388 15382
15389 15383 /*
15390 15384 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that
15391 15385 * are in SCD before this sfmmup leaves the SCD.
15392 15386 */
15393 15387 for (i = 0; i < mmu_page_sizes; i++) {
15394 15388 ASSERT(sfmmup->sfmmu_scdrttecnt[i] ==
15395 15389 scdp->scd_rttecnt[i]);
15396 15390 atomic_add_long(&sfmmup->sfmmu_ttecnt[i],
15397 15391 sfmmup->sfmmu_scdrttecnt[i]);
15398 15392 sfmmup->sfmmu_scdrttecnt[i] = 0;
15399 15393 /* update ismttecnt to include SCD ism before hat leaves SCD */
15400 15394 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i];
15401 15395 sfmmup->sfmmu_scdismttecnt[i] = 0;
15402 15396 }
15403 15397 /* update tsb0 inflation count */
15404 15398 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt;
15405 15399
15406 15400 if (r_type != SFMMU_REGION_ISM) {
15407 15401 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
15408 15402 }
15409 15403 sfmmup->sfmmu_scdp = NULL;
15410 15404
15411 15405 sfmmu_hat_exit(hatlockp);
15412 15406
15413 15407 /*
 15414  15408 	 * Unlink sfmmu from scd_sf_list; this can be done without holding
15415 15409 * the hat lock as we hold the sfmmu_as lock which prevents
15416 15410 * hat_join_region from adding this thread to the scd again. Other
15417 15411 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL
15418 15412 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp
15419 15413 * while holding the hat lock.
15420 15414 */
15421 15415 mutex_enter(&scdp->scd_mutex);
15422 15416 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup);
15423 15417 mutex_exit(&scdp->scd_mutex);
15424 15418 SFMMU_STAT(sf_leave_scd);
15425 15419
15426 15420 SF_SCD_DECR_REF(srdp, scdp);
15427 15421 hatlockp = sfmmu_hat_enter(sfmmup);
15428 15422
15429 15423 }
15430 15424
15431 15425 /*
15432 15426 * Unlink and free up an SCD structure with a reference count of 0.
15433 15427 */
15434 15428 static void
15435 15429 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap)
15436 15430 {
15437 15431 sfmmu_t *scsfmmup;
15438 15432 sf_scd_t *sp;
15439 15433 hatlock_t *shatlockp;
15440 15434 int i, ret;
15441 15435
15442 15436 mutex_enter(&srdp->srd_scd_mutex);
15443 15437 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) {
15444 15438 if (sp == scdp)
15445 15439 break;
15446 15440 }
15447 15441 if (sp == NULL || sp->scd_refcnt) {
15448 15442 mutex_exit(&srdp->srd_scd_mutex);
15449 15443 return;
15450 15444 }
15451 15445
15452 15446 /*
15453 15447 * It is possible that the scd has been freed and reallocated with a
15454 15448 * different region map while we've been waiting for the srd_scd_mutex.
15455 15449 */
15456 15450 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret);
15457 15451 if (ret != 1) {
15458 15452 mutex_exit(&srdp->srd_scd_mutex);
15459 15453 return;
15460 15454 }
15461 15455
15462 15456 ASSERT(scdp->scd_sf_list == NULL);
15463 15457 /*
15464 15458 * Unlink scd from srd_scdp list.
15465 15459 */
15466 15460 sfmmu_remove_scd(&srdp->srd_scdp, scdp);
15467 15461 mutex_exit(&srdp->srd_scd_mutex);
15468 15462
15469 15463 sfmmu_unlink_scd_from_regions(srdp, scdp);
15470 15464
15471 15465 /* Clear shared context tsb and release ctx */
15472 15466 scsfmmup = scdp->scd_sfmmup;
15473 15467
15474 15468 /*
15475 15469 * create a barrier so that scd will not be destroyed
 15476  15470 	 * if another thread still holds the same shared hat lock.
15477 15471 * E.g., sfmmu_tsbmiss_exception() needs to acquire the
15478 15472 * shared hat lock before checking the shared tsb reloc flag.
15479 15473 */
15480 15474 shatlockp = sfmmu_hat_enter(scsfmmup);
15481 15475 sfmmu_hat_exit(shatlockp);
15482 15476
15483 15477 sfmmu_free_scd_tsbs(scsfmmup);
15484 15478
15485 15479 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
15486 15480 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) {
15487 15481 kmem_free(scsfmmup->sfmmu_hmeregion_links[i],
15488 15482 SFMMU_L2_HMERLINKS_SIZE);
15489 15483 scsfmmup->sfmmu_hmeregion_links[i] = NULL;
15490 15484 }
15491 15485 }
15492 15486 kmem_cache_free(sfmmuid_cache, scsfmmup);
15493 15487 kmem_cache_free(scd_cache, scdp);
15494 15488 SFMMU_STAT(sf_destroy_scd);
15495 15489 }
15496 15490
15497 15491 /*
15498 15492 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to
15499 15493 * bits which are set in the ism_region_map parameter. This flag indicates to
15500 15494 * the tsbmiss handler that mapping for these segments should be loaded using
15501 15495 * the shared context.
15502 15496 */
15503 15497 static void
15504 15498 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag)
15505 15499 {
15506 15500 sf_scd_t *scdp = sfmmup->sfmmu_scdp;
15507 15501 ism_blk_t *ism_blkp;
15508 15502 ism_map_t *ism_map;
15509 15503 int i, rid;
15510 15504
15511 15505 ASSERT(sfmmup->sfmmu_iblk != NULL);
15512 15506 ASSERT(scdp != NULL);
15513 15507 /*
15514 15508 * Note that the caller either set HAT_ISMBUSY flag or checked
15515 15509 * under hat lock that HAT_ISMBUSY was not set by another thread.
15516 15510 */
15517 15511 ASSERT(sfmmu_hat_lock_held(sfmmup));
15518 15512
15519 15513 ism_blkp = sfmmup->sfmmu_iblk;
15520 15514 while (ism_blkp != NULL) {
15521 15515 ism_map = ism_blkp->iblk_maps;
 15522  15516 		for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
15523 15517 rid = ism_map[i].imap_rid;
15524 15518 if (rid == SFMMU_INVALID_ISMRID) {
15525 15519 continue;
15526 15520 }
15527 15521 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS);
15528 15522 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) &&
15529 15523 addflag) {
15530 15524 ism_map[i].imap_hatflags |=
15531 15525 HAT_CTX1_FLAG;
15532 15526 } else {
15533 15527 ism_map[i].imap_hatflags &=
15534 15528 ~HAT_CTX1_FLAG;
15535 15529 }
15536 15530 }
15537 15531 ism_blkp = ism_blkp->iblk_next;
15538 15532 }
15539 15533 }
15540 15534
15541 15535 static int
15542 15536 sfmmu_srd_lock_held(sf_srd_t *srdp)
15543 15537 {
15544 15538 return (MUTEX_HELD(&srdp->srd_mutex));
15545 15539 }
15546 15540
15547 15541 /* ARGSUSED */
15548 15542 static int
15549 15543 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags)
15550 15544 {
15551 15545 sf_scd_t *scdp = (sf_scd_t *)buf;
15552 15546
15553 15547 bzero(buf, sizeof (sf_scd_t));
15554 15548 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL);
15555 15549 return (0);
15556 15550 }
15557 15551
15558 15552 /* ARGSUSED */
15559 15553 static void
15560 15554 sfmmu_scdcache_destructor(void *buf, void *cdrarg)
15561 15555 {
15562 15556 sf_scd_t *scdp = (sf_scd_t *)buf;
15563 15557
15564 15558 mutex_destroy(&scdp->scd_mutex);
15565 15559 }
15566 15560
15567 15561 /*
15568 15562 * The listp parameter is a pointer to a list of hmeblks which are partially
15569 15563 * freed as result of calling sfmmu_hblk_hash_rm(), the last phase of the
15570 15564 * freeing process is to cross-call all cpus to ensure that there are no
15571 15565 * remaining cached references.
15572 15566 *
15573 15567 * If the local generation number is less than the global then we can free
15574 15568 * hmeblks which are already on the pending queue as another cpu has completed
15575 15569 * the cross-call.
15576 15570 *
15577 15571 * We cross-call to make sure that there are no threads on other cpus accessing
 15578  15572  * these hmeblks and then complete the process of freeing them under the
15579 15573 * following conditions:
15580 15574 * The total number of pending hmeblks is greater than the threshold
15581 15575 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15582 15576 * It is at least 1 second since the last time we cross-called
15583 15577 *
15584 15578 * Otherwise, we add the hmeblks to the per-cpu pending queue.
15585 15579 */
15586 15580 static void
15587 15581 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15588 15582 {
15589 15583 struct hme_blk *hblkp, *pr_hblkp = NULL;
15590 15584 int count = 0;
15591 15585 cpuset_t cpuset = cpu_ready_set;
15592 15586 cpu_hme_pend_t *cpuhp;
15593 15587 timestruc_t now;
15594 15588 int one_second_expired = 0;
15595 15589
15596 15590 gethrestime_lasttick(&now);
15597 15591
15598 15592 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) {
15599 15593 ASSERT(hblkp->hblk_shw_bit == 0);
15600 15594 ASSERT(hblkp->hblk_shared == 0);
15601 15595 count++;
15602 15596 pr_hblkp = hblkp;
15603 15597 }
15604 15598
15605 15599 cpuhp = &cpu_hme_pend[CPU->cpu_seqid];
15606 15600 mutex_enter(&cpuhp->chp_mutex);
15607 15601
15608 15602 if ((cpuhp->chp_count + count) == 0) {
15609 15603 mutex_exit(&cpuhp->chp_mutex);
15610 15604 return;
15611 15605 }
15612 15606
15613 15607 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) {
15614 15608 one_second_expired = 1;
15615 15609 }
15616 15610
15617 15611 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
15618 15612 (cpuhp->chp_count + count) > cpu_hme_pend_thresh ||
15619 15613 one_second_expired)) {
15620 15614 /* Append global list to local */
15621 15615 if (pr_hblkp == NULL) {
15622 15616 *listp = cpuhp->chp_listp;
15623 15617 } else {
15624 15618 pr_hblkp->hblk_next = cpuhp->chp_listp;
15625 15619 }
15626 15620 cpuhp->chp_listp = NULL;
15627 15621 cpuhp->chp_count = 0;
15628 15622 cpuhp->chp_timestamp = now.tv_sec;
15629 15623 mutex_exit(&cpuhp->chp_mutex);
15630 15624
15631 15625 kpreempt_disable();
15632 15626 CPUSET_DEL(cpuset, CPU->cpu_id);
15633 15627 xt_sync(cpuset);
15634 15628 xt_sync(cpuset);
15635 15629 kpreempt_enable();
15636 15630
15637 15631 /*
15638 15632 * At this stage we know that no trap handlers on other
15639 15633 * cpus can have references to hmeblks on the list.
15640 15634 */
15641 15635 sfmmu_hblk_free(listp);
15642 15636 } else if (*listp != NULL) {
15643 15637 pr_hblkp->hblk_next = cpuhp->chp_listp;
15644 15638 cpuhp->chp_listp = *listp;
15645 15639 cpuhp->chp_count += count;
15646 15640 *listp = NULL;
15647 15641 mutex_exit(&cpuhp->chp_mutex);
15648 15642 } else {
15649 15643 mutex_exit(&cpuhp->chp_mutex);
15650 15644 }
15651 15645 }
15652 15646
15653 15647 /*
15654 15648 * Add an hmeblk to the the hash list.
15655 15649 */
15656 15650 void
15657 15651 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15658 15652 uint64_t hblkpa)
15659 15653 {
15660 15654 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15661 15655 #ifdef DEBUG
15662 15656 if (hmebp->hmeblkp == NULL) {
15663 15657 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15664 15658 }
15665 15659 #endif /* DEBUG */
15666 15660
15667 15661 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15668 15662 /*
15669 15663 * Since the TSB miss handler now does not lock the hash chain before
 15670  15664  * walking it, make sure that the hmeblk's nextpa is globally visible
15671 15665 * before we make the hmeblk globally visible by updating the chain root
15672 15666 * pointer in the hash bucket.
15673 15667 */
15674 15668 membar_producer();
15675 15669 hmebp->hmeh_nextpa = hblkpa;
15676 15670 hmeblkp->hblk_next = hmebp->hmeblkp;
15677 15671 hmebp->hmeblkp = hmeblkp;
15678 15672
15679 15673 }
15680 15674
15681 15675 /*
15682 15676 * This function is the first part of a 2 part process to remove an hmeblk
15683 15677 * from the hash chain. In this phase we unlink the hmeblk from the hash chain
15684 15678 * but leave the next physical pointer unchanged. The hmeblk is then linked onto
15685 15679 * a per-cpu pending list using the virtual address pointer.
15686 15680 *
15687 15681 * TSB miss trap handlers that start after this phase will no longer see
15688 15682 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register
 15689  15683  * can still use it for further chain traversal because we haven't yet modified
15690 15684 * the next physical pointer or freed it.
15691 15685 *
15692 15686 * In the second phase of hmeblk removal we'll issue a barrier xcall before
15693 15687 * we reuse or free this hmeblk. This will make sure all lingering references to
15694 15688 * the hmeblk after first phase disappear before we finally reclaim it.
15695 15689 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains
15696 15690 * during their traversal.
15697 15691 *
15698 15692 * The hmehash_mutex must be held when calling this function.
15699 15693 *
15700 15694 * Input:
15701 15695 * hmebp - hme hash bucket pointer
15702 15696 * hmeblkp - address of hmeblk to be removed
15703 15697 * pr_hblk - virtual address of previous hmeblkp
15704 15698 * listp - pointer to list of hmeblks linked by virtual address
15705 15699 * free_now flag - indicates that a complete removal from the hash chains
15706 15700 * is necessary.
15707 15701 *
15708 15702 * It is inefficient to use the free_now flag as a cross-call is required to
 15709  15703  * remove a single hmeblk from the hash chain, but this is necessary when
 15710  15704  * hmeblks are in short supply.
15711 15705 */
15712 15706 void
15713 15707 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15714 15708 struct hme_blk *pr_hblk, struct hme_blk **listp,
15715 15709 int free_now)
15716 15710 {
15717 15711 int shw_size, vshift;
15718 15712 struct hme_blk *shw_hblkp;
15719 15713 uint_t shw_mask, newshw_mask;
15720 15714 caddr_t vaddr;
15721 15715 int size;
15722 15716 cpuset_t cpuset = cpu_ready_set;
15723 15717
15724 15718 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15725 15719
15726 15720 if (hmebp->hmeblkp == hmeblkp) {
15727 15721 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa;
15728 15722 hmebp->hmeblkp = hmeblkp->hblk_next;
15729 15723 } else {
15730 15724 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa;
15731 15725 pr_hblk->hblk_next = hmeblkp->hblk_next;
15732 15726 }
15733 15727
15734 15728 size = get_hblk_ttesz(hmeblkp);
15735 15729 shw_hblkp = hmeblkp->hblk_shadow;
15736 15730 if (shw_hblkp) {
15737 15731 ASSERT(hblktosfmmu(hmeblkp) != KHATID);
15738 15732 ASSERT(!hmeblkp->hblk_shared);
15739 15733 #ifdef DEBUG
15740 15734 if (mmu_page_sizes == max_mmu_page_sizes) {
15741 15735 ASSERT(size < TTE256M);
15742 15736 } else {
15743 15737 ASSERT(size < TTE4M);
15744 15738 }
15745 15739 #endif /* DEBUG */
15746 15740
15747 15741 shw_size = get_hblk_ttesz(shw_hblkp);
15748 15742 vaddr = (caddr_t)get_hblk_base(hmeblkp);
15749 15743 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size);
15750 15744 ASSERT(vshift < 8);
15751 15745 /*
15752 15746 * Atomically clear shadow mask bit
15753 15747 */
15754 15748 do {
15755 15749 shw_mask = shw_hblkp->hblk_shw_mask;
15756 15750 ASSERT(shw_mask & (1 << vshift));
15757 15751 newshw_mask = shw_mask & ~(1 << vshift);
15758 15752 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask,
15759 15753 shw_mask, newshw_mask);
15760 15754 } while (newshw_mask != shw_mask);
15761 15755 hmeblkp->hblk_shadow = NULL;
15762 15756 }
15763 15757 hmeblkp->hblk_shw_bit = 0;
15764 15758
15765 15759 if (hmeblkp->hblk_shared) {
15766 15760 #ifdef DEBUG
15767 15761 sf_srd_t *srdp;
15768 15762 sf_region_t *rgnp;
15769 15763 uint_t rid;
15770 15764
15771 15765 srdp = hblktosrd(hmeblkp);
15772 15766 ASSERT(srdp != NULL && srdp->srd_refcnt != 0);
15773 15767 rid = hmeblkp->hblk_tag.htag_rid;
15774 15768 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
15775 15769 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
15776 15770 rgnp = srdp->srd_hmergnp[rid];
15777 15771 ASSERT(rgnp != NULL);
15778 15772 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid);
15779 15773 #endif /* DEBUG */
15780 15774 hmeblkp->hblk_shared = 0;
15781 15775 }
15782 15776 if (free_now) {
15783 15777 kpreempt_disable();
15784 15778 CPUSET_DEL(cpuset, CPU->cpu_id);
15785 15779 xt_sync(cpuset);
15786 15780 xt_sync(cpuset);
15787 15781 kpreempt_enable();
15788 15782
15789 15783 hmeblkp->hblk_nextpa = HMEBLK_ENDPA;
15790 15784 hmeblkp->hblk_next = NULL;
15791 15785 } else {
15792 15786 /* Append hmeblkp to listp for processing later. */
15793 15787 hmeblkp->hblk_next = *listp;
15794 15788 *listp = hmeblkp;
15795 15789 }
15796 15790 }
15797 15791
15798 15792 /*
15799 15793 * This routine is called when memory is in short supply and returns a free
15800 15794 * hmeblk of the requested size from the cpu pending lists.
15801 15795 */
15802 15796 static struct hme_blk *
15803 15797 sfmmu_check_pending_hblks(int size)
15804 15798 {
15805 15799 int i;
15806 15800 struct hme_blk *hmeblkp = NULL, *last_hmeblkp;
15807 15801 int found_hmeblk;
15808 15802 cpuset_t cpuset = cpu_ready_set;
15809 15803 cpu_hme_pend_t *cpuhp;
15810 15804
15811 15805 /* Flush cpu hblk pending queues */
15812 15806 for (i = 0; i < NCPU; i++) {
15813 15807 cpuhp = &cpu_hme_pend[i];
15814 15808 if (cpuhp->chp_listp != NULL) {
15815 15809 mutex_enter(&cpuhp->chp_mutex);
15816 15810 if (cpuhp->chp_listp == NULL) {
15817 15811 mutex_exit(&cpuhp->chp_mutex);
15818 15812 continue;
15819 15813 }
15820 15814 found_hmeblk = 0;
15821 15815 last_hmeblkp = NULL;
15822 15816 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL;
15823 15817 hmeblkp = hmeblkp->hblk_next) {
15824 15818 if (get_hblk_ttesz(hmeblkp) == size) {
15825 15819 if (last_hmeblkp == NULL) {
15826 15820 cpuhp->chp_listp =
15827 15821 hmeblkp->hblk_next;
15828 15822 } else {
15829 15823 last_hmeblkp->hblk_next =
15830 15824 hmeblkp->hblk_next;
15831 15825 }
15832 15826 ASSERT(cpuhp->chp_count > 0);
15833 15827 cpuhp->chp_count--;
15834 15828 found_hmeblk = 1;
15835 15829 break;
15836 15830 } else {
15837 15831 last_hmeblkp = hmeblkp;
15838 15832 }
15839 15833 }
15840 15834 mutex_exit(&cpuhp->chp_mutex);
15841 15835
15842 15836 if (found_hmeblk) {
15843 15837 kpreempt_disable();
15844 15838 CPUSET_DEL(cpuset, CPU->cpu_id);
15845 15839 xt_sync(cpuset);
15846 15840 xt_sync(cpuset);
15847 15841 kpreempt_enable();
15848 15842 return (hmeblkp);
15849 15843 }
15850 15844 }
15851 15845 }
15852 15846 return (NULL);
15853 15847 }