5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/i86pc/vm/hat_i86.c
+++ new/usr/src/uts/i86pc/vm/hat_i86.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 27 */
28 28 /*
29 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 30 */
31 31
32 32 /*
33 33 * VM - Hardware Address Translation management for i386 and amd64
34 34 *
35 35 * Implementation of the interfaces described in <common/vm/hat.h>
36 36 *
37 37 * Nearly all the details of how the hardware is managed should not be
38 38 * visible outside this layer except for misc. machine specific functions
39 39 * that work in conjunction with this code.
40 40 *
41 41 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
42 42 */
43 43
44 44 #include <sys/machparam.h>
45 45 #include <sys/machsystm.h>
46 46 #include <sys/mman.h>
47 47 #include <sys/types.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/cpuvar.h>
50 50 #include <sys/thread.h>
51 51 #include <sys/proc.h>
52 52 #include <sys/cpu.h>
53 53 #include <sys/kmem.h>
54 54 #include <sys/disp.h>
55 55 #include <sys/shm.h>
56 56 #include <sys/sysmacros.h>
57 57 #include <sys/machparam.h>
58 58 #include <sys/vmem.h>
59 59 #include <sys/vmsystm.h>
60 60 #include <sys/promif.h>
61 61 #include <sys/var.h>
62 62 #include <sys/x86_archext.h>
63 63 #include <sys/atomic.h>
64 64 #include <sys/bitmap.h>
65 65 #include <sys/controlregs.h>
66 66 #include <sys/bootconf.h>
67 67 #include <sys/bootsvcs.h>
68 68 #include <sys/bootinfo.h>
69 69 #include <sys/archsystm.h>
70 70
71 71 #include <vm/seg_kmem.h>
72 72 #include <vm/hat_i86.h>
73 73 #include <vm/as.h>
74 74 #include <vm/seg.h>
75 75 #include <vm/page.h>
76 76 #include <vm/seg_kp.h>
77 77 #include <vm/seg_kpm.h>
78 78 #include <vm/vm_dep.h>
79 79 #ifdef __xpv
80 80 #include <sys/hypervisor.h>
81 81 #endif
82 82 #include <vm/kboot_mmu.h>
83 83 #include <vm/seg_spt.h>
84 84
85 85 #include <sys/cmn_err.h>
86 86
87 87 /*
88 88 * Basic parameters for hat operation.
89 89 */
90 90 struct hat_mmu_info mmu;
91 91
92 92 /*
93 93 * The page that is the kernel's top level pagetable.
94 94 *
95 95 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
96 96 * on this 4K page for its top level page table. The remaining groups of
97 97 * 4 entries are used for per processor copies of user VLP pagetables for
98 98 * running threads. See hat_switch() and reload_pae32() for details.
99 99 *
100 100 * vlp_page[0..3] - level==2 PTEs for kernel HAT
101 101 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
102 102 * vlp_page[8..11] - level==2 PTE for user thread on cpu 1
103 103 * etc...
104 104 */
105 105 static x86pte_t *vlp_page;
106 106
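To make the layout above concrete, here is a minimal sketch (not part of the diff) of how a CPU's group of top level VLP PTEs is located within vlp_page; it mirrors the arithmetic used later in reload_pae32() and assumes VLP_NUM_PTES names the 4-entry group size:

	/* Sketch only: entries 0..3 belong to the kernel hat, so cpu N uses group N+1. */
	static x86pte_t *
	vlp_group_for_cpu(cpu_t *cpu)
	{
		return (vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES);
	}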
107 107 /*
108 108 * forward declaration of internal utility routines
109 109 */
110 110 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
111 111 x86pte_t new);
112 112
113 113 /*
114 114 * The kernel address space exists in all HATs. To implement this the
115 115 * kernel reserves a fixed number of entries in the topmost level(s) of page
116 116 * tables. The values are setup during startup and then copied to every user
117 117 * hat created by hat_alloc(). This means that kernelbase must be:
118 118 *
119 119 * 4Meg aligned for 32 bit kernels
120 120 * 512Gig aligned for x86_64 64 bit kernel
121 121 *
122 122 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
123 123 * to each user hat.
124 124 */
125 125 typedef struct hat_kernel_range {
126 126 level_t hkr_level;
127 127 uintptr_t hkr_start_va;
128 128 uintptr_t hkr_end_va; /* zero means to end of memory */
129 129 } hat_kernel_range_t;
130 130 #define NUM_KERNEL_RANGE 2
131 131 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
132 132 static int num_kernel_ranges;
133 133
134 134 uint_t use_boot_reserve = 1; /* cleared after early boot process */
135 135 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
136 136
137 137 /*
138 138 * enable_1gpg: controls 1g page support for user applications.
139 139 * By default, 1g pages are exported to user applications. enable_1gpg can
140 140 * be set to 0 to not export.
141 141 */
142 142 int enable_1gpg = 1;
143 143
144 144 /*
145 145 * AMD shanghai processors provide better management of 1gb ptes in their tlb.
146 146 * By default, 1g page support will be disabled for pre-shanghai AMD
147 147 * processors that don't have optimal tlb support for the 1g page size.
148 148 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
149 149 * processors.
150 150 */
151 151 int chk_optimal_1gtlb = 1;
152 152
153 153
154 154 #ifdef DEBUG
155 155 uint_t map1gcnt;
156 156 #endif
157 157
158 158
159 159 /*
160 160 * A cpuset for all cpus. This is used for kernel address cross calls, since
161 161 * the kernel addresses apply to all cpus.
162 162 */
163 163 cpuset_t khat_cpuset;
164 164
165 165 /*
166 166 * management stuff for hat structures
167 167 */
168 168 kmutex_t hat_list_lock;
169 169 kcondvar_t hat_list_cv;
170 170 kmem_cache_t *hat_cache;
171 171 kmem_cache_t *hat_hash_cache;
172 172 kmem_cache_t *vlp_hash_cache;
173 173
174 174 /*
175 175 * Simple statistics
176 176 */
177 177 struct hatstats hatstat;
178 178
179 179 /*
180 180 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
181 181 * correctly. For such hypervisors we must set PT_USER for kernel
182 182 * entries ourselves (normally the emulation would set PT_USER for
183 183 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
184 184 * thus set appropriately. Note that dboot/kbm is OK, as only the full
185 185 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
186 186 * incorrect.
187 187 */
188 188 int pt_kern;
189 189
190 190 /*
191 191 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
192 192 */
193 193 extern void atomic_orb(uchar_t *addr, uchar_t val);
194 194 extern void atomic_andb(uchar_t *addr, uchar_t val);
195 195
196 196 #ifndef __xpv
197 197 extern pfn_t memseg_get_start(struct memseg *);
198 198 #endif
199 199
200 200 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
201 201 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
202 202 #define PP_ISREF(pp) PP_GETRM(pp, P_REF)
203 203 #define PP_ISRO(pp) PP_GETRM(pp, P_RO)
204 204
205 205 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
206 206 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
207 207 #define PP_SETREF(pp) PP_SETRM(pp, P_REF)
208 208 #define PP_SETRO(pp) PP_SETRM(pp, P_RO)
209 209
210 210 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
211 211 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
212 212 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
213 213 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
214 214 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
215 215
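As a usage sketch of these wrappers (pp is assumed to be a valid page_t; the macros reduce to atomic byte operations on p_nrm, so no extra locking is shown):

	/* Sketch only: atomically mark a page referenced and modified. */
	static void
	pp_mark_ref_mod(page_t *pp)
	{
		/* atomic_orb() of both bits into pp->p_nrm */
		PP_SETRM(pp, P_REF | P_MOD);
	}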
216 216 /*
217 217 * kmem cache constructor for struct hat
218 218 */
219 219 /*ARGSUSED*/
220 220 static int
221 221 hati_constructor(void *buf, void *handle, int kmflags)
222 222 {
223 223 hat_t *hat = buf;
224 224
225 225 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
226 226 bzero(hat->hat_pages_mapped,
227 227 sizeof (pgcnt_t) * (mmu.max_page_level + 1));
228 228 hat->hat_ism_pgcnt = 0;
229 229 hat->hat_stats = 0;
230 230 hat->hat_flags = 0;
231 231 CPUSET_ZERO(hat->hat_cpus);
232 232 hat->hat_htable = NULL;
233 233 hat->hat_ht_hash = NULL;
234 234 return (0);
235 235 }
236 236
237 237 /*
238 238 * Allocate a hat structure for as. We also create the top level
239 239 * htable and initialize it to contain the kernel hat entries.
240 240 */
241 241 hat_t *
242 242 hat_alloc(struct as *as)
243 243 {
244 244 hat_t *hat;
245 245 htable_t *ht; /* top level htable */
246 246 uint_t use_vlp;
247 247 uint_t r;
248 248 hat_kernel_range_t *rp;
249 249 uintptr_t va;
250 250 uintptr_t eva;
251 251 uint_t start;
252 252 uint_t cnt;
253 253 htable_t *src;
254 254
255 255 /*
256 256 * Once we start creating user process HATs we can enable
257 257 * the htable_steal() code.
258 258 */
259 259 if (can_steal_post_boot == 0)
260 260 can_steal_post_boot = 1;
261 261
262 262 ASSERT(AS_WRITE_HELD(as, &as->a_lock));
263 263 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
264 264 hat->hat_as = as;
265 265 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
266 266 ASSERT(hat->hat_flags == 0);
267 267
268 268 #if defined(__xpv)
269 269 /*
270 270 * No VLP stuff on the hypervisor due to the 64-bit split top level
271 271 * page tables. On 32-bit it's not needed as the hypervisor takes
272 272 * care of copying the top level PTEs to a below 4Gig page.
273 273 */
274 274 use_vlp = 0;
275 275 #else /* __xpv */
276 276 /* 32 bit processes use a VLP style hat when running with PAE */
277 277 #if defined(__amd64)
278 278 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
279 279 #elif defined(__i386)
280 280 use_vlp = mmu.pae_hat;
281 281 #endif
282 282 #endif /* __xpv */
283 283 if (use_vlp) {
284 284 hat->hat_flags = HAT_VLP;
285 285 bzero(hat->hat_vlp_ptes, VLP_SIZE);
286 286 }
287 287
288 288 /*
289 289 * Allocate the htable hash
290 290 */
291 291 if ((hat->hat_flags & HAT_VLP)) {
292 292 hat->hat_num_hash = mmu.vlp_hash_cnt;
293 293 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
294 294 } else {
295 295 hat->hat_num_hash = mmu.hash_cnt;
296 296 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
297 297 }
298 298 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
299 299
300 300 /*
301 301 * Initialize Kernel HAT entries at the top of the top level page
302 302 * tables for the new hat.
303 303 */
304 304 hat->hat_htable = NULL;
305 305 hat->hat_ht_cached = NULL;
306 306 XPV_DISALLOW_MIGRATE();
307 307 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
308 308 hat->hat_htable = ht;
309 309
310 310 #if defined(__amd64)
311 311 if (hat->hat_flags & HAT_VLP)
312 312 goto init_done;
313 313 #endif
314 314
315 315 for (r = 0; r < num_kernel_ranges; ++r) {
316 316 rp = &kernel_ranges[r];
317 317 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
318 318 va += cnt * LEVEL_SIZE(rp->hkr_level)) {
319 319
320 320 if (rp->hkr_level == TOP_LEVEL(hat))
321 321 ht = hat->hat_htable;
322 322 else
323 323 ht = htable_create(hat, va, rp->hkr_level,
324 324 NULL);
325 325
326 326 start = htable_va2entry(va, ht);
327 327 cnt = HTABLE_NUM_PTES(ht) - start;
328 328 eva = va +
329 329 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
330 330 if (rp->hkr_end_va != 0 &&
331 331 (eva > rp->hkr_end_va || eva == 0))
332 332 cnt = htable_va2entry(rp->hkr_end_va, ht) -
333 333 start;
334 334
335 335 #if defined(__i386) && !defined(__xpv)
336 336 if (ht->ht_flags & HTABLE_VLP) {
337 337 bcopy(&vlp_page[start],
338 338 &hat->hat_vlp_ptes[start],
339 339 cnt * sizeof (x86pte_t));
340 340 continue;
341 341 }
342 342 #endif
343 343 src = htable_lookup(kas.a_hat, va, rp->hkr_level);
344 344 ASSERT(src != NULL);
345 345 x86pte_copy(src, ht, start, cnt);
346 346 htable_release(src);
347 347 }
348 348 }
349 349
350 350 init_done:
351 351
352 352 #if defined(__xpv)
353 353 /*
354 354 * Pin top level page tables after initializing them
355 355 */
356 356 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
357 357 #if defined(__amd64)
358 358 xen_pin(hat->hat_user_ptable, mmu.max_level);
359 359 #endif
360 360 #endif
361 361 XPV_ALLOW_MIGRATE();
362 362
363 363 /*
364 364 * Put it at the start of the global list of all hats (used by stealing)
365 365 *
366 366 * kas.a_hat is not in the list but is instead used to find the
367 367 * first and last items in the list.
368 368 *
369 369 * - kas.a_hat->hat_next points to the start of the user hats.
370 370 * The list ends where hat->hat_next == NULL
371 371 *
372 372 * - kas.a_hat->hat_prev points to the last of the user hats.
373 373 * The list begins where hat->hat_prev == NULL
374 374 */
375 375 mutex_enter(&hat_list_lock);
376 376 hat->hat_prev = NULL;
377 377 hat->hat_next = kas.a_hat->hat_next;
378 378 if (hat->hat_next)
379 379 hat->hat_next->hat_prev = hat;
380 380 else
381 381 kas.a_hat->hat_prev = hat;
382 382 kas.a_hat->hat_next = hat;
383 383 mutex_exit(&hat_list_lock);
384 384
385 385 return (hat);
386 386 }
387 387
388 388 /*
389 389 * The process has finished executing but the address space (as) has not been cleaned up yet.
390 390 */
391 391 /*ARGSUSED*/
392 392 void
393 393 hat_free_start(hat_t *hat)
394 394 {
395 395 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
396 396
397 397 /*
398 398 * If the hat is currently a stealing victim, wait for the stealing
399 399 * to finish. Once we mark it as HAT_FREEING, htable_steal()
400 400 * won't look at its pagetables anymore.
401 401 */
402 402 mutex_enter(&hat_list_lock);
403 403 while (hat->hat_flags & HAT_VICTIM)
404 404 cv_wait(&hat_list_cv, &hat_list_lock);
405 405 hat->hat_flags |= HAT_FREEING;
406 406 mutex_exit(&hat_list_lock);
407 407 }
408 408
409 409 /*
410 410 * An address space is being destroyed, so we destroy the associated hat.
411 411 */
412 412 void
413 413 hat_free_end(hat_t *hat)
414 414 {
415 415 kmem_cache_t *cache;
416 416
417 417 ASSERT(hat->hat_flags & HAT_FREEING);
418 418
419 419 /*
420 420 * must not be running on the given hat
421 421 */
422 422 ASSERT(CPU->cpu_current_hat != hat);
423 423
424 424 /*
425 425 * Remove it from the list of HATs
426 426 */
427 427 mutex_enter(&hat_list_lock);
428 428 if (hat->hat_prev)
429 429 hat->hat_prev->hat_next = hat->hat_next;
430 430 else
431 431 kas.a_hat->hat_next = hat->hat_next;
432 432 if (hat->hat_next)
433 433 hat->hat_next->hat_prev = hat->hat_prev;
434 434 else
435 435 kas.a_hat->hat_prev = hat->hat_prev;
436 436 mutex_exit(&hat_list_lock);
437 437 hat->hat_next = hat->hat_prev = NULL;
438 438
439 439 #if defined(__xpv)
440 440 /*
441 441 * On the hypervisor, unpin top level page table(s)
442 442 */
443 443 xen_unpin(hat->hat_htable->ht_pfn);
444 444 #if defined(__amd64)
445 445 xen_unpin(hat->hat_user_ptable);
446 446 #endif
447 447 #endif
448 448
449 449 /*
450 450 * Make a pass through the htables freeing them all up.
451 451 */
452 452 htable_purge_hat(hat);
453 453
454 454 /*
455 455 * Decide which kmem cache the hash table came from, then free it.
456 456 */
457 457 if (hat->hat_flags & HAT_VLP)
458 458 cache = vlp_hash_cache;
459 459 else
460 460 cache = hat_hash_cache;
461 461 kmem_cache_free(cache, hat->hat_ht_hash);
462 462 hat->hat_ht_hash = NULL;
463 463
464 464 hat->hat_flags = 0;
465 465 kmem_cache_free(hat_cache, hat);
466 466 }
467 467
468 468 /*
469 469 * round kernelbase down to a supported value to use for _userlimit
470 470 *
471 471 * userlimit must be aligned down to an entry in the top level htable.
472 472 * The one exception is for 32 bit HAT's running PAE.
473 473 */
474 474 uintptr_t
475 475 hat_kernelbase(uintptr_t va)
476 476 {
477 477 #if defined(__i386)
478 478 va &= LEVEL_MASK(1);
479 479 #endif
480 480 if (IN_VA_HOLE(va))
481 481 panic("_userlimit %p will fall in VA hole\n", (void *)va);
482 482 return (va);
483 483 }
484 484
485 485 /*
486 486 * Determine the largest page size level to use (mmu.max_page_level) and
486 486 * the largest exported to user programs (mmu.umax_page_level).
487 487 */
488 488 static void
489 489 set_max_page_level()
490 490 {
491 491 level_t lvl;
492 492
493 493 if (!kbm_largepage_support) {
494 494 lvl = 0;
495 495 } else {
496 496 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
497 497 lvl = 2;
498 498 if (chk_optimal_1gtlb &&
499 499 cpuid_opteron_erratum(CPU, 6671130)) {
500 500 lvl = 1;
501 501 }
502 502 if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
503 503 LEVEL_SHIFT(0))) {
504 504 lvl = 1;
505 505 }
506 506 } else {
507 507 lvl = 1;
508 508 }
509 509 }
510 510 mmu.max_page_level = lvl;
511 511
512 512 if ((lvl == 2) && (enable_1gpg == 0))
513 513 mmu.umax_page_level = 1;
514 514 else
515 515 mmu.umax_page_level = lvl;
516 516 }
517 517
518 518 /*
519 519 * Initialize hat data structures based on processor MMU information.
520 520 */
521 521 void
522 522 mmu_init(void)
523 523 {
524 524 uint_t max_htables;
525 525 uint_t pa_bits;
526 526 uint_t va_bits;
527 527 int i;
528 528
529 529 /*
530 530 * If CPU enabled the page table global bit, use it for the kernel
531 531 * This is bit 7 in CR4 (PGE - Page Global Enable).
532 532 */
533 533 if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
534 534 (getcr4() & CR4_PGE) != 0)
535 535 mmu.pt_global = PT_GLOBAL;
536 536
537 537 /*
538 538 * Detect NX and PAE usage.
539 539 */
540 540 mmu.pae_hat = kbm_pae_support;
541 541 if (kbm_nx_support)
542 542 mmu.pt_nx = PT_NX;
543 543 else
544 544 mmu.pt_nx = 0;
545 545
546 546 /*
547 547 * Use CPU info to set various MMU parameters
548 548 */
549 549 cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
550 550
551 551 if (va_bits < sizeof (void *) * NBBY) {
552 552 mmu.hole_start = (1ul << (va_bits - 1));
553 553 mmu.hole_end = 0ul - mmu.hole_start - 1;
554 554 } else {
555 555 mmu.hole_end = 0;
556 556 mmu.hole_start = mmu.hole_end - 1;
557 557 }
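For a concrete sense of these values, with va_bits == 48 (a common amd64 case, used here purely as an illustration) the expressions above work out as follows:

	/*
	 * Illustration only (va_bits == 48):
	 *	hole_start = 1ul << 47            = 0x0000800000000000
	 *	hole_end   = 0ul - hole_start - 1 = 0xffff7fffffffffff
	 * i.e. the non-canonical range between the low and high halves of
	 * the 64 bit address space.
	 */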
558 558 #if defined(OPTERON_ERRATUM_121)
559 559 /*
560 560 * If erratum 121 has already been detected at this time, hole_start
561 561 * contains the value to be subtracted from mmu.hole_start.
562 562 */
563 563 ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
564 564 hole_start = mmu.hole_start - hole_start;
565 565 #else
566 566 hole_start = mmu.hole_start;
567 567 #endif
568 568 hole_end = mmu.hole_end;
569 569
570 570 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
571 571 if (mmu.pae_hat == 0 && pa_bits > 32)
572 572 mmu.highest_pfn = PFN_4G - 1;
573 573
574 574 if (mmu.pae_hat) {
575 575 mmu.pte_size = 8; /* 8 byte PTEs */
576 576 mmu.pte_size_shift = 3;
577 577 } else {
578 578 mmu.pte_size = 4; /* 4 byte PTEs */
579 579 mmu.pte_size_shift = 2;
580 580 }
581 581
582 582 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
583 583 panic("Processor does not support PAE");
584 584
585 585 if (!is_x86_feature(x86_featureset, X86FSET_CX8))
586 586 panic("Processor does not support cmpxchg8b instruction");
587 587
588 588 #if defined(__amd64)
589 589
590 590 mmu.num_level = 4;
591 591 mmu.max_level = 3;
592 592 mmu.ptes_per_table = 512;
593 593 mmu.top_level_count = 512;
594 594
595 595 mmu.level_shift[0] = 12;
596 596 mmu.level_shift[1] = 21;
597 597 mmu.level_shift[2] = 30;
598 598 mmu.level_shift[3] = 39;
599 599
600 600 #elif defined(__i386)
601 601
602 602 if (mmu.pae_hat) {
603 603 mmu.num_level = 3;
604 604 mmu.max_level = 2;
605 605 mmu.ptes_per_table = 512;
606 606 mmu.top_level_count = 4;
607 607
608 608 mmu.level_shift[0] = 12;
609 609 mmu.level_shift[1] = 21;
610 610 mmu.level_shift[2] = 30;
611 611
612 612 } else {
613 613 mmu.num_level = 2;
614 614 mmu.max_level = 1;
615 615 mmu.ptes_per_table = 1024;
616 616 mmu.top_level_count = 1024;
617 617
618 618 mmu.level_shift[0] = 12;
619 619 mmu.level_shift[1] = 22;
620 620 }
621 621
622 622 #endif /* __i386 */
623 623
624 624 for (i = 0; i < mmu.num_level; ++i) {
625 625 mmu.level_size[i] = 1UL << mmu.level_shift[i];
626 626 mmu.level_offset[i] = mmu.level_size[i] - 1;
627 627 mmu.level_mask[i] = ~mmu.level_offset[i];
628 628 }
629 629
630 630 set_max_page_level();
631 631
632 632 mmu_page_sizes = mmu.max_page_level + 1;
633 633 mmu_exported_page_sizes = mmu.umax_page_level + 1;
634 634
635 635 /* restrict legacy applications from using pagesizes 1g and above */
636 636 mmu_legacy_page_sizes =
637 637 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
638 638
639 639
640 640 for (i = 0; i <= mmu.max_page_level; ++i) {
641 641 mmu.pte_bits[i] = PT_VALID | pt_kern;
642 642 if (i > 0)
643 643 mmu.pte_bits[i] |= PT_PAGESIZE;
644 644 }
645 645
646 646 /*
647 647 * NOTE Legacy 32 bit PAE mode only has the PT_VALID bit at top level.
648 648 */
649 649 for (i = 1; i < mmu.num_level; ++i)
650 650 mmu.ptp_bits[i] = PT_PTPBITS;
651 651
652 652 #if defined(__i386)
653 653 mmu.ptp_bits[2] = PT_VALID;
654 654 #endif
655 655
656 656 /*
657 657 * Compute how many hash table entries to have per process for htables.
658 658 * We start with 1 page's worth of entries.
659 659 *
660 660 * If physical memory is small, reduce the amount needed to cover it.
661 661 */
662 662 max_htables = physmax / mmu.ptes_per_table;
663 663 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
664 664 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
665 665 mmu.hash_cnt >>= 1;
666 666 mmu.vlp_hash_cnt = mmu.hash_cnt;
667 667
668 668 #if defined(__amd64)
669 669 /*
670 670 * If running in 64 bits and physical memory is large,
671 671 * increase the size of the cache to cover all of memory for
672 672 * a 64 bit process.
673 673 */
674 674 #define HASH_MAX_LENGTH 4
675 675 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
676 676 mmu.hash_cnt <<= 1;
677 677 #endif
678 678 }
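A worked example of the hash sizing above, assuming a 4K MMU_PAGESIZE and 8 byte pointers (illustrative numbers only, not from the source):

	/*
	 * Illustration only:
	 *	mmu.hash_cnt starts at MMU_PAGESIZE / sizeof (htable_t *)
	 *	            = 4096 / 8 = 512 buckets.
	 * It is halved while it still exceeds max_htables (small memory), and
	 * on amd64 doubled until hash_cnt * HASH_MAX_LENGTH covers max_htables.
	 */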
679 679
680 680
681 681 /*
682 682 * initialize hat data structures
683 683 */
684 684 void
685 685 hat_init()
686 686 {
687 687 #if defined(__i386)
688 688 /*
689 689 * _userlimit must be aligned correctly
690 690 */
691 691 if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
692 692 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
693 693 (void *)_userlimit, (void *)LEVEL_SIZE(1));
694 694 halt("hat_init(): Unable to continue");
695 695 }
696 696 #endif
697 697
698 698 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
699 699
700 700 /*
701 701 * initialize kmem caches
702 702 */
703 703 htable_init();
704 704 hment_init();
705 705
706 706 hat_cache = kmem_cache_create("hat_t",
707 707 sizeof (hat_t), 0, hati_constructor, NULL, NULL,
708 708 NULL, 0, 0);
709 709
710 710 hat_hash_cache = kmem_cache_create("HatHash",
711 711 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
712 712 NULL, 0, 0);
713 713
714 714 /*
715 715 * VLP hats can use a smaller hash table size on large memory machines
716 716 */
717 717 if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
718 718 vlp_hash_cache = hat_hash_cache;
719 719 } else {
720 720 vlp_hash_cache = kmem_cache_create("HatVlpHash",
721 721 mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
722 722 NULL, 0, 0);
723 723 }
724 724
725 725 /*
726 726 * Set up the kernel's hat
727 727 */
728 728 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
729 729 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
730 730 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
731 731 kas.a_hat->hat_as = &kas;
732 732 kas.a_hat->hat_flags = 0;
733 733 AS_LOCK_EXIT(&kas, &kas.a_lock);
734 734
735 735 CPUSET_ZERO(khat_cpuset);
736 736 CPUSET_ADD(khat_cpuset, CPU->cpu_id);
737 737
738 738 /*
739 739 * The kernel hat's next pointer serves as the head of the hat list.
740 740 * The kernel hat's prev pointer tracks the last hat on the list for
741 741 * htable_steal() to use.
742 742 */
743 743 kas.a_hat->hat_next = NULL;
744 744 kas.a_hat->hat_prev = NULL;
745 745
746 746 /*
747 747 * Allocate an htable hash bucket for the kernel
748 748 * XX64 - tune for 64 bit procs
749 749 */
750 750 kas.a_hat->hat_num_hash = mmu.hash_cnt;
751 751 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
752 752 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
753 753
754 754 /*
755 755 * zero out the top level and cached htable pointers
756 756 */
757 757 kas.a_hat->hat_ht_cached = NULL;
758 758 kas.a_hat->hat_htable = NULL;
759 759
760 760 /*
761 761 * Pre-allocate hrm_hashtab before enabling the collection of
762 762 * refmod statistics. Allocating on the fly would mean us
763 763 * running the risk of suffering recursive mutex enters or
764 764 * deadlocks.
765 765 */
766 766 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
767 767 KM_SLEEP);
768 768 }
769 769
770 770 /*
771 771 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
772 772 *
773 773 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
774 774 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
775 775 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
776 776 */
777 777 /*ARGSUSED*/
778 778 static void
779 779 hat_vlp_setup(struct cpu *cpu)
780 780 {
781 781 #if defined(__amd64) && !defined(__xpv)
782 782 struct hat_cpu_info *hci = cpu->cpu_hat_info;
783 783 pfn_t pfn;
784 784
785 785 /*
786 786 * allocate the level==2 page table for the bottom most
787 787 * 512Gig of address space (this is where 32 bit apps live)
788 788 */
789 789 ASSERT(hci != NULL);
790 790 hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
791 791
792 792 /*
793 793 * Allocate a top level pagetable and copy the kernel's
794 794 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
795 795 */
796 796 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
797 797 hci->hci_vlp_pfn =
798 798 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
799 799 ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
800 800 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
801 801
802 802 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
803 803 ASSERT(pfn != PFN_INVALID);
804 804 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
805 805 #endif /* __amd64 && !__xpv */
806 806 }
807 807
808 808 /*ARGSUSED*/
809 809 static void
810 810 hat_vlp_teardown(cpu_t *cpu)
811 811 {
812 812 #if defined(__amd64) && !defined(__xpv)
813 813 struct hat_cpu_info *hci;
814 814
815 815 if ((hci = cpu->cpu_hat_info) == NULL)
816 816 return;
817 817 if (hci->hci_vlp_l2ptes)
818 818 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
819 819 if (hci->hci_vlp_l3ptes)
820 820 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
821 821 #endif
822 822 }
823 823
824 824 #define NEXT_HKR(r, l, s, e) { \
825 825 kernel_ranges[r].hkr_level = l; \
826 826 kernel_ranges[r].hkr_start_va = s; \
827 827 kernel_ranges[r].hkr_end_va = e; \
828 828 ++r; \
829 829 }
830 830
831 831 /*
832 832 * Finish filling in the kernel hat.
833 833 * Pre fill in all top level kernel page table entries for the kernel's
834 834 * part of the address range. From this point on we can't use any new
835 835 * kernel large pages if they need PTE's at max_level
836 836 *
837 837 * create the kmap mappings.
838 838 */
839 839 void
840 840 hat_init_finish(void)
841 841 {
842 842 size_t size;
843 843 uint_t r = 0;
844 844 uintptr_t va;
845 845 hat_kernel_range_t *rp;
846 846
847 847
848 848 /*
849 849 * We are now effectively running on the kernel hat.
850 850 * Clearing use_boot_reserve shuts off using the pre-allocated boot
851 851 * reserve for all HAT allocations. From here on, the reserves are
852 852 * only used when avoiding recursion in kmem_alloc().
853 853 */
854 854 use_boot_reserve = 0;
855 855 htable_adjust_reserve();
856 856
857 857 /*
858 858 * User HATs are initialized with copies of all kernel mappings in
859 859 * higher level page tables. Ensure that those entries exist.
860 860 */
861 861 #if defined(__amd64)
862 862
863 863 NEXT_HKR(r, 3, kernelbase, 0);
864 864 #if defined(__xpv)
865 865 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
866 866 #endif
867 867
868 868 #elif defined(__i386)
869 869
870 870 #if !defined(__xpv)
871 871 if (mmu.pae_hat) {
872 872 va = kernelbase;
873 873 if ((va & LEVEL_MASK(2)) != va) {
874 874 va = P2ROUNDUP(va, LEVEL_SIZE(2));
875 875 NEXT_HKR(r, 1, kernelbase, va);
876 876 }
877 877 if (va != 0)
878 878 NEXT_HKR(r, 2, va, 0);
879 879 } else
880 880 #endif /* __xpv */
881 881 NEXT_HKR(r, 1, kernelbase, 0);
882 882
883 883 #endif /* __i386 */
884 884
885 885 num_kernel_ranges = r;
886 886
887 887 /*
888 888 * Create all the kernel pagetables that will have entries
889 889 * shared to user HATs.
890 890 */
891 891 for (r = 0; r < num_kernel_ranges; ++r) {
892 892 rp = &kernel_ranges[r];
893 893 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
894 894 va += LEVEL_SIZE(rp->hkr_level)) {
895 895 htable_t *ht;
896 896
897 897 if (IN_HYPERVISOR_VA(va))
898 898 continue;
899 899
900 900 /* can/must skip if a page mapping already exists */
901 901 if (rp->hkr_level <= mmu.max_page_level &&
902 902 (ht = htable_getpage(kas.a_hat, va, NULL)) !=
903 903 NULL) {
904 904 htable_release(ht);
905 905 continue;
906 906 }
907 907
908 908 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
909 909 NULL);
910 910 }
911 911 }
912 912
913 913 /*
914 914 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
915 915 * page holding the top level pagetable. We use the remainder for
916 916 * the "per CPU" page tables for VLP processes.
917 917 * Map the top level kernel pagetable into the kernel to make
918 918 * it easy to use bcopy to access these tables.
919 919 */
920 920 if (mmu.pae_hat) {
921 921 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
922 922 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
923 923 kas.a_hat->hat_htable->ht_pfn,
924 924 #if !defined(__xpv)
925 925 PROT_WRITE |
926 926 #endif
927 927 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
928 928 HAT_LOAD | HAT_LOAD_NOCONSIST);
929 929 }
930 930 hat_vlp_setup(CPU);
931 931
932 932 /*
933 933 * Create kmap (cached mappings of kernel PTEs)
934 934 * for 32 bit we map from segmap_start .. ekernelheap
935 935 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
936 936 */
937 937 #if defined(__i386)
938 938 size = (uintptr_t)ekernelheap - segmap_start;
939 939 #elif defined(__amd64)
940 940 size = segmapsize;
941 941 #endif
942 942 hat_kmap_init((uintptr_t)segmap_start, size);
943 943 }
944 944
945 945 /*
946 946 * In 32 bit PAE mode, PTEs are 64 bits, but ordinary atomic memory references
947 947 * are 32 bit, so for safety we must use atomic_cas_64() to install these.
948 948 */
949 949 #ifdef __i386
950 950 static void
951 951 reload_pae32(hat_t *hat, cpu_t *cpu)
952 952 {
953 953 x86pte_t *src;
954 954 x86pte_t *dest;
955 955 x86pte_t pte;
956 956 int i;
957 957
958 958 /*
959 959 * Load the 4 entries of the level 2 page table into this
960 960 * cpu's range of the vlp_page and point cr3 at them.
961 961 */
962 962 ASSERT(mmu.pae_hat);
963 963 src = hat->hat_vlp_ptes;
964 964 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
965 965 for (i = 0; i < VLP_NUM_PTES; ++i) {
966 966 for (;;) {
967 967 pte = dest[i];
968 968 if (pte == src[i])
969 969 break;
970 970 if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
971 971 break;
972 972 }
973 973 }
974 974 }
975 975 #endif
976 976
977 977 /*
978 978 * Switch to a new active hat, maintaining bit masks to track active CPUs.
979 979 *
980 980 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
981 981 * remains a 32-bit value.
982 982 */
983 983 void
984 984 hat_switch(hat_t *hat)
985 985 {
986 986 uint64_t newcr3;
987 987 cpu_t *cpu = CPU;
988 988 hat_t *old = cpu->cpu_current_hat;
989 989
990 990 /*
991 991 * set up this information first, so we don't miss any cross calls
992 992 */
993 993 if (old != NULL) {
994 994 if (old == hat)
995 995 return;
996 996 if (old != kas.a_hat)
997 997 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
998 998 }
999 999
1000 1000 /*
1001 1001 * Add this CPU to the active set for this HAT.
1002 1002 */
1003 1003 if (hat != kas.a_hat) {
1004 1004 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1005 1005 }
1006 1006 cpu->cpu_current_hat = hat;
1007 1007
1008 1008 /*
1009 1009 * now go ahead and load cr3
1010 1010 */
1011 1011 if (hat->hat_flags & HAT_VLP) {
1012 1012 #if defined(__amd64)
1013 1013 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1014 1014
1015 1015 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1016 1016 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1017 1017 #elif defined(__i386)
1018 1018 reload_pae32(hat, cpu);
1019 1019 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1020 1020 (cpu->cpu_id + 1) * VLP_SIZE;
1021 1021 #endif
1022 1022 } else {
1023 1023 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1024 1024 }
1025 1025 #ifdef __xpv
1026 1026 {
1027 1027 struct mmuext_op t[2];
1028 1028 uint_t retcnt;
1029 1029 uint_t opcnt = 1;
1030 1030
1031 1031 t[0].cmd = MMUEXT_NEW_BASEPTR;
1032 1032 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1033 1033 #if defined(__amd64)
1034 1034 /*
1035 1035 * There's an interesting problem here, as to what to
1036 1036 * actually specify when switching to the kernel hat.
1037 1037 * For now we'll reuse the kernel hat again.
1038 1038 */
1039 1039 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1040 1040 if (hat == kas.a_hat)
1041 1041 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1042 1042 else
1043 1043 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1044 1044 ++opcnt;
1045 1045 #endif /* __amd64 */
1046 1046 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1047 1047 panic("HYPERVISOR_mmu_update() failed");
1048 1048 ASSERT(retcnt == opcnt);
1049 1049
1050 1050 }
1051 1051 #else
1052 1052 setcr3(newcr3);
1053 1053 #endif
1054 1054 ASSERT(cpu == CPU);
1055 1055 }
1056 1056
1057 1057 /*
1058 1058 * Utility to return a valid x86pte_t from protections, pfn, and level number
1059 1059 */
1060 1060 static x86pte_t
1061 1061 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1062 1062 {
1063 1063 x86pte_t pte;
1064 1064 uint_t cache_attr = attr & HAT_ORDER_MASK;
1065 1065
1066 1066 pte = MAKEPTE(pfn, level);
1067 1067
1068 1068 if (attr & PROT_WRITE)
1069 1069 PTE_SET(pte, PT_WRITABLE);
1070 1070
1071 1071 if (attr & PROT_USER)
1072 1072 PTE_SET(pte, PT_USER);
1073 1073
1074 1074 if (!(attr & PROT_EXEC))
1075 1075 PTE_SET(pte, mmu.pt_nx);
1076 1076
1077 1077 /*
1078 1078 * Set the software bits used to track ref/mod syncs and hments.
1079 1079 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1080 1080 */
1081 1081 if (flags & HAT_LOAD_NOCONSIST)
1082 1082 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1083 1083 else if (attr & HAT_NOSYNC)
1084 1084 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1085 1085
1086 1086 /*
1087 1087 * Set the caching attributes in the PTE. The combination
1088 1088 * of attributes is poorly defined, so we pay attention
1089 1089 * to them in the given order.
1090 1090 *
1091 1091 * The test for HAT_STRICTORDER is different because it's defined
1092 1092 * as "0" - which was a stupid thing to do, but is too late to change!
1093 1093 */
1094 1094 if (cache_attr == HAT_STRICTORDER) {
1095 1095 PTE_SET(pte, PT_NOCACHE);
1096 1096 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1097 1097 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1098 1098 /* nothing to set */;
1099 1099 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1100 1100 PTE_SET(pte, PT_NOCACHE);
1101 1101 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1102 1102 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1103 1103 else
1104 1104 PTE_SET(pte, PT_WRITETHRU);
1105 1105 } else {
1106 1106 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1107 1107 }
1108 1108
1109 1109 return (pte);
1110 1110 }
1111 1111
1112 1112 /*
1113 1113 * Duplicate address translations of the parent to the child.
1114 1114 * This function really isn't used anymore.
1115 1115 */
1116 1116 /*ARGSUSED*/
1117 1117 int
1118 1118 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1119 1119 {
1120 1120 ASSERT((uintptr_t)addr < kernelbase);
1121 1121 ASSERT(new != kas.a_hat);
1122 1122 ASSERT(old != kas.a_hat);
1123 1123 return (0);
1124 1124 }
1125 1125
1126 1126 /*
1127 1127 * Allocate any hat resources required for a process being swapped in.
1128 1128 */
1129 1129 /*ARGSUSED*/
1130 1130 void
1131 1131 hat_swapin(hat_t *hat)
1132 1132 {
1133 1133 /* do nothing - we let everything fault back in */
1134 1134 }
1135 1135
1136 1136 /*
1137 1137 * Unload all translations associated with an address space of a process
1138 1138 * that is being swapped out.
1139 1139 */
1140 1140 void
1141 1141 hat_swapout(hat_t *hat)
1142 1142 {
1143 1143 uintptr_t vaddr = (uintptr_t)0;
1144 1144 uintptr_t eaddr = _userlimit;
1145 1145 htable_t *ht = NULL;
1146 1146 level_t l;
1147 1147
1148 1148 XPV_DISALLOW_MIGRATE();
1149 1149 /*
1150 1150 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
1151 1151 * seg_spt and shared pagetables can't be swapped out.
1152 1152 * Take a look at segspt_shmswapout() - it's a big no-op.
1153 1153 *
1154 1154 * Instead we'll walk through all the address space and unload
1155 1155 * any mappings which we are sure are not shared, not locked.
1156 1156 */
1157 1157 ASSERT(IS_PAGEALIGNED(vaddr));
1158 1158 ASSERT(IS_PAGEALIGNED(eaddr));
1159 1159 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1160 1160 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1161 1161 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1162 1162
1163 1163 while (vaddr < eaddr) {
1164 1164 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1165 1165 if (ht == NULL)
1166 1166 break;
1167 1167
1168 1168 ASSERT(!IN_VA_HOLE(vaddr));
1169 1169
1170 1170 /*
1171 1171 * If the page table is shared skip its entire range.
1172 1172 */
1173 1173 l = ht->ht_level;
1174 1174 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1175 1175 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1176 1176 htable_release(ht);
1177 1177 ht = NULL;
1178 1178 continue;
1179 1179 }
1180 1180
1181 1181 /*
1182 1182 * If the page table has no locked entries, unload this one.
1183 1183 */
1184 1184 if (ht->ht_lock_cnt == 0)
1185 1185 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1186 1186 HAT_UNLOAD_UNMAP);
1187 1187
1188 1188 /*
1189 1189 * If we have a level 0 page table with locked entries,
1190 1190 * skip the entire page table, otherwise skip just one entry.
1191 1191 */
1192 1192 if (ht->ht_lock_cnt > 0 && l == 0)
1193 1193 vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1194 1194 else
1195 1195 vaddr += LEVEL_SIZE(l);
1196 1196 }
1197 1197 if (ht)
1198 1198 htable_release(ht);
1199 1199
1200 1200 /*
1201 1201 * We're in swapout because the system is low on memory, so
1202 1202 * go back and flush all the htables off the cached list.
1203 1203 */
1204 1204 htable_purge_hat(hat);
1205 1205 XPV_ALLOW_MIGRATE();
1206 1206 }
1207 1207
1208 1208 /*
1209 1209 * returns number of bytes that have valid mappings in hat.
1210 1210 */
1211 1211 size_t
1212 1212 hat_get_mapped_size(hat_t *hat)
1213 1213 {
1214 1214 size_t total = 0;
1215 1215 int l;
1216 1216
1217 1217 for (l = 0; l <= mmu.max_page_level; l++)
1218 1218 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1219 1219 total += hat->hat_ism_pgcnt;
1220 1220
1221 1221 return (total);
1222 1222 }
1223 1223
1224 1224 /*
1225 1225 * enable/disable collection of stats for hat.
1226 1226 */
1227 1227 int
1228 1228 hat_stats_enable(hat_t *hat)
1229 1229 {
1230 - atomic_add_32(&hat->hat_stats, 1);
1230 + atomic_inc_32(&hat->hat_stats);
1231 1231 return (1);
1232 1232 }
1233 1233
1234 1234 void
1235 1235 hat_stats_disable(hat_t *hat)
1236 1236 {
1237 - atomic_add_32(&hat->hat_stats, -1);
1237 + atomic_dec_32(&hat->hat_stats);
1238 1238 }
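The conversion this change applies is purely mechanical; a minimal sketch of the old and new forms (refcnt is a hypothetical counter, not from this file):

	#include <sys/atomic.h>

	static volatile uint32_t refcnt;	/* hypothetical counter */

	static void
	refcnt_hold(void)
	{
		/* was: atomic_add_32(&refcnt, 1); */
		atomic_inc_32(&refcnt);
	}

	static void
	refcnt_rele(void)
	{
		/* was: atomic_add_32(&refcnt, -1); */
		atomic_dec_32(&refcnt);
	}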
1239 1239
1240 1240 /*
1241 1241 * Utility to sync the ref/mod bits from a page table entry to the page_t
1242 1242 * We must be holding the mapping list lock when this is called.
1243 1243 */
1244 1244 static void
1245 1245 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1246 1246 {
1247 1247 uint_t rm = 0;
1248 1248 pgcnt_t pgcnt;
1249 1249
1250 1250 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1251 1251 return;
1252 1252
1253 1253 if (PTE_GET(pte, PT_REF))
1254 1254 rm |= P_REF;
1255 1255
1256 1256 if (PTE_GET(pte, PT_MOD))
1257 1257 rm |= P_MOD;
1258 1258
1259 1259 if (rm == 0)
1260 1260 return;
1261 1261
1262 1262 /*
1263 1263 * sync to all constituent pages of a large page
1264 1264 */
1265 1265 ASSERT(x86_hm_held(pp));
1266 1266 pgcnt = page_get_pagecnt(level);
1267 1267 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1268 1268 for (; pgcnt > 0; --pgcnt) {
1269 1269 /*
1270 1270 * hat_page_demote() can't decrease
1271 1271 * pszc below this mapping size
1272 1272 * since this large mapping existed after we
1273 1273 * took mlist lock.
1274 1274 */
1275 1275 ASSERT(pp->p_szc >= level);
1276 1276 hat_page_setattr(pp, rm);
1277 1277 ++pp;
1278 1278 }
1279 1279 }
1280 1280
1281 1281 /*
1282 1282 * This is the set of PTE bits for PFN, permissions and caching
1283 1283 * that are allowed to change on a HAT_LOAD_REMAP
1284 1284 */
1285 1285 #define PT_REMAP_BITS \
1286 1286 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1287 1287 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1288 1288
1289 1289 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1290 1290 /*
1291 1291 * Do the low-level work to get a mapping entered into a HAT's pagetables
1292 1292 * and in the mapping list of the associated page_t.
1293 1293 */
1294 1294 static int
1295 1295 hati_pte_map(
1296 1296 htable_t *ht,
1297 1297 uint_t entry,
1298 1298 page_t *pp,
1299 1299 x86pte_t pte,
1300 1300 int flags,
1301 1301 void *pte_ptr)
1302 1302 {
1303 1303 hat_t *hat = ht->ht_hat;
1304 1304 x86pte_t old_pte;
1305 1305 level_t l = ht->ht_level;
1306 1306 hment_t *hm;
1307 1307 uint_t is_consist;
1308 1308 uint_t is_locked;
1309 1309 int rv = 0;
1310 1310
1311 1311 /*
1312 1312 * Is this a consistent (ie. need mapping list lock) mapping?
1313 1313 */
1314 1314 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1315 1315
1316 1316 /*
1317 1317 * Track locked mapping count in the htable. Do this first,
1318 1318 * as we track locking even if there already is a mapping present.
1319 1319 */
1320 1320 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1321 1321 if (is_locked)
1322 1322 HTABLE_LOCK_INC(ht);
1323 1323
1324 1324 /*
1325 1325 * Acquire the page's mapping list lock and get an hment to use.
1326 1326 * Note that hment_prepare() might return NULL.
1327 1327 */
1328 1328 if (is_consist) {
1329 1329 x86_hm_enter(pp);
1330 1330 hm = hment_prepare(ht, entry, pp);
1331 1331 }
1332 1332
1333 1333 /*
1334 1334 * Set the new pte, retrieving the old one at the same time.
1335 1335 */
1336 1336 old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1337 1337
1338 1338 /*
1339 1339 * Did we get a large page / page table collision?
1340 1340 */
1341 1341 if (old_pte == LPAGE_ERROR) {
1342 1342 if (is_locked)
1343 1343 HTABLE_LOCK_DEC(ht);
1344 1344 rv = -1;
1345 1345 goto done;
1346 1346 }
1347 1347
1348 1348 /*
1349 1349 * If the mapping didn't change there is nothing more to do.
1350 1350 */
1351 1351 if (PTE_EQUIV(pte, old_pte))
1352 1352 goto done;
1353 1353
1354 1354 /*
1355 1355 * Install a new mapping in the page's mapping list
1356 1356 */
1357 1357 if (!PTE_ISVALID(old_pte)) {
1358 1358 if (is_consist) {
1359 1359 hment_assign(ht, entry, pp, hm);
1360 1360 x86_hm_exit(pp);
1361 1361 } else {
1362 1362 ASSERT(flags & HAT_LOAD_NOCONSIST);
1363 1363 }
1364 1364 #if defined(__amd64)
1365 1365 if (ht->ht_flags & HTABLE_VLP) {
1366 1366 cpu_t *cpu = CPU;
1367 1367 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1368 1368 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1369 1369 }
1370 1370 #endif
1371 1371 HTABLE_INC(ht->ht_valid_cnt);
1372 1372 PGCNT_INC(hat, l);
1373 1373 return (rv);
1374 1374 }
1375 1375
1376 1376 /*
1377 1377 * Remap's are more complicated:
1378 1378 * - HAT_LOAD_REMAP must be specified if changing the pfn.
1379 1379 * We also require that NOCONSIST be specified.
1380 1380 * - Otherwise only permission or caching bits may change.
1381 1381 */
1382 1382 if (!PTE_ISPAGE(old_pte, l))
1383 1383 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1384 1384
1385 1385 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1386 1386 REMAPASSERT(flags & HAT_LOAD_REMAP);
1387 1387 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1388 1388 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1389 1389 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1390 1390 pf_is_memory(PTE2PFN(pte, l)));
1391 1391 REMAPASSERT(!is_consist);
1392 1392 }
1393 1393
1394 1394 /*
1395 1395 * We only let remaps change certain bits in the PTE.
1396 1396 */
1397 1397 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1398 1398 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1399 1399 old_pte, pte);
1400 1400
1401 1401 /*
1402 1402 * We don't create any mapping list entries on a remap, so release
1403 1403 * any allocated hment after we drop the mapping list lock.
1404 1404 */
1405 1405 done:
1406 1406 if (is_consist) {
1407 1407 x86_hm_exit(pp);
1408 1408 if (hm != NULL)
1409 1409 hment_free(hm);
1410 1410 }
1411 1411 return (rv);
1412 1412 }
1413 1413
1414 1414 /*
1415 1415 * Internal routine to load a single page table entry. This only fails if
1416 1416 * we attempt to overwrite a page table link with a large page.
1417 1417 */
1418 1418 static int
1419 1419 hati_load_common(
1420 1420 hat_t *hat,
1421 1421 uintptr_t va,
1422 1422 page_t *pp,
1423 1423 uint_t attr,
1424 1424 uint_t flags,
1425 1425 level_t level,
1426 1426 pfn_t pfn)
1427 1427 {
1428 1428 htable_t *ht;
1429 1429 uint_t entry;
1430 1430 x86pte_t pte;
1431 1431 int rv = 0;
1432 1432
1433 1433 /*
1434 1434 * The number 16 is arbitrary and here to catch a recursion problem
1435 1435 * early before we blow out the kernel stack.
1436 1436 */
1437 1437 ++curthread->t_hatdepth;
1438 1438 ASSERT(curthread->t_hatdepth < 16);
1439 1439
1440 1440 ASSERT(hat == kas.a_hat ||
1441 1441 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1442 1442
1443 1443 if (flags & HAT_LOAD_SHARE)
1444 1444 hat->hat_flags |= HAT_SHARED;
1445 1445
1446 1446 /*
1447 1447 * Find the page table that maps this page if it already exists.
1448 1448 */
1449 1449 ht = htable_lookup(hat, va, level);
1450 1450
1451 1451 /*
1452 1452 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1453 1453 */
1454 1454 if (pp == NULL)
1455 1455 flags |= HAT_LOAD_NOCONSIST;
1456 1456
1457 1457 if (ht == NULL) {
1458 1458 ht = htable_create(hat, va, level, NULL);
1459 1459 ASSERT(ht != NULL);
1460 1460 }
1461 1461 entry = htable_va2entry(va, ht);
1462 1462
1463 1463 /*
1464 1464 * a bunch of paranoid error checking
1465 1465 */
1466 1466 ASSERT(ht->ht_busy > 0);
1467 1467 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1468 1468 panic("hati_load_common: bad htable %p, va %p",
1469 1469 (void *)ht, (void *)va);
1470 1470 ASSERT(ht->ht_level == level);
1471 1471
1472 1472 /*
1473 1473 * construct the new PTE
1474 1474 */
1475 1475 if (hat == kas.a_hat)
1476 1476 attr &= ~PROT_USER;
1477 1477 pte = hati_mkpte(pfn, attr, level, flags);
1478 1478 if (hat == kas.a_hat && va >= kernelbase)
1479 1479 PTE_SET(pte, mmu.pt_global);
1480 1480
1481 1481 /*
1482 1482 * establish the mapping
1483 1483 */
1484 1484 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1485 1485
1486 1486 /*
1487 1487 * release the htable and any reserves
1488 1488 */
1489 1489 htable_release(ht);
1490 1490 --curthread->t_hatdepth;
1491 1491 return (rv);
1492 1492 }
1493 1493
1494 1494 /*
1495 1495 * special case of hat_memload to deal with some kernel addrs for performance
1496 1496 */
1497 1497 static void
1498 1498 hat_kmap_load(
1499 1499 caddr_t addr,
1500 1500 page_t *pp,
1501 1501 uint_t attr,
1502 1502 uint_t flags)
1503 1503 {
1504 1504 uintptr_t va = (uintptr_t)addr;
1505 1505 x86pte_t pte;
1506 1506 pfn_t pfn = page_pptonum(pp);
1507 1507 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
1508 1508 htable_t *ht;
1509 1509 uint_t entry;
1510 1510 void *pte_ptr;
1511 1511
1512 1512 /*
1513 1513 * construct the requested PTE
1514 1514 */
1515 1515 attr &= ~PROT_USER;
1516 1516 attr |= HAT_STORECACHING_OK;
1517 1517 pte = hati_mkpte(pfn, attr, 0, flags);
1518 1518 PTE_SET(pte, mmu.pt_global);
1519 1519
1520 1520 /*
1521 1521 * Figure out the pte_ptr and htable and use common code to finish up
1522 1522 */
1523 1523 if (mmu.pae_hat)
1524 1524 pte_ptr = mmu.kmap_ptes + pg_off;
1525 1525 else
1526 1526 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1527 1527 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1528 1528 LEVEL_SHIFT(1)];
1529 1529 entry = htable_va2entry(va, ht);
1530 1530 ++curthread->t_hatdepth;
1531 1531 ASSERT(curthread->t_hatdepth < 16);
1532 1532 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1533 1533 --curthread->t_hatdepth;
1534 1534 }
1535 1535
1536 1536 /*
1537 1537 * hat_memload() - load a translation to the given page struct
1538 1538 *
1539 1539 * Flags for hat_memload/hat_devload/hat_*attr.
1540 1540 *
1541 1541 * HAT_LOAD Default flags to load a translation to the page.
1542 1542 *
1543 1543 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
1544 1544 * and hat_devload().
1545 1545 *
1546 1546 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1547 1547 * sets PT_NOCONSIST
1548 1548 *
1549 1549 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
1550 1550 * that map some user pages (not kas) are shared by more
1551 1551 * than one process (eg. ISM).
1552 1552 *
1553 1553 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
1554 1554 *
1555 1555 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
1556 1556 * point, it's setting up mapping to allocate internal
1557 1557 * hat layer data structures. This flag forces hat layer
1558 1558 * to tap its reserves in order to prevent infinite
1559 1559 * recursion.
1560 1560 *
1561 1561 * The following is a protection attribute (like PROT_READ, etc.)
1562 1562 *
1563 1563 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
1564 1564 * are never cleared.
1565 1565 *
1566 1566 * Installing new valid PTE's and creation of the mapping list
1567 1567 * entry are controlled under the same lock. It's derived from the
1568 1568 * page_t being mapped.
1569 1569 */
1570 1570 static uint_t supported_memload_flags =
1571 1571 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1572 1572 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1573 1573
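As a usage sketch of the interface documented above (the hat, address, and page are assumed to be set up and locked by the caller; this is not part of the diff):

	/* Sketch only: establish a locked, writable mapping of page pp at addr. */
	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK);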
1574 1574 void
1575 1575 hat_memload(
1576 1576 hat_t *hat,
1577 1577 caddr_t addr,
1578 1578 page_t *pp,
1579 1579 uint_t attr,
1580 1580 uint_t flags)
1581 1581 {
1582 1582 uintptr_t va = (uintptr_t)addr;
1583 1583 level_t level = 0;
1584 1584 pfn_t pfn = page_pptonum(pp);
1585 1585
1586 1586 XPV_DISALLOW_MIGRATE();
1587 1587 ASSERT(IS_PAGEALIGNED(va));
1588 1588 ASSERT(hat == kas.a_hat || va < _userlimit);
1589 1589 ASSERT(hat == kas.a_hat ||
1590 1590 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1591 1591 ASSERT((flags & supported_memload_flags) == flags);
1592 1592
1593 1593 ASSERT(!IN_VA_HOLE(va));
1594 1594 ASSERT(!PP_ISFREE(pp));
1595 1595
1596 1596 /*
1597 1597 * kernel address special case for performance.
1598 1598 */
1599 1599 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1600 1600 ASSERT(hat == kas.a_hat);
1601 1601 hat_kmap_load(addr, pp, attr, flags);
1602 1602 XPV_ALLOW_MIGRATE();
1603 1603 return;
1604 1604 }
1605 1605
1606 1606 /*
1607 1607 * This is used for memory with normal caching enabled, so
1608 1608 * always set HAT_STORECACHING_OK.
1609 1609 */
1610 1610 attr |= HAT_STORECACHING_OK;
1611 1611 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1612 1612 panic("unexpected hati_load_common() failure");
1613 1613 XPV_ALLOW_MIGRATE();
1614 1614 }
1615 1615
1616 1616 /* ARGSUSED */
1617 1617 void
1618 1618 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1619 1619 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1620 1620 {
1621 1621 hat_memload(hat, addr, pp, attr, flags);
1622 1622 }
1623 1623
1624 1624 /*
1625 1625 * Load the given array of page structs using large pages when possible
1626 1626 */
1627 1627 void
1628 1628 hat_memload_array(
1629 1629 hat_t *hat,
1630 1630 caddr_t addr,
1631 1631 size_t len,
1632 1632 page_t **pages,
1633 1633 uint_t attr,
1634 1634 uint_t flags)
1635 1635 {
1636 1636 uintptr_t va = (uintptr_t)addr;
1637 1637 uintptr_t eaddr = va + len;
1638 1638 level_t level;
1639 1639 size_t pgsize;
1640 1640 pgcnt_t pgindx = 0;
1641 1641 pfn_t pfn;
1642 1642 pgcnt_t i;
1643 1643
1644 1644 XPV_DISALLOW_MIGRATE();
1645 1645 ASSERT(IS_PAGEALIGNED(va));
1646 1646 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1647 1647 ASSERT(hat == kas.a_hat ||
1648 1648 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1649 1649 ASSERT((flags & supported_memload_flags) == flags);
1650 1650
1651 1651 /*
1652 1652 * memload is used for memory with full caching enabled, so
1653 1653 * set HAT_STORECACHING_OK.
1654 1654 */
1655 1655 attr |= HAT_STORECACHING_OK;
1656 1656
1657 1657 /*
1658 1658 * handle all pages using largest possible pagesize
1659 1659 */
1660 1660 while (va < eaddr) {
1661 1661 /*
1662 1662 * decide what level mapping to use (ie. pagesize)
1663 1663 */
1664 1664 pfn = page_pptonum(pages[pgindx]);
1665 1665 for (level = mmu.max_page_level; ; --level) {
1666 1666 pgsize = LEVEL_SIZE(level);
1667 1667 if (level == 0)
1668 1668 break;
1669 1669
1670 1670 if (!IS_P2ALIGNED(va, pgsize) ||
1671 1671 (eaddr - va) < pgsize ||
1672 1672 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1673 1673 continue;
1674 1674
1675 1675 /*
1676 1676 * To use a large mapping of this size, all the
1677 1677 * pages we are passed must be sequential subpages
1678 1678 * of the large page.
1679 1679 * hat_page_demote() can't change p_szc because
1680 1680 * all pages are locked.
1681 1681 */
1682 1682 if (pages[pgindx]->p_szc >= level) {
1683 1683 for (i = 0; i < mmu_btop(pgsize); ++i) {
1684 1684 if (pfn + i !=
1685 1685 page_pptonum(pages[pgindx + i]))
1686 1686 break;
1687 1687 ASSERT(pages[pgindx + i]->p_szc >=
1688 1688 level);
1689 1689 ASSERT(pages[pgindx] + i ==
1690 1690 pages[pgindx + i]);
1691 1691 }
1692 1692 if (i == mmu_btop(pgsize)) {
1693 1693 #ifdef DEBUG
1694 1694 if (level == 2)
1695 1695 map1gcnt++;
1696 1696 #endif
1697 1697 break;
1698 1698 }
1699 1699 }
1700 1700 }
1701 1701
1702 1702 /*
1703 1703 * Load this page mapping. If the load fails, try a smaller
1704 1704 * pagesize.
1705 1705 */
1706 1706 ASSERT(!IN_VA_HOLE(va));
1707 1707 while (hati_load_common(hat, va, pages[pgindx], attr,
1708 1708 flags, level, pfn) != 0) {
1709 1709 if (level == 0)
1710 1710 panic("unexpected hati_load_common() failure");
1711 1711 --level;
1712 1712 pgsize = LEVEL_SIZE(level);
1713 1713 }
1714 1714
1715 1715 /*
1716 1716 * move to next page
1717 1717 */
1718 1718 va += pgsize;
1719 1719 pgindx += mmu_btop(pgsize);
1720 1720 }
1721 1721 XPV_ALLOW_MIGRATE();
1722 1722 }
1723 1723
1724 1724 /* ARGSUSED */
1725 1725 void
1726 1726 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1727 1727 struct page **pps, uint_t attr, uint_t flags,
1728 1728 hat_region_cookie_t rcookie)
1729 1729 {
1730 1730 hat_memload_array(hat, addr, len, pps, attr, flags);
1731 1731 }
1732 1732
1733 1733 /*
1734 1734 * void hat_devload(hat, addr, len, pf, attr, flags)
1735 1735 * load/lock the given page frame number
1736 1736 *
1737 1737 * Advisory ordering attributes. Apply only to device mappings.
1738 1738 *
1739 1739 * HAT_STRICTORDER: the CPU must issue the references in order, as the
1740 1740 * programmer specified. This is the default.
1741 1741 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1742 1742 * of reordering; store or load with store or load).
1743 1743 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1744 1744 * to consecutive locations (for example, turn two consecutive byte
1745 1745 * stores into one halfword store), and it may batch individual loads
1746 1746 * (for example, turn two consecutive byte loads into one halfword load).
1747 1747 * This also implies re-ordering.
1748 1748 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1749 1749 * until another store occurs. The default is to fetch new data
1750 1750 * on every load. This also implies merging.
1751 1751 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1752 1752 * the device (perhaps with other data) at a later time. The default is
1753 1753 * to push the data right away. This also implies load caching.
1754 1754 *
1755 1755 * Equivalent of hat_memload(), but can be used for device memory where
1756 1756 * there are no page_t's and we support additional flags (write merging, etc).
1757 1757 * Note that we can have large page mappings with this interface.
1758 1758 */
1759 1759 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1760 1760 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1761 1761 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1762 1762
1763 1763 void
1764 1764 hat_devload(
1765 1765 hat_t *hat,
1766 1766 caddr_t addr,
1767 1767 size_t len,
1768 1768 pfn_t pfn,
1769 1769 uint_t attr,
1770 1770 int flags)
1771 1771 {
1772 1772 uintptr_t va = ALIGN2PAGE(addr);
1773 1773 uintptr_t eva = va + len;
1774 1774 level_t level;
1775 1775 size_t pgsize;
1776 1776 page_t *pp;
1777 1777 int f; /* per PTE copy of flags - maybe modified */
1778 1778 uint_t a; /* per PTE copy of attr */
1779 1779
1780 1780 XPV_DISALLOW_MIGRATE();
1781 1781 ASSERT(IS_PAGEALIGNED(va));
1782 1782 ASSERT(hat == kas.a_hat || eva <= _userlimit);
1783 1783 ASSERT(hat == kas.a_hat ||
1784 1784 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1785 1785 ASSERT((flags & supported_devload_flags) == flags);
1786 1786
1787 1787 /*
1788 1788 * handle all pages
1789 1789 */
1790 1790 while (va < eva) {
1791 1791
1792 1792 /*
1793 1793 		 * decide what level mapping to use (i.e. pagesize)
1794 1794 */
1795 1795 for (level = mmu.max_page_level; ; --level) {
1796 1796 pgsize = LEVEL_SIZE(level);
1797 1797 if (level == 0)
1798 1798 break;
1799 1799 if (IS_P2ALIGNED(va, pgsize) &&
1800 1800 (eva - va) >= pgsize &&
1801 1801 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1802 1802 #ifdef DEBUG
1803 1803 if (level == 2)
1804 1804 map1gcnt++;
1805 1805 #endif
1806 1806 break;
1807 1807 }
1808 1808 }
1809 1809
1810 1810 /*
1811 1811 * If this is just memory then allow caching (this happens
1812 1812 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1813 1813 * to override that. If we don't have a page_t then make sure
1814 1814 * NOCONSIST is set.
1815 1815 */
1816 1816 a = attr;
1817 1817 f = flags;
1818 1818 if (!pf_is_memory(pfn))
1819 1819 f |= HAT_LOAD_NOCONSIST;
1820 1820 else if (!(a & HAT_PLAT_NOCACHE))
1821 1821 a |= HAT_STORECACHING_OK;
1822 1822
1823 1823 if (f & HAT_LOAD_NOCONSIST)
1824 1824 pp = NULL;
1825 1825 else
1826 1826 pp = page_numtopp_nolock(pfn);
1827 1827
1828 1828 /*
1829 1829 * Check to make sure we are really trying to map a valid
1830 1830 * memory page. The caller wishing to intentionally map
1831 1831 * free memory pages will have passed the HAT_LOAD_NOCONSIST
1832 1832 		 * flag, in which case pp will be NULL.
1833 1833 */
1834 1834 if (pp != NULL) {
1835 1835 if (PP_ISFREE(pp)) {
1836 1836 panic("hat_devload: loading "
1837 1837 "a mapping to free page %p", (void *)pp);
1838 1838 }
1839 1839
1840 1840 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1841 1841 panic("hat_devload: loading a mapping "
1842 1842 "to an unlocked page %p",
1843 1843 (void *)pp);
1844 1844 }
1845 1845 }
1846 1846
1847 1847 /*
1848 1848 * load this page mapping
1849 1849 */
1850 1850 ASSERT(!IN_VA_HOLE(va));
1851 1851 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1852 1852 if (level == 0)
1853 1853 panic("unexpected hati_load_common() failure");
1854 1854 --level;
1855 1855 pgsize = LEVEL_SIZE(level);
1856 1856 }
1857 1857
1858 1858 /*
1859 1859 * move to next page
1860 1860 */
1861 1861 va += pgsize;
1862 1862 pfn += mmu_btop(pgsize);
1863 1863 }
1864 1864 XPV_ALLOW_MIGRATE();
1865 1865 }
1866 1866
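The advisory attributes and load flags documented above combine as in the following minimal sketch of a hypothetical caller that maps a single device page into the kernel and later tears it down. The names example_map_device_page, va and dev_pfn are illustrative only (not part of this file); the caller is assumed to already hold a page-aligned kernel virtual address and the device's page frame number, and the sketch relies only on declarations this file already includes.

/* illustrative only; assumed caller, not part of this file */
static void
example_map_device_page(caddr_t va, pfn_t dev_pfn)
{
	/*
	 * Device memory has no page_t, so load it NOCONSIST and keep the
	 * mapping locked.  HAT_STRICTORDER is the default ordering
	 * attribute, so nothing extra need be passed for it.
	 */
	hat_devload(kas.a_hat, va, MMU_PAGESIZE, dev_pfn,
	    PROT_READ | PROT_WRITE, HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);

	/* ... access the device through va ... */

	/* drop the mapping again */
	hat_unload(kas.a_hat, va, MMU_PAGESIZE, HAT_UNLOAD_UNLOCK);
}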
1867 1867 /*
1868 1868 * void hat_unlock(hat, addr, len)
1869 1869 * unlock the mappings to a given range of addresses
1870 1870 *
1871 1871 * Locks are tracked by ht_lock_cnt in the htable.
1872 1872 */
1873 1873 void
1874 1874 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1875 1875 {
1876 1876 uintptr_t vaddr = (uintptr_t)addr;
1877 1877 uintptr_t eaddr = vaddr + len;
1878 1878 htable_t *ht = NULL;
1879 1879
1880 1880 /*
1881 1881 * kernel entries are always locked, we don't track lock counts
1882 1882 */
1883 1883 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1884 1884 ASSERT(IS_PAGEALIGNED(vaddr));
1885 1885 ASSERT(IS_PAGEALIGNED(eaddr));
1886 1886 if (hat == kas.a_hat)
1887 1887 return;
1888 1888 if (eaddr > _userlimit)
1889 1889 panic("hat_unlock() address out of range - above _userlimit");
1890 1890
1891 1891 XPV_DISALLOW_MIGRATE();
1892 1892 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1893 1893 while (vaddr < eaddr) {
1894 1894 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1895 1895 if (ht == NULL)
1896 1896 break;
1897 1897
1898 1898 ASSERT(!IN_VA_HOLE(vaddr));
1899 1899
1900 1900 if (ht->ht_lock_cnt < 1)
1901 1901 panic("hat_unlock(): lock_cnt < 1, "
1902 1902 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1903 1903 HTABLE_LOCK_DEC(ht);
1904 1904
1905 1905 vaddr += LEVEL_SIZE(ht->ht_level);
1906 1906 }
1907 1907 if (ht)
1908 1908 htable_release(ht);
1909 1909 XPV_ALLOW_MIGRATE();
1910 1910 }
1911 1911
1912 1912 /* ARGSUSED */
1913 1913 void
1914 1914 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1915 1915 hat_region_cookie_t rcookie)
1916 1916 {
1917 1917 panic("No shared region support on x86");
1918 1918 }
1919 1919
1920 1920 #if !defined(__xpv)
1921 1921 /*
1922 1922 * Cross call service routine to demap a virtual page on
1923 1923  * the current CPU or flush all mappings in the TLB.
1924 1924 */
1925 1925 /*ARGSUSED*/
1926 1926 static int
1927 1927 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1928 1928 {
1929 1929 hat_t *hat = (hat_t *)a1;
1930 1930 caddr_t addr = (caddr_t)a2;
1931 1931
1932 1932 /*
1933 1933 * If the target hat isn't the kernel and this CPU isn't operating
1934 1934 * in the target hat, we can ignore the cross call.
1935 1935 */
1936 1936 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1937 1937 return (0);
1938 1938
1939 1939 /*
1940 1940 * For a normal address, we just flush one page mapping
1941 1941 */
1942 1942 if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1943 1943 mmu_tlbflush_entry(addr);
1944 1944 return (0);
1945 1945 }
1946 1946
1947 1947 /*
1948 1948 * Otherwise we reload cr3 to effect a complete TLB flush.
1949 1949 *
1950 1950 	 * A reload of cr3 on a VLP process also means we must recopy in
1951 1951 	 * the pte values from the struct hat.
1952 1952 */
1953 1953 if (hat->hat_flags & HAT_VLP) {
1954 1954 #if defined(__amd64)
1955 1955 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1956 1956
1957 1957 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1958 1958 #elif defined(__i386)
1959 1959 reload_pae32(hat, CPU);
1960 1960 #endif
1961 1961 }
1962 1962 reload_cr3();
1963 1963 return (0);
1964 1964 }
1965 1965
1966 1966 /*
1967 1967  * Flush all TLB entries, including global (i.e. kernel) ones.
1968 1968 */
1969 1969 static void
1970 1970 flush_all_tlb_entries(void)
1971 1971 {
1972 1972 ulong_t cr4 = getcr4();
1973 1973
1974 1974 if (cr4 & CR4_PGE) {
1975 1975 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1976 1976 setcr4(cr4);
1977 1977
1978 1978 /*
1979 1979 * 32 bit PAE also needs to always reload_cr3()
1980 1980 */
1981 1981 if (mmu.max_level == 2)
1982 1982 reload_cr3();
1983 1983 } else {
1984 1984 reload_cr3();
1985 1985 }
1986 1986 }
1987 1987
1988 1988 #define TLB_CPU_HALTED (01ul)
1989 1989 #define TLB_INVAL_ALL (02ul)
1990 1990 #define CAS_TLB_INFO(cpu, old, new) \
1991 1991 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1992 1992
1993 1993 /*
1994 1994 * Record that a CPU is going idle
1995 1995 */
1996 1996 void
1997 1997 tlb_going_idle(void)
1998 1998 {
1999 1999 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2000 2000 }
2001 2001
2002 2002 /*
2003 2003 * Service a delayed TLB flush if coming out of being idle.
2004 2004  * It is called from the cpu idle notification with interrupts disabled.
2005 2005 */
2006 2006 void
2007 2007 tlb_service(void)
2008 2008 {
2009 2009 ulong_t tlb_info;
2010 2010 ulong_t found;
2011 2011
2012 2012 /*
2013 2013 * We only have to do something if coming out of being idle.
2014 2014 */
2015 2015 tlb_info = CPU->cpu_m.mcpu_tlb_info;
2016 2016 if (tlb_info & TLB_CPU_HALTED) {
2017 2017 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2018 2018
2019 2019 /*
2020 2020 * Atomic clear and fetch of old state.
2021 2021 */
2022 2022 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2023 2023 ASSERT(found & TLB_CPU_HALTED);
2024 2024 tlb_info = found;
2025 2025 SMT_PAUSE();
2026 2026 }
2027 2027 if (tlb_info & TLB_INVAL_ALL)
2028 2028 flush_all_tlb_entries();
2029 2029 }
2030 2030 }
2031 2031 #endif /* !__xpv */
2032 2032
2033 2033 /*
2034 2034 * Internal routine to do cross calls to invalidate a range of pages on
2035 2035 * all CPUs using a given hat.
2036 2036 */
2037 2037 void
2038 2038 hat_tlb_inval(hat_t *hat, uintptr_t va)
2039 2039 {
2040 2040 extern int flushes_require_xcalls; /* from mp_startup.c */
2041 2041 cpuset_t justme;
2042 2042 cpuset_t cpus_to_shootdown;
2043 2043 #ifndef __xpv
2044 2044 cpuset_t check_cpus;
2045 2045 cpu_t *cpup;
2046 2046 int c;
2047 2047 #endif
2048 2048
2049 2049 /*
2050 2050 * If the hat is being destroyed, there are no more users, so
2051 2051 * demap need not do anything.
2052 2052 */
2053 2053 if (hat->hat_flags & HAT_FREEING)
2054 2054 return;
2055 2055
2056 2056 /*
2057 2057 * If demapping from a shared pagetable, we best demap the
2058 2058 * entire set of user TLBs, since we don't know what addresses
2059 2059 * these were shared at.
2060 2060 */
2061 2061 if (hat->hat_flags & HAT_SHARED) {
2062 2062 hat = kas.a_hat;
2063 2063 va = DEMAP_ALL_ADDR;
2064 2064 }
2065 2065
2066 2066 /*
2067 2067 * if not running with multiple CPUs, don't use cross calls
2068 2068 */
2069 2069 if (panicstr || !flushes_require_xcalls) {
2070 2070 #ifdef __xpv
2071 2071 if (va == DEMAP_ALL_ADDR)
2072 2072 xen_flush_tlb();
2073 2073 else
2074 2074 xen_flush_va((caddr_t)va);
2075 2075 #else
2076 2076 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2077 2077 #endif
2078 2078 return;
2079 2079 }
2080 2080
2081 2081
2082 2082 /*
2083 2083 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2084 2084 * Otherwise it's just CPUs currently executing in this hat.
2085 2085 */
2086 2086 kpreempt_disable();
2087 2087 CPUSET_ONLY(justme, CPU->cpu_id);
2088 2088 if (hat == kas.a_hat)
2089 2089 cpus_to_shootdown = khat_cpuset;
2090 2090 else
2091 2091 cpus_to_shootdown = hat->hat_cpus;
2092 2092
2093 2093 #ifndef __xpv
2094 2094 /*
2095 2095 * If any CPUs in the set are idle, just request a delayed flush
2096 2096 * and avoid waking them up.
2097 2097 */
2098 2098 check_cpus = cpus_to_shootdown;
2099 2099 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2100 2100 ulong_t tlb_info;
2101 2101
2102 2102 if (!CPU_IN_SET(check_cpus, c))
2103 2103 continue;
2104 2104 CPUSET_DEL(check_cpus, c);
2105 2105 cpup = cpu[c];
2106 2106 if (cpup == NULL)
2107 2107 continue;
2108 2108
2109 2109 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2110 2110 while (tlb_info == TLB_CPU_HALTED) {
2111 2111 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2112 2112 TLB_CPU_HALTED | TLB_INVAL_ALL);
2113 2113 SMT_PAUSE();
2114 2114 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2115 2115 }
2116 2116 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2117 2117 HATSTAT_INC(hs_tlb_inval_delayed);
2118 2118 CPUSET_DEL(cpus_to_shootdown, c);
2119 2119 }
2120 2120 }
2121 2121 #endif
2122 2122
2123 2123 if (CPUSET_ISNULL(cpus_to_shootdown) ||
2124 2124 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2125 2125
2126 2126 #ifdef __xpv
2127 2127 if (va == DEMAP_ALL_ADDR)
2128 2128 xen_flush_tlb();
2129 2129 else
2130 2130 xen_flush_va((caddr_t)va);
2131 2131 #else
2132 2132 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
2133 2133 #endif
2134 2134
2135 2135 } else {
2136 2136
2137 2137 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2138 2138 #ifdef __xpv
2139 2139 if (va == DEMAP_ALL_ADDR)
2140 2140 xen_gflush_tlb(cpus_to_shootdown);
2141 2141 else
2142 2142 xen_gflush_va((caddr_t)va, cpus_to_shootdown);
2143 2143 #else
2144 2144 xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
2145 2145 CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2146 2146 #endif
2147 2147
2148 2148 }
2149 2149 kpreempt_enable();
2150 2150 }
2151 2151
2152 2152 /*
2153 2153 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2154 2154 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
2155 2155 * handle releasing of the htables.
2156 2156 */
2157 2157 void
2158 2158 hat_pte_unmap(
2159 2159 htable_t *ht,
2160 2160 uint_t entry,
2161 2161 uint_t flags,
2162 2162 x86pte_t old_pte,
2163 2163 void *pte_ptr)
2164 2164 {
2165 2165 hat_t *hat = ht->ht_hat;
2166 2166 hment_t *hm = NULL;
2167 2167 page_t *pp = NULL;
2168 2168 level_t l = ht->ht_level;
2169 2169 pfn_t pfn;
2170 2170
2171 2171 /*
2172 2172 * We always track the locking counts, even if nothing is unmapped
2173 2173 */
2174 2174 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2175 2175 ASSERT(ht->ht_lock_cnt > 0);
2176 2176 HTABLE_LOCK_DEC(ht);
2177 2177 }
2178 2178
2179 2179 /*
2180 2180 * Figure out which page's mapping list lock to acquire using the PFN
2181 2181 	 * passed in the "old" PTE. We then attempt to invalidate the PTE.
2182 2182 * If another thread, probably a hat_pageunload, has asynchronously
2183 2183 * unmapped/remapped this address we'll loop here.
2184 2184 */
2185 2185 ASSERT(ht->ht_busy > 0);
2186 2186 while (PTE_ISVALID(old_pte)) {
2187 2187 pfn = PTE2PFN(old_pte, l);
2188 2188 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2189 2189 pp = NULL;
2190 2190 } else {
2191 2191 #ifdef __xpv
2192 2192 if (pfn == PFN_INVALID)
2193 2193 panic("Invalid PFN, but not PT_NOCONSIST");
2194 2194 #endif
2195 2195 pp = page_numtopp_nolock(pfn);
2196 2196 if (pp == NULL) {
2197 2197 panic("no page_t, not NOCONSIST: old_pte="
2198 2198 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2199 2199 old_pte, (uintptr_t)ht, entry,
2200 2200 (uintptr_t)pte_ptr);
2201 2201 }
2202 2202 x86_hm_enter(pp);
2203 2203 }
2204 2204
2205 2205 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr);
2206 2206
2207 2207 /*
2208 2208 * If the page hadn't changed we've unmapped it and can proceed
2209 2209 */
2210 2210 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2211 2211 break;
2212 2212
2213 2213 /*
2214 2214 * Otherwise, we'll have to retry with the current old_pte.
2215 2215 * Drop the hment lock, since the pfn may have changed.
2216 2216 */
2217 2217 if (pp != NULL) {
2218 2218 x86_hm_exit(pp);
2219 2219 pp = NULL;
2220 2220 } else {
2221 2221 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2222 2222 }
2223 2223 }
2224 2224
2225 2225 /*
2226 2226 * If the old mapping wasn't valid, there's nothing more to do
2227 2227 */
2228 2228 if (!PTE_ISVALID(old_pte)) {
2229 2229 if (pp != NULL)
2230 2230 x86_hm_exit(pp);
2231 2231 return;
2232 2232 }
2233 2233
2234 2234 /*
2235 2235 * Take care of syncing any MOD/REF bits and removing the hment.
2236 2236 */
2237 2237 if (pp != NULL) {
2238 2238 if (!(flags & HAT_UNLOAD_NOSYNC))
2239 2239 hati_sync_pte_to_page(pp, old_pte, l);
2240 2240 hm = hment_remove(pp, ht, entry);
2241 2241 x86_hm_exit(pp);
2242 2242 if (hm != NULL)
2243 2243 hment_free(hm);
2244 2244 }
2245 2245
2246 2246 /*
2247 2247 	 * Handle bookkeeping in the htable and hat
2248 2248 */
2249 2249 ASSERT(ht->ht_valid_cnt > 0);
2250 2250 HTABLE_DEC(ht->ht_valid_cnt);
2251 2251 PGCNT_DEC(hat, l);
2252 2252 }
2253 2253
2254 2254 /*
2255 2255 * very cheap unload implementation to special case some kernel addresses
2256 2256 */
2257 2257 static void
2258 2258 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2259 2259 {
2260 2260 uintptr_t va = (uintptr_t)addr;
2261 2261 uintptr_t eva = va + len;
2262 2262 pgcnt_t pg_index;
2263 2263 htable_t *ht;
2264 2264 uint_t entry;
2265 2265 x86pte_t *pte_ptr;
2266 2266 x86pte_t old_pte;
2267 2267
2268 2268 for (; va < eva; va += MMU_PAGESIZE) {
2269 2269 /*
2270 2270 * Get the PTE
2271 2271 */
2272 2272 pg_index = mmu_btop(va - mmu.kmap_addr);
2273 2273 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2274 2274 old_pte = GET_PTE(pte_ptr);
2275 2275
2276 2276 /*
2277 2277 * get the htable / entry
2278 2278 */
2279 2279 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2280 2280 >> LEVEL_SHIFT(1)];
2281 2281 entry = htable_va2entry(va, ht);
2282 2282
2283 2283 /*
2284 2284 * use mostly common code to unmap it.
2285 2285 */
2286 2286 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
2287 2287 }
2288 2288 }
2289 2289
2290 2290
2291 2291 /*
2292 2292 * unload a range of virtual address space (no callback)
2293 2293 */
2294 2294 void
2295 2295 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2296 2296 {
2297 2297 uintptr_t va = (uintptr_t)addr;
2298 2298
2299 2299 XPV_DISALLOW_MIGRATE();
2300 2300 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2301 2301
2302 2302 /*
2303 2303 * special case for performance.
2304 2304 */
2305 2305 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2306 2306 ASSERT(hat == kas.a_hat);
2307 2307 hat_kmap_unload(addr, len, flags);
2308 2308 } else {
2309 2309 hat_unload_callback(hat, addr, len, flags, NULL);
2310 2310 }
2311 2311 XPV_ALLOW_MIGRATE();
2312 2312 }
2313 2313
2314 2314 /*
2315 2315 * Do the callbacks for ranges being unloaded.
2316 2316 */
2317 2317 typedef struct range_info {
2318 2318 uintptr_t rng_va;
2319 2319 ulong_t rng_cnt;
2320 2320 level_t rng_level;
2321 2321 } range_info_t;
2322 2322
2323 2323 static void
2324 2324 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2325 2325 {
2326 2326 /*
2327 2327 * do callbacks to upper level VM system
2328 2328 */
2329 2329 while (cb != NULL && cnt > 0) {
2330 2330 --cnt;
2331 2331 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2332 2332 cb->hcb_end_addr = cb->hcb_start_addr;
2333 2333 cb->hcb_end_addr +=
2334 2334 		    range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2335 2335 cb->hcb_function(cb);
2336 2336 }
2337 2337 }
2338 2338
2339 2339 /*
2340 2340 * Unload a given range of addresses (has optional callback)
2341 2341 *
2342 2342 * Flags:
2343 2343 * define HAT_UNLOAD 0x00
2344 2344 * define HAT_UNLOAD_NOSYNC 0x02
2345 2345 * define HAT_UNLOAD_UNLOCK 0x04
2346 2346 * define HAT_UNLOAD_OTHER 0x08 - not used
2347 2347 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2348 2348 */
2349 2349 #define MAX_UNLOAD_CNT (8)
2350 2350 void
2351 2351 hat_unload_callback(
2352 2352 hat_t *hat,
2353 2353 caddr_t addr,
2354 2354 size_t len,
2355 2355 uint_t flags,
2356 2356 hat_callback_t *cb)
2357 2357 {
2358 2358 uintptr_t vaddr = (uintptr_t)addr;
2359 2359 uintptr_t eaddr = vaddr + len;
2360 2360 htable_t *ht = NULL;
2361 2361 uint_t entry;
2362 2362 uintptr_t contig_va = (uintptr_t)-1L;
2363 2363 range_info_t r[MAX_UNLOAD_CNT];
2364 2364 uint_t r_cnt = 0;
2365 2365 x86pte_t old_pte;
2366 2366
2367 2367 XPV_DISALLOW_MIGRATE();
2368 2368 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2369 2369 ASSERT(IS_PAGEALIGNED(vaddr));
2370 2370 ASSERT(IS_PAGEALIGNED(eaddr));
2371 2371
2372 2372 /*
2373 2373 * Special case a single page being unloaded for speed. This happens
2374 2374 	 * quite frequently; COW faults after a fork(), for example.
2375 2375 */
2376 2376 if (cb == NULL && len == MMU_PAGESIZE) {
2377 2377 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2378 2378 if (ht != NULL) {
2379 2379 if (PTE_ISVALID(old_pte))
2380 2380 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2381 2381 htable_release(ht);
2382 2382 }
2383 2383 XPV_ALLOW_MIGRATE();
2384 2384 return;
2385 2385 }
2386 2386
2387 2387 while (vaddr < eaddr) {
2388 2388 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2389 2389 if (ht == NULL)
2390 2390 break;
2391 2391
2392 2392 ASSERT(!IN_VA_HOLE(vaddr));
2393 2393
2394 2394 if (vaddr < (uintptr_t)addr)
2395 2395 panic("hat_unload_callback(): unmap inside large page");
2396 2396
2397 2397 /*
2398 2398 		 * We'll do the callbacks for contiguous ranges
2399 2399 */
2400 2400 if (vaddr != contig_va ||
2401 2401 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2402 2402 if (r_cnt == MAX_UNLOAD_CNT) {
2403 2403 handle_ranges(cb, r_cnt, r);
2404 2404 r_cnt = 0;
2405 2405 }
2406 2406 r[r_cnt].rng_va = vaddr;
2407 2407 r[r_cnt].rng_cnt = 0;
2408 2408 r[r_cnt].rng_level = ht->ht_level;
2409 2409 ++r_cnt;
2410 2410 }
2411 2411
2412 2412 /*
2413 2413 * Unload one mapping from the page tables.
2414 2414 */
2415 2415 entry = htable_va2entry(vaddr, ht);
2416 2416 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2417 2417 ASSERT(ht->ht_level <= mmu.max_page_level);
2418 2418 vaddr += LEVEL_SIZE(ht->ht_level);
2419 2419 contig_va = vaddr;
2420 2420 ++r[r_cnt - 1].rng_cnt;
2421 2421 }
2422 2422 if (ht)
2423 2423 htable_release(ht);
2424 2424
2425 2425 /*
2426 2426 * handle last range for callbacks
2427 2427 */
2428 2428 if (r_cnt > 0)
2429 2429 handle_ranges(cb, r_cnt, r);
2430 2430 XPV_ALLOW_MIGRATE();
2431 2431 }
2432 2432
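As a sketch of how the callback machinery above is driven: a hypothetical caller supplies a hat_callback_t whose hcb_function is invoked once per contiguous unloaded range, with hcb_start_addr and hcb_end_addr filled in by handle_ranges(). The example_* names are illustrative and not part of this file.

/* illustrative only; assumed caller, not part of this file */
/*ARGSUSED*/
static void
example_unload_notify(hat_callback_t *cb)
{
	/* the range [hcb_start_addr, hcb_end_addr) has just been unloaded */
}

static void
example_unload_with_callback(hat_t *hat, caddr_t addr, size_t len)
{
	hat_callback_t cb;

	cb.hcb_function = example_unload_notify;
	hat_unload_callback(hat, addr, len, HAT_UNLOAD, &cb);
}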
2433 2433 /*
2434 2434 * Invalidate a virtual address translation on a slave CPU during
2435 2435 * panic() dumps.
2436 2436 */
2437 2437 void
2438 2438 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2439 2439 {
2440 2440 ssize_t sz;
2441 2441 caddr_t endva = va + size;
2442 2442
2443 2443 while (va < endva) {
2444 2444 sz = hat_getpagesize(hat, va);
2445 2445 if (sz < 0) {
2446 2446 #ifdef __xpv
2447 2447 xen_flush_tlb();
2448 2448 #else
2449 2449 flush_all_tlb_entries();
2450 2450 #endif
2451 2451 break;
2452 2452 }
2453 2453 #ifdef __xpv
2454 2454 xen_flush_va(va);
2455 2455 #else
2456 2456 mmu_tlbflush_entry(va);
2457 2457 #endif
2458 2458 va += sz;
2459 2459 }
2460 2460 }
2461 2461
2462 2462 /*
2463 2463 * synchronize mapping with software data structures
2464 2464 *
2465 2465 * This interface is currently only used by the working set monitor
2466 2466 * driver.
2467 2467 */
2468 2468 /*ARGSUSED*/
2469 2469 void
2470 2470 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2471 2471 {
2472 2472 uintptr_t vaddr = (uintptr_t)addr;
2473 2473 uintptr_t eaddr = vaddr + len;
2474 2474 htable_t *ht = NULL;
2475 2475 uint_t entry;
2476 2476 x86pte_t pte;
2477 2477 x86pte_t save_pte;
2478 2478 x86pte_t new;
2479 2479 page_t *pp;
2480 2480
2481 2481 ASSERT(!IN_VA_HOLE(vaddr));
2482 2482 ASSERT(IS_PAGEALIGNED(vaddr));
2483 2483 ASSERT(IS_PAGEALIGNED(eaddr));
2484 2484 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2485 2485
2486 2486 XPV_DISALLOW_MIGRATE();
2487 2487 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2488 2488 try_again:
2489 2489 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2490 2490 if (ht == NULL)
2491 2491 break;
2492 2492 entry = htable_va2entry(vaddr, ht);
2493 2493
2494 2494 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2495 2495 PTE_GET(pte, PT_REF | PT_MOD) == 0)
2496 2496 continue;
2497 2497
2498 2498 /*
2499 2499 * We need to acquire the mapping list lock to protect
2500 2500 * against hat_pageunload(), hat_unload(), etc.
2501 2501 */
2502 2502 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2503 2503 if (pp == NULL)
2504 2504 break;
2505 2505 x86_hm_enter(pp);
2506 2506 save_pte = pte;
2507 2507 pte = x86pte_get(ht, entry);
2508 2508 if (pte != save_pte) {
2509 2509 x86_hm_exit(pp);
2510 2510 goto try_again;
2511 2511 }
2512 2512 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2513 2513 PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2514 2514 x86_hm_exit(pp);
2515 2515 continue;
2516 2516 }
2517 2517
2518 2518 /*
2519 2519 * Need to clear ref or mod bits. We may compete with
2520 2520 * hardware updating the R/M bits and have to try again.
2521 2521 */
2522 2522 if (flags == HAT_SYNC_ZERORM) {
2523 2523 new = pte;
2524 2524 PTE_CLR(new, PT_REF | PT_MOD);
2525 2525 pte = hati_update_pte(ht, entry, pte, new);
2526 2526 if (pte != 0) {
2527 2527 x86_hm_exit(pp);
2528 2528 goto try_again;
2529 2529 }
2530 2530 } else {
2531 2531 /*
2532 2532 * sync the PTE to the page_t
2533 2533 */
2534 2534 hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2535 2535 }
2536 2536 x86_hm_exit(pp);
2537 2537 }
2538 2538 if (ht)
2539 2539 htable_release(ht);
2540 2540 XPV_ALLOW_MIGRATE();
2541 2541 }
2542 2542
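A minimal sketch of the style of use mentioned above: a hypothetical working-set style sampler clears the hardware ref/mod bits over a range so a later pass sees only new activity. example_sample_refmod is an assumed name, not part of this file.

/* illustrative only; assumed caller, not part of this file */
static void
example_sample_refmod(struct as *as, caddr_t addr, size_t len)
{
	/*
	 * Fold the hardware ref/mod bits into the page_t's and clear them
	 * in the PTEs, so the next sampling pass sees only new activity.
	 */
	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
}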
2543 2543 /*
2544 2544 * void hat_map(hat, addr, len, flags)
2545 2545 */
2546 2546 /*ARGSUSED*/
2547 2547 void
2548 2548 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2549 2549 {
2550 2550 /* does nothing */
2551 2551 }
2552 2552
2553 2553 /*
2554 2554 * uint_t hat_getattr(hat, addr, *attr)
2555 2555 * returns attr for <hat,addr> in *attr. returns 0 if there was a
2556 2556 * mapping and *attr is valid, nonzero if there was no mapping and
2557 2557 * *attr is not valid.
2558 2558 */
2559 2559 uint_t
2560 2560 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2561 2561 {
2562 2562 uintptr_t vaddr = ALIGN2PAGE(addr);
2563 2563 htable_t *ht = NULL;
2564 2564 x86pte_t pte;
2565 2565
2566 2566 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2567 2567
2568 2568 if (IN_VA_HOLE(vaddr))
2569 2569 return ((uint_t)-1);
2570 2570
2571 2571 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2572 2572 if (ht == NULL)
2573 2573 return ((uint_t)-1);
2574 2574
2575 2575 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2576 2576 htable_release(ht);
2577 2577 return ((uint_t)-1);
2578 2578 }
2579 2579
2580 2580 *attr = PROT_READ;
2581 2581 if (PTE_GET(pte, PT_WRITABLE))
2582 2582 *attr |= PROT_WRITE;
2583 2583 if (PTE_GET(pte, PT_USER))
2584 2584 *attr |= PROT_USER;
2585 2585 if (!PTE_GET(pte, mmu.pt_nx))
2586 2586 *attr |= PROT_EXEC;
2587 2587 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2588 2588 *attr |= HAT_NOSYNC;
2589 2589 htable_release(ht);
2590 2590 return (0);
2591 2591 }
2592 2592
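A short sketch of the return convention described above: zero means *attr is valid, nonzero means there was no mapping. example_is_writable is an assumed name, and the caller is assumed to hold whatever address space lock its context requires.

/* illustrative only; assumed caller, not part of this file */
static int
example_is_writable(hat_t *hat, caddr_t addr)
{
	uint_t attr;

	if (hat_getattr(hat, addr, &attr) != 0)
		return (0);			/* no mapping */
	return ((attr & PROT_WRITE) != 0);
}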
2593 2593 /*
2594 2594 * hat_updateattr() applies the given attribute change to an existing mapping
2595 2595 */
2596 2596 #define HAT_LOAD_ATTR 1
2597 2597 #define HAT_SET_ATTR 2
2598 2598 #define HAT_CLR_ATTR 3
2599 2599
2600 2600 static void
2601 2601 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2602 2602 {
2603 2603 uintptr_t vaddr = (uintptr_t)addr;
2604 2604 uintptr_t eaddr = (uintptr_t)addr + len;
2605 2605 htable_t *ht = NULL;
2606 2606 uint_t entry;
2607 2607 x86pte_t oldpte, newpte;
2608 2608 page_t *pp;
2609 2609
2610 2610 XPV_DISALLOW_MIGRATE();
2611 2611 ASSERT(IS_PAGEALIGNED(vaddr));
2612 2612 ASSERT(IS_PAGEALIGNED(eaddr));
2613 2613 ASSERT(hat == kas.a_hat ||
2614 2614 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2615 2615 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2616 2616 try_again:
2617 2617 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2618 2618 if (ht == NULL)
2619 2619 break;
2620 2620 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2621 2621 continue;
2622 2622
2623 2623 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2624 2624 if (pp == NULL)
2625 2625 continue;
2626 2626 x86_hm_enter(pp);
2627 2627
2628 2628 newpte = oldpte;
2629 2629 /*
2630 2630 * We found a page table entry in the desired range,
2631 2631 * figure out the new attributes.
2632 2632 */
2633 2633 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2634 2634 if ((attr & PROT_WRITE) &&
2635 2635 !PTE_GET(oldpte, PT_WRITABLE))
2636 2636 newpte |= PT_WRITABLE;
2637 2637
2638 2638 if ((attr & HAT_NOSYNC) &&
2639 2639 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2640 2640 newpte |= PT_NOSYNC;
2641 2641
2642 2642 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2643 2643 newpte &= ~mmu.pt_nx;
2644 2644 }
2645 2645
2646 2646 if (what == HAT_LOAD_ATTR) {
2647 2647 if (!(attr & PROT_WRITE) &&
2648 2648 PTE_GET(oldpte, PT_WRITABLE))
2649 2649 newpte &= ~PT_WRITABLE;
2650 2650
2651 2651 if (!(attr & HAT_NOSYNC) &&
2652 2652 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2653 2653 newpte &= ~PT_SOFTWARE;
2654 2654
2655 2655 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2656 2656 newpte |= mmu.pt_nx;
2657 2657 }
2658 2658
2659 2659 if (what == HAT_CLR_ATTR) {
2660 2660 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2661 2661 newpte &= ~PT_WRITABLE;
2662 2662
2663 2663 if ((attr & HAT_NOSYNC) &&
2664 2664 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2665 2665 newpte &= ~PT_SOFTWARE;
2666 2666
2667 2667 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2668 2668 newpte |= mmu.pt_nx;
2669 2669 }
2670 2670
2671 2671 /*
2672 2672 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2673 2673 * x86pte_set() depends on this.
2674 2674 */
2675 2675 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2676 2676 newpte |= PT_REF | PT_MOD;
2677 2677
2678 2678 /*
2679 2679 * what about PROT_READ or others? this code only handles:
2680 2680 * EXEC, WRITE, NOSYNC
2681 2681 */
2682 2682
2683 2683 /*
2684 2684 * If new PTE really changed, update the table.
2685 2685 */
2686 2686 if (newpte != oldpte) {
2687 2687 entry = htable_va2entry(vaddr, ht);
2688 2688 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2689 2689 if (oldpte != 0) {
2690 2690 x86_hm_exit(pp);
2691 2691 goto try_again;
2692 2692 }
2693 2693 }
2694 2694 x86_hm_exit(pp);
2695 2695 }
2696 2696 if (ht)
2697 2697 htable_release(ht);
2698 2698 XPV_ALLOW_MIGRATE();
2699 2699 }
2700 2700
2701 2701 /*
2702 2702 * Various wrappers for hat_updateattr()
2703 2703 */
2704 2704 void
2705 2705 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2706 2706 {
2707 2707 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2708 2708 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2709 2709 }
2710 2710
2711 2711 void
2712 2712 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2713 2713 {
2714 2714 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2715 2715 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2716 2716 }
2717 2717
2718 2718 void
2719 2719 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2720 2720 {
2721 2721 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2722 2722 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2723 2723 }
2724 2724
2725 2725 void
2726 2726 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2727 2727 {
2728 2728 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2729 2729 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2730 2730 }
2731 2731
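For illustration of the wrappers above: hat_setattr() adds the named attributes, hat_clrattr() removes only those named, and hat_chgattr()/hat_chgprot() replace the set via HAT_LOAD_ATTR. A hypothetical caller revoking write access might look like the sketch below; example_make_readonly is an assumed name, not part of this file.

/* illustrative only; assumed caller, not part of this file */
static void
example_make_readonly(hat_t *hat, caddr_t addr, size_t len)
{
	/* clear just the write attribute; everything else is left alone */
	hat_clrattr(hat, addr, len, PROT_WRITE);
}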
2732 2732 /*
2733 2733 * size_t hat_getpagesize(hat, addr)
2734 2734  *	returns pagesize in bytes for <hat, addr>. returns -1 if there is
2735 2735 * no mapping. This is an advisory call.
2736 2736 */
2737 2737 ssize_t
2738 2738 hat_getpagesize(hat_t *hat, caddr_t addr)
2739 2739 {
2740 2740 uintptr_t vaddr = ALIGN2PAGE(addr);
2741 2741 htable_t *ht;
2742 2742 size_t pagesize;
2743 2743
2744 2744 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2745 2745 if (IN_VA_HOLE(vaddr))
2746 2746 return (-1);
2747 2747 ht = htable_getpage(hat, vaddr, NULL);
2748 2748 if (ht == NULL)
2749 2749 return (-1);
2750 2750 pagesize = LEVEL_SIZE(ht->ht_level);
2751 2751 htable_release(ht);
2752 2752 return (pagesize);
2753 2753 }
2754 2754
2755 2755
2756 2756
2757 2757 /*
2758 2758 * pfn_t hat_getpfnum(hat, addr)
2759 2759 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2760 2760 */
2761 2761 pfn_t
2762 2762 hat_getpfnum(hat_t *hat, caddr_t addr)
2763 2763 {
2764 2764 uintptr_t vaddr = ALIGN2PAGE(addr);
2765 2765 htable_t *ht;
2766 2766 uint_t entry;
2767 2767 pfn_t pfn = PFN_INVALID;
2768 2768
2769 2769 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2770 2770 if (khat_running == 0)
2771 2771 return (PFN_INVALID);
2772 2772
2773 2773 if (IN_VA_HOLE(vaddr))
2774 2774 return (PFN_INVALID);
2775 2775
2776 2776 XPV_DISALLOW_MIGRATE();
2777 2777 /*
2778 2778 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2779 2779 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2780 2780 * this up.
2781 2781 */
2782 2782 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2783 2783 x86pte_t pte;
2784 2784 pgcnt_t pg_index;
2785 2785
2786 2786 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2787 2787 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2788 2788 if (PTE_ISVALID(pte))
2789 2789 /*LINTED [use of constant 0 causes a lint warning] */
2790 2790 pfn = PTE2PFN(pte, 0);
2791 2791 XPV_ALLOW_MIGRATE();
2792 2792 return (pfn);
2793 2793 }
2794 2794
2795 2795 ht = htable_getpage(hat, vaddr, &entry);
2796 2796 if (ht == NULL) {
2797 2797 XPV_ALLOW_MIGRATE();
2798 2798 return (PFN_INVALID);
2799 2799 }
2800 2800 ASSERT(vaddr >= ht->ht_vaddr);
2801 2801 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2802 2802 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2803 2803 if (ht->ht_level > 0)
2804 2804 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2805 2805 htable_release(ht);
2806 2806 XPV_ALLOW_MIGRATE();
2807 2807 return (pfn);
2808 2808 }
2809 2809
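A sketch of the DDI-style use mentioned in the comment inside hat_getpfnum() above: translating a kernel virtual address to a physical address, where PFN_INVALID signals that no mapping exists. example_kva_to_pa is an assumed name, not part of this file.

/* illustrative only; assumed caller, not part of this file */
static paddr_t
example_kva_to_pa(caddr_t kaddr)
{
	pfn_t pfn = hat_getpfnum(kas.a_hat, kaddr);

	if (pfn == PFN_INVALID)
		return (0);	/* no mapping; 0 used as a sentinel here */
	return (pfn_to_pa(pfn) + ((uintptr_t)kaddr & MMU_PAGEOFFSET));
}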
2810 2810 /*
2811 2811 * int hat_probe(hat, addr)
2812 2812 * return 0 if no valid mapping is present. Faster version
2813 2813 * of hat_getattr in certain architectures.
2814 2814 */
2815 2815 int
2816 2816 hat_probe(hat_t *hat, caddr_t addr)
2817 2817 {
2818 2818 uintptr_t vaddr = ALIGN2PAGE(addr);
2819 2819 uint_t entry;
2820 2820 htable_t *ht;
2821 2821 pgcnt_t pg_off;
2822 2822
2823 2823 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2824 2824 ASSERT(hat == kas.a_hat ||
2825 2825 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2826 2826 if (IN_VA_HOLE(vaddr))
2827 2827 return (0);
2828 2828
2829 2829 /*
2830 2830 * Most common use of hat_probe is from segmap. We special case it
2831 2831 * for performance.
2832 2832 */
2833 2833 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2834 2834 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2835 2835 if (mmu.pae_hat)
2836 2836 return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2837 2837 else
2838 2838 return (PTE_ISVALID(
2839 2839 ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2840 2840 }
2841 2841
2842 2842 ht = htable_getpage(hat, vaddr, &entry);
2843 2843 htable_release(ht);
2844 2844 return (ht != NULL);
2845 2845 }
2846 2846
2847 2847 /*
2848 2848 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2849 2849 */
2850 2850 static int
2851 2851 is_it_dism(hat_t *hat, caddr_t va)
2852 2852 {
2853 2853 struct seg *seg;
2854 2854 struct shm_data *shmd;
2855 2855 struct spt_data *sptd;
2856 2856
2857 2857 seg = as_findseg(hat->hat_as, va, 0);
2858 2858 ASSERT(seg != NULL);
2859 2859 ASSERT(seg->s_base <= va);
2860 2860 shmd = (struct shm_data *)seg->s_data;
2861 2861 ASSERT(shmd != NULL);
2862 2862 sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2863 2863 ASSERT(sptd != NULL);
2864 2864 if (sptd->spt_flags & SHM_PAGEABLE)
2865 2865 return (1);
2866 2866 return (0);
2867 2867 }
2868 2868
2869 2869 /*
2870 2870 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2871 2871 * except that we use the ism_hat's existing mappings to determine the pages
2872 2872 * and protections to use for this hat. If we find a full properly aligned
2873 2873 * and sized pagetable, we will attempt to share the pagetable itself.
2874 2874 */
2875 2875 /*ARGSUSED*/
2876 2876 int
2877 2877 hat_share(
2878 2878 hat_t *hat,
2879 2879 caddr_t addr,
2880 2880 hat_t *ism_hat,
2881 2881 caddr_t src_addr,
2882 2882 	size_t		len,	/* almost useless value, see below. */
2883 2883 uint_t ismszc)
2884 2884 {
2885 2885 uintptr_t vaddr_start = (uintptr_t)addr;
2886 2886 uintptr_t vaddr;
2887 2887 uintptr_t eaddr = vaddr_start + len;
2888 2888 uintptr_t ism_addr_start = (uintptr_t)src_addr;
2889 2889 uintptr_t ism_addr = ism_addr_start;
2890 2890 uintptr_t e_ism_addr = ism_addr + len;
2891 2891 htable_t *ism_ht = NULL;
2892 2892 htable_t *ht;
2893 2893 x86pte_t pte;
2894 2894 page_t *pp;
2895 2895 pfn_t pfn;
2896 2896 level_t l;
2897 2897 pgcnt_t pgcnt;
2898 2898 uint_t prot;
2899 2899 int is_dism;
2900 2900 int flags;
2901 2901
2902 2902 /*
2903 2903 * We might be asked to share an empty DISM hat by as_dup()
2904 2904 */
2905 2905 ASSERT(hat != kas.a_hat);
2906 2906 ASSERT(eaddr <= _userlimit);
2907 2907 if (!(ism_hat->hat_flags & HAT_SHARED)) {
2908 2908 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2909 2909 return (0);
2910 2910 }
2911 2911 XPV_DISALLOW_MIGRATE();
2912 2912
2913 2913 /*
2914 2914 * The SPT segment driver often passes us a size larger than there are
2915 2915 * valid mappings. That's because it rounds the segment size up to a
2916 2916 * large pagesize, even if the actual memory mapped by ism_hat is less.
2917 2917 */
2918 2918 ASSERT(IS_PAGEALIGNED(vaddr_start));
2919 2919 ASSERT(IS_PAGEALIGNED(ism_addr_start));
2920 2920 ASSERT(ism_hat->hat_flags & HAT_SHARED);
2921 2921 is_dism = is_it_dism(hat, addr);
2922 2922 while (ism_addr < e_ism_addr) {
2923 2923 /*
2924 2924 * use htable_walk to get the next valid ISM mapping
2925 2925 */
2926 2926 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2927 2927 if (ism_ht == NULL)
2928 2928 break;
2929 2929
2930 2930 /*
2931 2931 * First check to see if we already share the page table.
2932 2932 */
2933 2933 l = ism_ht->ht_level;
2934 2934 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2935 2935 ht = htable_lookup(hat, vaddr, l);
2936 2936 if (ht != NULL) {
2937 2937 if (ht->ht_flags & HTABLE_SHARED_PFN)
2938 2938 goto shared;
2939 2939 htable_release(ht);
2940 2940 goto not_shared;
2941 2941 }
2942 2942
2943 2943 /*
2944 2944 * Can't ever share top table.
2945 2945 */
2946 2946 if (l == mmu.max_level)
2947 2947 goto not_shared;
2948 2948
2949 2949 /*
2950 2950 * Avoid level mismatches later due to DISM faults.
2951 2951 */
2952 2952 if (is_dism && l > 0)
2953 2953 goto not_shared;
2954 2954
2955 2955 /*
2956 2956 * addresses and lengths must align
2957 2957 * table must be fully populated
2958 2958 * no lower level page tables
2959 2959 */
2960 2960 if (ism_addr != ism_ht->ht_vaddr ||
2961 2961 (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2962 2962 goto not_shared;
2963 2963
2964 2964 /*
2965 2965 * The range of address space must cover a full table.
2966 2966 */
2967 2967 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2968 2968 goto not_shared;
2969 2969
2970 2970 /*
2971 2971 * All entries in the ISM page table must be leaf PTEs.
2972 2972 */
2973 2973 if (l > 0) {
2974 2974 int e;
2975 2975
2976 2976 /*
2977 2977 * We know the 0th is from htable_walk() above.
2978 2978 */
2979 2979 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
2980 2980 x86pte_t pte;
2981 2981 pte = x86pte_get(ism_ht, e);
2982 2982 if (!PTE_ISPAGE(pte, l))
2983 2983 goto not_shared;
2984 2984 }
2985 2985 }
2986 2986
2987 2987 /*
2988 2988 * share the page table
2989 2989 */
2990 2990 ht = htable_create(hat, vaddr, l, ism_ht);
2991 2991 shared:
2992 2992 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
2993 2993 ASSERT(ht->ht_shares == ism_ht);
2994 2994 hat->hat_ism_pgcnt +=
2995 2995 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
2996 2996 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
2997 2997 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
2998 2998 htable_release(ht);
2999 2999 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3000 3000 htable_release(ism_ht);
3001 3001 ism_ht = NULL;
3002 3002 continue;
3003 3003
3004 3004 not_shared:
3005 3005 /*
3006 3006 * Unable to share the page table. Instead we will
3007 3007 * create new mappings from the values in the ISM mappings.
3008 3008 * Figure out what level size mappings to use;
3009 3009 		 * Figure out what level size mappings to use.
3010 3010 for (l = ism_ht->ht_level; l > 0; --l) {
3011 3011 if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3012 3012 (vaddr & LEVEL_OFFSET(l)) == 0)
3013 3013 break;
3014 3014 }
3015 3015
3016 3016 /*
3017 3017 * The ISM mapping might be larger than the share area,
3018 3018 		 * so be careful to truncate it if needed.
3019 3019 */
3020 3020 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3021 3021 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3022 3022 } else {
3023 3023 pgcnt = mmu_btop(eaddr - vaddr);
3024 3024 l = 0;
3025 3025 }
3026 3026
3027 3027 pfn = PTE2PFN(pte, ism_ht->ht_level);
3028 3028 ASSERT(pfn != PFN_INVALID);
3029 3029 while (pgcnt > 0) {
3030 3030 /*
3031 3031 * Make a new pte for the PFN for this level.
3032 3032 * Copy protections for the pte from the ISM pte.
3033 3033 */
3034 3034 pp = page_numtopp_nolock(pfn);
3035 3035 ASSERT(pp != NULL);
3036 3036
3037 3037 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3038 3038 if (PTE_GET(pte, PT_WRITABLE))
3039 3039 prot |= PROT_WRITE;
3040 3040 if (!PTE_GET(pte, PT_NX))
3041 3041 prot |= PROT_EXEC;
3042 3042
3043 3043 flags = HAT_LOAD;
3044 3044 if (!is_dism)
3045 3045 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3046 3046 while (hati_load_common(hat, vaddr, pp, prot, flags,
3047 3047 l, pfn) != 0) {
3048 3048 if (l == 0)
3049 3049 panic("hati_load_common() failure");
3050 3050 --l;
3051 3051 }
3052 3052
3053 3053 vaddr += LEVEL_SIZE(l);
3054 3054 ism_addr += LEVEL_SIZE(l);
3055 3055 pfn += mmu_btop(LEVEL_SIZE(l));
3056 3056 pgcnt -= mmu_btop(LEVEL_SIZE(l));
3057 3057 }
3058 3058 }
3059 3059 if (ism_ht != NULL)
3060 3060 htable_release(ism_ht);
3061 3061 XPV_ALLOW_MIGRATE();
3062 3062 return (0);
3063 3063 }
3064 3064
3065 3065
3066 3066 /*
3067 3067 * hat_unshare() is similar to hat_unload_callback(), but
3068 3068 * we have to look for empty shared pagetables. Note that
3069 3069 * hat_unshare() is always invoked against an entire segment.
3070 3070 */
3071 3071 /*ARGSUSED*/
3072 3072 void
3073 3073 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3074 3074 {
3075 3075 uint64_t vaddr = (uintptr_t)addr;
3076 3076 uintptr_t eaddr = vaddr + len;
3077 3077 htable_t *ht = NULL;
3078 3078 uint_t need_demaps = 0;
3079 3079 int flags = HAT_UNLOAD_UNMAP;
3080 3080 level_t l;
3081 3081
3082 3082 ASSERT(hat != kas.a_hat);
3083 3083 ASSERT(eaddr <= _userlimit);
3084 3084 ASSERT(IS_PAGEALIGNED(vaddr));
3085 3085 ASSERT(IS_PAGEALIGNED(eaddr));
3086 3086 XPV_DISALLOW_MIGRATE();
3087 3087
3088 3088 /*
3089 3089 * First go through and remove any shared pagetables.
3090 3090 *
3091 3091 * Note that it's ok to delay the TLB shootdown till the entire range is
3092 3092 * finished, because if hat_pageunload() were to unload a shared
3093 3093 	 * pagetable page, its hat_tlb_inval() would do a global TLB invalidate.
3094 3094 */
3095 3095 l = mmu.max_page_level;
3096 3096 if (l == mmu.max_level)
3097 3097 --l;
3098 3098 for (; l >= 0; --l) {
3099 3099 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3100 3100 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3101 3101 ASSERT(!IN_VA_HOLE(vaddr));
3102 3102 /*
3103 3103 * find a pagetable that maps the current address
3104 3104 */
3105 3105 ht = htable_lookup(hat, vaddr, l);
3106 3106 if (ht == NULL)
3107 3107 continue;
3108 3108 if (ht->ht_flags & HTABLE_SHARED_PFN) {
3109 3109 /*
3110 3110 * clear page count, set valid_cnt to 0,
3111 3111 * let htable_release() finish the job
3112 3112 */
3113 3113 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3114 3114 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3115 3115 ht->ht_valid_cnt = 0;
3116 3116 need_demaps = 1;
3117 3117 }
3118 3118 htable_release(ht);
3119 3119 }
3120 3120 }
3121 3121
3122 3122 /*
3123 3123 * flush the TLBs - since we're probably dealing with MANY mappings
3124 3124 * we do just one CR3 reload.
3125 3125 */
3126 3126 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3127 3127 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3128 3128
3129 3129 /*
3130 3130 * Now go back and clean up any unaligned mappings that
3131 3131 * couldn't share pagetables.
3132 3132 */
3133 3133 if (!is_it_dism(hat, addr))
3134 3134 flags |= HAT_UNLOAD_UNLOCK;
3135 3135 hat_unload(hat, addr, len, flags);
3136 3136 XPV_ALLOW_MIGRATE();
3137 3137 }
3138 3138
3139 3139
3140 3140 /*
3141 3141 * hat_reserve() does nothing
3142 3142 */
3143 3143 /*ARGSUSED*/
3144 3144 void
3145 3145 hat_reserve(struct as *as, caddr_t addr, size_t len)
3146 3146 {
3147 3147 }
3148 3148
3149 3149
3150 3150 /*
3151 3151 * Called when all mappings to a page should have write permission removed.
3152 3152 * Mostly stolen from hat_pagesync()
3153 3153 */
3154 3154 static void
3155 3155 hati_page_clrwrt(struct page *pp)
3156 3156 {
3157 3157 hment_t *hm = NULL;
3158 3158 htable_t *ht;
3159 3159 uint_t entry;
3160 3160 x86pte_t old;
3161 3161 x86pte_t new;
3162 3162 uint_t pszc = 0;
3163 3163
3164 3164 XPV_DISALLOW_MIGRATE();
3165 3165 next_size:
3166 3166 /*
3167 3167 * walk thru the mapping list clearing write permission
3168 3168 */
3169 3169 x86_hm_enter(pp);
3170 3170 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3171 3171 if (ht->ht_level < pszc)
3172 3172 continue;
3173 3173 old = x86pte_get(ht, entry);
3174 3174
3175 3175 for (;;) {
3176 3176 /*
3177 3177 * Is this mapping of interest?
3178 3178 */
3179 3179 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3180 3180 PTE_GET(old, PT_WRITABLE) == 0)
3181 3181 break;
3182 3182
3183 3183 /*
3184 3184 * Clear ref/mod writable bits. This requires cross
3185 3185 * calls to ensure any executing TLBs see cleared bits.
3186 3186 */
3187 3187 new = old;
3188 3188 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3189 3189 old = hati_update_pte(ht, entry, old, new);
3190 3190 if (old != 0)
3191 3191 continue;
3192 3192
3193 3193 break;
3194 3194 }
3195 3195 }
3196 3196 x86_hm_exit(pp);
3197 3197 while (pszc < pp->p_szc) {
3198 3198 page_t *tpp;
3199 3199 pszc++;
3200 3200 tpp = PP_GROUPLEADER(pp, pszc);
3201 3201 if (pp != tpp) {
3202 3202 pp = tpp;
3203 3203 goto next_size;
3204 3204 }
3205 3205 }
3206 3206 XPV_ALLOW_MIGRATE();
3207 3207 }
3208 3208
3209 3209 /*
3210 3210 * void hat_page_setattr(pp, flag)
3211 3211 * void hat_page_clrattr(pp, flag)
3212 3212 * used to set/clr ref/mod bits.
3213 3213 */
3214 3214 void
3215 3215 hat_page_setattr(struct page *pp, uint_t flag)
3216 3216 {
3217 3217 vnode_t *vp = pp->p_vnode;
3218 3218 kmutex_t *vphm = NULL;
3219 3219 page_t **listp;
3220 3220 int noshuffle;
3221 3221
3222 3222 noshuffle = flag & P_NSH;
3223 3223 flag &= ~P_NSH;
3224 3224
3225 3225 if (PP_GETRM(pp, flag) == flag)
3226 3226 return;
3227 3227
3228 3228 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3229 3229 !noshuffle) {
3230 3230 vphm = page_vnode_mutex(vp);
3231 3231 mutex_enter(vphm);
3232 3232 }
3233 3233
3234 3234 PP_SETRM(pp, flag);
3235 3235
3236 3236 if (vphm != NULL) {
3237 3237
3238 3238 /*
3239 3239 * Some File Systems examine v_pages for NULL w/o
3240 3240 * grabbing the vphm mutex. Must not let it become NULL when
3241 3241 * pp is the only page on the list.
3242 3242 */
3243 3243 if (pp->p_vpnext != pp) {
3244 3244 page_vpsub(&vp->v_pages, pp);
3245 3245 if (vp->v_pages != NULL)
3246 3246 listp = &vp->v_pages->p_vpprev->p_vpnext;
3247 3247 else
3248 3248 listp = &vp->v_pages;
3249 3249 page_vpadd(listp, pp);
3250 3250 }
3251 3251 mutex_exit(vphm);
3252 3252 }
3253 3253 }
3254 3254
3255 3255 void
3256 3256 hat_page_clrattr(struct page *pp, uint_t flag)
3257 3257 {
3258 3258 vnode_t *vp = pp->p_vnode;
3259 3259 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3260 3260
3261 3261 /*
3262 3262 	 * Caller is expected to hold the page's io lock for VMODSORT to work
3263 3263 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when the mod
3264 3264 	 * bit is cleared.
3265 3265 	 * We don't have an assert to avoid tripping some existing third party
3266 3266 	 * code. The dirty page is moved back to the top of the v_page list
3267 3267 	 * after IO is done in pvn_write_done().
3268 3268 */
3269 3269 PP_CLRRM(pp, flag);
3270 3270
3271 3271 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3272 3272
3273 3273 /*
3274 3274 * VMODSORT works by removing write permissions and getting
3275 3275 * a fault when a page is made dirty. At this point
3276 3276 * we need to remove write permission from all mappings
3277 3277 * to this page.
3278 3278 */
3279 3279 hati_page_clrwrt(pp);
3280 3280 }
3281 3281 }
3282 3282
3283 3283 /*
3284 3284 * If flag is specified, returns 0 if attribute is disabled
3285 3285  * and nonzero if enabled. If flag specifies multiple attributes
3286 3286 * then returns 0 if ALL attributes are disabled. This is an advisory
3287 3287 * call.
3288 3288 */
3289 3289 uint_t
3290 3290 hat_page_getattr(struct page *pp, uint_t flag)
3291 3291 {
3292 3292 return (PP_GETRM(pp, flag));
3293 3293 }
3294 3294
3295 3295
3296 3296 /*
3297 3297 * common code used by hat_pageunload() and hment_steal()
3298 3298 */
3299 3299 hment_t *
3300 3300 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3301 3301 {
3302 3302 x86pte_t old_pte;
3303 3303 pfn_t pfn = pp->p_pagenum;
3304 3304 hment_t *hm;
3305 3305
3306 3306 /*
3307 3307 * We need to acquire a hold on the htable in order to
3308 3308 * do the invalidate. We know the htable must exist, since
3309 3309 	 * unmaps don't release the htable until after removing any
3310 3310 * hment. Having x86_hm_enter() keeps that from proceeding.
3311 3311 */
3312 3312 htable_acquire(ht);
3313 3313
3314 3314 /*
3315 3315 * Invalidate the PTE and remove the hment.
3316 3316 */
3317 3317 old_pte = x86pte_inval(ht, entry, 0, NULL);
3318 3318 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3319 3319 panic("x86pte_inval() failure found PTE = " FMT_PTE
3320 3320 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3321 3321 old_pte, pfn, (uintptr_t)ht, entry);
3322 3322 }
3323 3323
3324 3324 /*
3325 3325 * Clean up all the htable information for this mapping
3326 3326 */
3327 3327 ASSERT(ht->ht_valid_cnt > 0);
3328 3328 HTABLE_DEC(ht->ht_valid_cnt);
3329 3329 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3330 3330
3331 3331 /*
3332 3332 * sync ref/mod bits to the page_t
3333 3333 */
3334 3334 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3335 3335 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3336 3336
3337 3337 /*
3338 3338 * Remove the mapping list entry for this page.
3339 3339 */
3340 3340 hm = hment_remove(pp, ht, entry);
3341 3341
3342 3342 /*
3343 3343 * drop the mapping list lock so that we might free the
3344 3344 * hment and htable.
3345 3345 */
3346 3346 x86_hm_exit(pp);
3347 3347 htable_release(ht);
3348 3348 return (hm);
3349 3349 }
3350 3350
3351 3351 extern int vpm_enable;
3352 3352 /*
3353 3353 * Unload all translations to a page. If the page is a subpage of a large
3354 3354 * page, the large page mappings are also removed.
3355 3355 *
3356 3356 * The forceflags are unused.
3357 3357 */
3358 3358
3359 3359 /*ARGSUSED*/
3360 3360 static int
3361 3361 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3362 3362 {
3363 3363 page_t *cur_pp = pp;
3364 3364 hment_t *hm;
3365 3365 hment_t *prev;
3366 3366 htable_t *ht;
3367 3367 uint_t entry;
3368 3368 level_t level;
3369 3369
3370 3370 XPV_DISALLOW_MIGRATE();
3371 3371
3372 3372 /*
3373 3373 * prevent recursion due to kmem_free()
3374 3374 */
3375 3375 ++curthread->t_hatdepth;
3376 3376 ASSERT(curthread->t_hatdepth < 16);
3377 3377
3378 3378 #if defined(__amd64)
3379 3379 /*
3380 3380 * clear the vpm ref.
3381 3381 */
3382 3382 if (vpm_enable) {
3383 3383 pp->p_vpmref = 0;
3384 3384 }
3385 3385 #endif
3386 3386 /*
3387 3387 * The loop with next_size handles pages with multiple pagesize mappings
3388 3388 */
3389 3389 next_size:
3390 3390 for (;;) {
3391 3391
3392 3392 /*
3393 3393 * Get a mapping list entry
3394 3394 */
3395 3395 x86_hm_enter(cur_pp);
3396 3396 for (prev = NULL; ; prev = hm) {
3397 3397 hm = hment_walk(cur_pp, &ht, &entry, prev);
3398 3398 if (hm == NULL) {
3399 3399 x86_hm_exit(cur_pp);
3400 3400
3401 3401 /*
3402 3402 * If not part of a larger page, we're done.
3403 3403 */
3404 3404 if (cur_pp->p_szc <= pg_szcd) {
3405 3405 ASSERT(curthread->t_hatdepth > 0);
3406 3406 --curthread->t_hatdepth;
3407 3407 XPV_ALLOW_MIGRATE();
3408 3408 return (0);
3409 3409 }
3410 3410
3411 3411 /*
3412 3412 * Else check the next larger page size.
3413 3413 * hat_page_demote() may decrease p_szc
3414 3414 				 * but that's ok; we'll just take an extra
3415 3415 				 * trip, discover there are no larger mappings,
3416 3416 				 * and return.
3417 3417 */
3418 3418 ++pg_szcd;
3419 3419 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3420 3420 goto next_size;
3421 3421 }
3422 3422
3423 3423 /*
3424 3424 * If this mapping size matches, remove it.
3425 3425 */
3426 3426 level = ht->ht_level;
3427 3427 if (level == pg_szcd)
3428 3428 break;
3429 3429 }
3430 3430
3431 3431 /*
3432 3432 * Remove the mapping list entry for this page.
3433 3433 * Note this does the x86_hm_exit() for us.
3434 3434 */
3435 3435 hm = hati_page_unmap(cur_pp, ht, entry);
3436 3436 if (hm != NULL)
3437 3437 hment_free(hm);
3438 3438 }
3439 3439 }
3440 3440
3441 3441 int
3442 3442 hat_pageunload(struct page *pp, uint_t forceflag)
3443 3443 {
3444 3444 ASSERT(PAGE_EXCL(pp));
3445 3445 return (hati_pageunload(pp, 0, forceflag));
3446 3446 }
3447 3447
3448 3448 /*
3449 3449  * Unload all large mappings to pp and reduce by 1 the p_szc field of every
3450 3450  * large page level that included pp.
3451 3451  *
3452 3452  * pp must be locked EXCL. Even though no other constituent pages are locked
3453 3453  * it's legal to unload large mappings to pp because all constituent pages of
3454 3454  * large locked mappings have to be locked SHARED. Therefore if we have an
3455 3455  * EXCL lock on one of the constituent pages, none of the large mappings to
3456 3456  * pp are locked.
3457 3457 *
3458 3458 * Change (always decrease) p_szc field starting from the last constituent
3459 3459 * page and ending with root constituent page so that root's pszc always shows
3460 3460 * the area where hat_page_demote() may be active.
3461 3461 *
3462 3462 * This mechanism is only used for file system pages where it's not always
3463 3463 * possible to get EXCL locks on all constituent pages to demote the size code
3464 3464 * (as is done for anonymous or kernel large pages).
3465 3465 */
3466 3466 void
3467 3467 hat_page_demote(page_t *pp)
3468 3468 {
3469 3469 uint_t pszc;
3470 3470 uint_t rszc;
3471 3471 uint_t szc;
3472 3472 page_t *rootpp;
3473 3473 page_t *firstpp;
3474 3474 page_t *lastpp;
3475 3475 pgcnt_t pgcnt;
3476 3476
3477 3477 ASSERT(PAGE_EXCL(pp));
3478 3478 ASSERT(!PP_ISFREE(pp));
3479 3479 ASSERT(page_szc_lock_assert(pp));
3480 3480
3481 3481 if (pp->p_szc == 0)
3482 3482 return;
3483 3483
3484 3484 rootpp = PP_GROUPLEADER(pp, 1);
3485 3485 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3486 3486
3487 3487 /*
3488 3488 * all large mappings to pp are gone
3489 3489 	 * and no new ones can be set up since pp is locked exclusively.
3490 3490 *
3491 3491 * Lock the root to make sure there's only one hat_page_demote()
3492 3492 * outstanding within the area of this root's pszc.
3493 3493 *
3494 3494 * Second potential hat_page_demote() is already eliminated by upper
3495 3495 * VM layer via page_szc_lock() but we don't rely on it and use our
3496 3496 * own locking (so that upper layer locking can be changed without
3497 3497 * assumptions that hat depends on upper layer VM to prevent multiple
3498 3498 * hat_page_demote() to be issued simultaneously to the same large
3499 3499 * page).
3500 3500 */
3501 3501 again:
3502 3502 pszc = pp->p_szc;
3503 3503 if (pszc == 0)
3504 3504 return;
3505 3505 rootpp = PP_GROUPLEADER(pp, pszc);
3506 3506 x86_hm_enter(rootpp);
3507 3507 /*
3508 3508 * If root's p_szc is different from pszc we raced with another
3509 3509 * hat_page_demote(). Drop the lock and try to find the root again.
3510 3510 	 * If root's p_szc is greater than pszc, the previous hat_page_demote()
3511 3511 	 * is not done yet. Take and release the mlist lock of root's root to
3512 3512 	 * wait for the previous hat_page_demote() to complete.
3513 3513 */
3514 3514 if ((rszc = rootpp->p_szc) != pszc) {
3515 3515 x86_hm_exit(rootpp);
3516 3516 if (rszc > pszc) {
3517 3517 /* p_szc of a locked non free page can't increase */
3518 3518 ASSERT(pp != rootpp);
3519 3519
3520 3520 rootpp = PP_GROUPLEADER(rootpp, rszc);
3521 3521 x86_hm_enter(rootpp);
3522 3522 x86_hm_exit(rootpp);
3523 3523 }
3524 3524 goto again;
3525 3525 }
3526 3526 ASSERT(pp->p_szc == pszc);
3527 3527
3528 3528 /*
3529 3529 * Decrement by 1 p_szc of every constituent page of a region that
3530 3530 * covered pp. For example if original szc is 3 it gets changed to 2
3531 3531 * everywhere except in region 2 that covered pp. Region 2 that
3532 3532 * covered pp gets demoted to 1 everywhere except in region 1 that
3533 3533 * covered pp. The region 1 that covered pp is demoted to region
3534 3534 * 0. It's done this way because from region 3 we removed level 3
3535 3535 * mappings, from region 2 that covered pp we removed level 2 mappings
3536 3536 * and from region 1 that covered pp we removed level 1 mappings. All
3537 3537  * changes are done from high pfn's to low pfn's so that roots
3538 3538  * are changed last, allowing one to know the largest region where
3539 3539  * hat_page_demote() is still active by only looking at the root page.
3540 3540 *
3541 3541 * This algorithm is implemented in 2 while loops. First loop changes
3542 3542 * p_szc of pages to the right of pp's level 1 region and second
3543 3543 * loop changes p_szc of pages of level 1 region that covers pp
3544 3544 * and all pages to the left of level 1 region that covers pp.
3545 3545 * In the first loop p_szc keeps dropping with every iteration
3546 3546 * and in the second loop it keeps increasing with every iteration.
3547 3547 *
3548 3548 * First loop description: Demote pages to the right of pp outside of
3549 3549 * level 1 region that covers pp. In every iteration of the while
3550 3550 * loop below find the last page of szc region and the first page of
3551 3551 * (szc - 1) region that is immediately to the right of (szc - 1)
3552 3552 * region that covers pp. From last such page to first such page
3553 3553 * change every page's szc to szc - 1. Decrement szc and continue
3554 3554 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3555 3555 * of the szc region, skip to the next iteration.
3556 3556 */
3557 3557 szc = pszc;
3558 3558 while (szc > 1) {
3559 3559 lastpp = PP_GROUPLEADER(pp, szc);
3560 3560 pgcnt = page_get_pagecnt(szc);
3561 3561 lastpp += pgcnt - 1;
3562 3562 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3563 3563 pgcnt = page_get_pagecnt(szc - 1);
3564 3564 if (lastpp - firstpp < pgcnt) {
3565 3565 szc--;
3566 3566 continue;
3567 3567 }
3568 3568 firstpp += pgcnt;
3569 3569 while (lastpp != firstpp) {
3570 3570 ASSERT(lastpp->p_szc == pszc);
3571 3571 lastpp->p_szc = szc - 1;
3572 3572 lastpp--;
3573 3573 }
3574 3574 firstpp->p_szc = szc - 1;
3575 3575 szc--;
3576 3576 }
3577 3577
3578 3578 /*
3579 3579 * Second loop description:
3580 3580 * First iteration changes p_szc to 0 of every
3581 3581 * page of level 1 region that covers pp.
3582 3582 * Subsequent iterations find last page of szc region
3583 3583 * immediately to the left of szc region that covered pp
3584 3584 * and first page of (szc + 1) region that covers pp.
3585 3585 * From last to first page change p_szc of every page to szc.
3586 3586 * Increment szc and continue looping until szc is pszc.
3587 3587 * If pp belongs to the first szc region of the (szc + 1) region,
3588 3588 * skip to the next iteration.
3589 3589 *
3590 3590 */
3591 3591 szc = 0;
3592 3592 while (szc < pszc) {
3593 3593 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3594 3594 if (szc == 0) {
3595 3595 pgcnt = page_get_pagecnt(1);
3596 3596 lastpp = firstpp + (pgcnt - 1);
3597 3597 } else {
3598 3598 lastpp = PP_GROUPLEADER(pp, szc);
3599 3599 if (firstpp == lastpp) {
3600 3600 szc++;
3601 3601 continue;
3602 3602 }
3603 3603 lastpp--;
3604 3604 pgcnt = page_get_pagecnt(szc);
3605 3605 }
3606 3606 while (lastpp != firstpp) {
3607 3607 ASSERT(lastpp->p_szc == pszc);
3608 3608 lastpp->p_szc = szc;
3609 3609 lastpp--;
3610 3610 }
3611 3611 firstpp->p_szc = szc;
3612 3612 if (firstpp == rootpp)
3613 3613 break;
3614 3614 szc++;
3615 3615 }
3616 3616 x86_hm_exit(rootpp);
3617 3617 }
3618 3618
3619 3619 /*
3620 3620 * Get hw stats from hardware into the page struct and reset the hw stats;
3621 3621 * returns the attributes of the page.
3622 3622 * Flags for hat_pagesync, hat_getstat, hat_sync
3623 3623 *
3624 3624 * define HAT_SYNC_ZERORM 0x01
3625 3625 *
3626 3626 * Additional flags for hat_pagesync
3627 3627 *
3628 3628 * define HAT_SYNC_STOPON_REF 0x02
3629 3629 * define HAT_SYNC_STOPON_MOD 0x04
3630 3630 * define HAT_SYNC_STOPON_RM 0x06
3631 3631 * define HAT_SYNC_STOPON_SHARED 0x08
3632 3632 */
3633 3633 uint_t
3634 3634 hat_pagesync(struct page *pp, uint_t flags)
3635 3635 {
3636 3636 hment_t *hm = NULL;
3637 3637 htable_t *ht;
3638 3638 uint_t entry;
3639 3639 x86pte_t old, save_old;
3640 3640 x86pte_t new;
3641 3641 uchar_t nrmbits = P_REF|P_MOD|P_RO;
3642 3642 extern ulong_t po_share;
3643 3643 page_t *save_pp = pp;
3644 3644 uint_t pszc = 0;
3645 3645
3646 3646 ASSERT(PAGE_LOCKED(pp) || panicstr);
3647 3647
3648 3648 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3649 3649 return (pp->p_nrm & nrmbits);
3650 3650
3651 3651 if ((flags & HAT_SYNC_ZERORM) == 0) {
3652 3652
3653 3653 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3654 3654 return (pp->p_nrm & nrmbits);
3655 3655
3656 3656 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3657 3657 return (pp->p_nrm & nrmbits);
3658 3658
3659 3659 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3660 3660 hat_page_getshare(pp) > po_share) {
3661 3661 if (PP_ISRO(pp))
3662 3662 PP_SETREF(pp);
3663 3663 return (pp->p_nrm & nrmbits);
3664 3664 }
3665 3665 }
3666 3666
3667 3667 XPV_DISALLOW_MIGRATE();
3668 3668 next_size:
3669 3669 /*
3670 3670 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3671 3671 */
3672 3672 x86_hm_enter(pp);
3673 3673 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3674 3674 if (ht->ht_level < pszc)
3675 3675 continue;
3676 3676 old = x86pte_get(ht, entry);
3677 3677 try_again:
3678 3678
3679 3679 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3680 3680
3681 3681 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3682 3682 continue;
3683 3683
3684 3684 save_old = old;
3685 3685 if ((flags & HAT_SYNC_ZERORM) != 0) {
3686 3686
3687 3687 /*
3688 3688 * Need to clear ref or mod bits. Need to demap
3689 3689 * to make sure any executing TLBs see cleared bits.
3690 3690 */
3691 3691 new = old;
3692 3692 PTE_CLR(new, PT_REF | PT_MOD);
3693 3693 old = hati_update_pte(ht, entry, old, new);
3694 3694 if (old != 0)
3695 3695 goto try_again;
3696 3696
3697 3697 old = save_old;
3698 3698 }
3699 3699
3700 3700 /*
3701 3701 * Sync the PTE
3702 3702 */
3703 3703 if (!(flags & HAT_SYNC_ZERORM) &&
3704 3704 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3705 3705 hati_sync_pte_to_page(pp, old, ht->ht_level);
3706 3706
3707 3707 /*
3708 3708 * can stop short if we found a ref'd or mod'd page
3709 3709 */
3710 3710 if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
3711 3711     ((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
3712 3712 x86_hm_exit(pp);
3713 3713 goto done;
3714 3714 }
3715 3715 }
3716 3716 x86_hm_exit(pp);
3717 3717 while (pszc < pp->p_szc) {
3718 3718 page_t *tpp;
3719 3719 pszc++;
3720 3720 tpp = PP_GROUPLEADER(pp, pszc);
3721 3721 if (pp != tpp) {
3722 3722 pp = tpp;
3723 3723 goto next_size;
3724 3724 }
3725 3725 }
3726 3726 done:
3727 3727 XPV_ALLOW_MIGRATE();
3728 3728 return (save_pp->p_nrm & nrmbits);
3729 3729 }
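
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): one way a
 * caller might combine the HAT_SYNC_* flags described above. This mirrors
 * the common "is the page dirty?" idiom; the helper name is hypothetical.
 */
static int
example_page_is_dirty(page_t *pp)
{
	uint_t nrm;

	ASSERT(PAGE_LOCKED(pp));
	/* stop at the first modified mapping, don't clear any hw bits */
	nrm = hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
	return ((nrm & P_MOD) != 0);
}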
3730 3730
3731 3731 /*
3732 3732 * Returns the approximate number of mappings to this pp. A return of 0
3733 3733 * implies there are no mappings to the page.
3734 3734 */
3735 3735 ulong_t
3736 3736 hat_page_getshare(page_t *pp)
3737 3737 {
3738 3738 uint_t cnt;
3739 3739 cnt = hment_mapcnt(pp);
3740 3740 #if defined(__amd64)
3741 3741 if (vpm_enable && pp->p_vpmref) {
3742 3742 cnt += 1;
3743 3743 }
3744 3744 #endif
3745 3745 return (cnt);
3746 3746 }
3747 3747
3748 3748 /*
3749 3749 * Return 1 if the number of mappings exceeds sh_thresh.
3750 3750 * Return 0 otherwise.
3751 3751 */
3752 3752 int
3753 3753 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3754 3754 {
3755 3755 return (hat_page_getshare(pp) > sh_thresh);
3756 3756 }
3757 3757
3758 3758 /*
3759 3759 * hat_softlock isn't supported anymore
3760 3760 */
3761 3761 /*ARGSUSED*/
3762 3762 faultcode_t
3763 3763 hat_softlock(
3764 3764 hat_t *hat,
3765 3765 caddr_t addr,
3766 3766 size_t *len,
3767 3767 struct page **page_array,
3768 3768 uint_t flags)
3769 3769 {
3770 3770 return (FC_NOSUPPORT);
3771 3771 }
3772 3772
3773 3773
3774 3774
3775 3775 /*
3776 3776 * Routine to expose supported HAT features to platform independent code.
3777 3777 */
3778 3778 /*ARGSUSED*/
3779 3779 int
3780 3780 hat_supported(enum hat_features feature, void *arg)
3781 3781 {
3782 3782 switch (feature) {
3783 3783
3784 3784 case HAT_SHARED_PT: /* this is really ISM */
3785 3785 return (1);
3786 3786
3787 3787 case HAT_DYNAMIC_ISM_UNMAP:
3788 3788 return (0);
3789 3789
3790 3790 case HAT_VMODSORT:
3791 3791 return (1);
3792 3792
3793 3793 case HAT_SHARED_REGIONS:
3794 3794 return (0);
3795 3795
3796 3796 default:
3797 3797 panic("hat_supported() - unknown feature");
3798 3798 }
3799 3799 return (0);
3800 3800 }
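
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): platform
 * independent code querying a HAT feature before relying on it. The helper
 * name is hypothetical.
 */
static int
example_check_vmodsort(void)
{
	/* on x86 this returns 1 (see the switch above) */
	return (hat_supported(HAT_VMODSORT, NULL));
}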
3801 3801
3802 3802 /*
3803 3803 * Called when a thread is exiting and has been switched to the kernel AS
3804 3804 */
3805 3805 void
3806 3806 hat_thread_exit(kthread_t *thd)
3807 3807 {
3808 3808 ASSERT(thd->t_procp->p_as == &kas);
3809 3809 XPV_DISALLOW_MIGRATE();
3810 3810 hat_switch(thd->t_procp->p_as->a_hat);
3811 3811 XPV_ALLOW_MIGRATE();
3812 3812 }
3813 3813
3814 3814 /*
3815 3815 * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3816 3816 */
3817 3817 /*ARGSUSED*/
3818 3818 void
3819 3819 hat_setup(hat_t *hat, int flags)
3820 3820 {
3821 3821 XPV_DISALLOW_MIGRATE();
3822 3822 kpreempt_disable();
3823 3823
3824 3824 hat_switch(hat);
3825 3825
3826 3826 kpreempt_enable();
3827 3827 XPV_ALLOW_MIGRATE();
3828 3828 }
3829 3829
3830 3830 /*
3831 3831 * Prepare for a CPU private mapping for the given address.
3832 3832 *
3833 3833 * The address can only be used from a single CPU and can be remapped
3834 3834 * using hat_mempte_remap(). Return the address of the PTE.
3835 3835 *
3836 3836 * We do the htable_create() if necessary and increment the valid count so
3837 3837 * the htable can't disappear. We also hat_devload() the page table into
3838 3838 * the kernel so that the PTE is quickly accessed.
3839 3839 */
3840 3840 hat_mempte_t
3841 3841 hat_mempte_setup(caddr_t addr)
3842 3842 {
3843 3843 uintptr_t va = (uintptr_t)addr;
3844 3844 htable_t *ht;
3845 3845 uint_t entry;
3846 3846 x86pte_t oldpte;
3847 3847 hat_mempte_t p;
3848 3848
3849 3849 ASSERT(IS_PAGEALIGNED(va));
3850 3850 ASSERT(!IN_VA_HOLE(va));
3851 3851 ++curthread->t_hatdepth;
3852 3852 XPV_DISALLOW_MIGRATE();
3853 3853 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3854 3854 if (ht == NULL) {
3855 3855 ht = htable_create(kas.a_hat, va, 0, NULL);
3856 3856 entry = htable_va2entry(va, ht);
3857 3857 ASSERT(ht->ht_level == 0);
3858 3858 oldpte = x86pte_get(ht, entry);
3859 3859 }
3860 3860 if (PTE_ISVALID(oldpte))
3861 3861 panic("hat_mempte_setup(): address already mapped "
3862 3862 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3863 3863
3864 3864 /*
3865 3865 * increment ht_valid_cnt so that the pagetable can't disappear
3866 3866 */
3867 3867 HTABLE_INC(ht->ht_valid_cnt);
3868 3868
3869 3869 /*
3870 3870 * return the PTE physical address to the caller.
3871 3871 */
3872 3872 htable_release(ht);
3873 3873 XPV_ALLOW_MIGRATE();
3874 3874 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3875 3875 --curthread->t_hatdepth;
3876 3876 return (p);
3877 3877 }
3878 3878
3879 3879 /*
3880 3880 * Release a CPU private mapping for the given address.
3881 3881 * We decrement the htable valid count so it might be destroyed.
3882 3882 */
3883 3883 /*ARGSUSED1*/
3884 3884 void
3885 3885 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3886 3886 {
3887 3887 htable_t *ht;
3888 3888
3889 3889 XPV_DISALLOW_MIGRATE();
3890 3890 /*
3891 3891 * invalidate any leftover mapping and decrement the htable valid count
3892 3892 */
3893 3893 #ifdef __xpv
3894 3894 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3895 3895 UVMF_INVLPG | UVMF_LOCAL))
3896 3896 panic("HYPERVISOR_update_va_mapping() failed");
3897 3897 #else
3898 3898 {
3899 3899 x86pte_t *pteptr;
3900 3900
3901 3901 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3902 3902 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3903 3903 if (mmu.pae_hat)
3904 3904 *pteptr = 0;
3905 3905 else
3906 3906 *(x86pte32_t *)pteptr = 0;
3907 3907 mmu_tlbflush_entry(addr);
3908 3908 x86pte_mapout();
3909 3909 }
3910 3910 #endif
3911 3911
3912 3912 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3913 3913 if (ht == NULL)
3914 3914 panic("hat_mempte_release(): invalid address");
3915 3915 ASSERT(ht->ht_level == 0);
3916 3916 HTABLE_DEC(ht->ht_valid_cnt);
3917 3917 htable_release(ht);
3918 3918 XPV_ALLOW_MIGRATE();
3919 3919 }
3920 3920
3921 3921 /*
3922 3922 * Apply a temporary CPU private mapping to a page. We flush the TLB only
3923 3923 * on this CPU, so this ought to have been called with preemption disabled.
3924 3924 */
3925 3925 void
3926 3926 hat_mempte_remap(
3927 3927 pfn_t pfn,
3928 3928 caddr_t addr,
3929 3929 hat_mempte_t pte_pa,
3930 3930 uint_t attr,
3931 3931 uint_t flags)
3932 3932 {
3933 3933 uintptr_t va = (uintptr_t)addr;
3934 3934 x86pte_t pte;
3935 3935
3936 3936 /*
3937 3937 * Remap the given PTE to the new page's PFN. Invalidate only
3938 3938 * on this CPU.
3939 3939 */
3940 3940 #ifdef DEBUG
3941 3941 htable_t *ht;
3942 3942 uint_t entry;
3943 3943
3944 3944 ASSERT(IS_PAGEALIGNED(va));
3945 3945 ASSERT(!IN_VA_HOLE(va));
3946 3946 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3947 3947 ASSERT(ht != NULL);
3948 3948 ASSERT(ht->ht_level == 0);
3949 3949 ASSERT(ht->ht_valid_cnt > 0);
3950 3950 ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3951 3951 htable_release(ht);
3952 3952 #endif
3953 3953 XPV_DISALLOW_MIGRATE();
3954 3954 pte = hati_mkpte(pfn, attr, 0, flags);
3955 3955 #ifdef __xpv
3956 3956 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3957 3957 panic("HYPERVISOR_update_va_mapping() failed");
3958 3958 #else
3959 3959 {
3960 3960 x86pte_t *pteptr;
3961 3961
3962 3962 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3963 3963 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3964 3964 if (mmu.pae_hat)
3965 3965 *(x86pte_t *)pteptr = pte;
3966 3966 else
3967 3967 *(x86pte32_t *)pteptr = (x86pte32_t)pte;
3968 3968 mmu_tlbflush_entry(addr);
3969 3969 x86pte_mapout();
3970 3970 }
3971 3971 #endif
3972 3972 XPV_ALLOW_MIGRATE();
3973 3973 }
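
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): the typical
 * lifecycle of a CPU private mapping built from the three interfaces above.
 * The scratch VA, the attr/flag choice and the helper names are assumptions
 * made for the example only.
 */
static caddr_t example_scratch_va;	/* page-aligned kernel VA, assumed reserved elsewhere */
static hat_mempte_t example_scratch_pte;

static void
example_mempte_lifecycle(pfn_t pfn)
{
	/* one time: create the PTE slot and pin its pagetable */
	example_scratch_pte = hat_mempte_setup(example_scratch_va);

	kpreempt_disable();
	/* retarget the scratch VA; only the local CPU's TLB entry is flushed */
	hat_mempte_remap(pfn, example_scratch_va, example_scratch_pte,
	    PROT_READ | PROT_WRITE, 0);
	/* ... access the page through example_scratch_va ... */
	kpreempt_enable();

	/* teardown: unmap and let the pagetable go away if unused */
	hat_mempte_release(example_scratch_va, example_scratch_pte);
}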
3974 3974
3975 3975
3976 3976
3977 3977 /*
3978 3978 * Hat locking functions
3979 3979 * XXX - these two functions are currently being used by hatstats;
3980 3980 * they can be removed by using a per-as mutex for hatstats.
3981 3981 */
3982 3982 void
3983 3983 hat_enter(hat_t *hat)
3984 3984 {
3985 3985 mutex_enter(&hat->hat_mutex);
3986 3986 }
3987 3987
3988 3988 void
3989 3989 hat_exit(hat_t *hat)
3990 3990 {
3991 3991 mutex_exit(&hat->hat_mutex);
3992 3992 }
3993 3993
3994 3994 /*
3995 3995 * HAT part of cpu initialization.
3996 3996 */
3997 3997 void
3998 3998 hat_cpu_online(struct cpu *cpup)
3999 3999 {
4000 4000 if (cpup != CPU) {
4001 4001 x86pte_cpu_init(cpup);
4002 4002 hat_vlp_setup(cpup);
4003 4003 }
4004 4004 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4005 4005 }
4006 4006
4007 4007 /*
4008 4008 * HAT part of cpu deletion.
4009 4009 * (currently, we only call this after the cpu is safely passivated.)
4010 4010 */
4011 4011 void
4012 4012 hat_cpu_offline(struct cpu *cpup)
4013 4013 {
4014 4014 ASSERT(cpup != CPU);
4015 4015
4016 4016 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4017 4017 hat_vlp_teardown(cpup);
4018 4018 x86pte_cpu_fini(cpup);
4019 4019 }
4020 4020
4021 4021 /*
4022 4022 * Function called after all CPUs are brought online.
4023 4023 * Used to remove low address boot mappings.
4024 4024 */
4025 4025 void
4026 4026 clear_boot_mappings(uintptr_t low, uintptr_t high)
4027 4027 {
4028 4028 uintptr_t vaddr = low;
4029 4029 htable_t *ht = NULL;
4030 4030 level_t level;
4031 4031 uint_t entry;
4032 4032 x86pte_t pte;
4033 4033
4034 4034 /*
4035 4035 * On the 1st CPU we can unload the prom mappings; basically we blow away
4036 4036 * all virtual mappings under _userlimit.
4037 4037 */
4038 4038 while (vaddr < high) {
4039 4039 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4040 4040 if (ht == NULL)
4041 4041 break;
4042 4042
4043 4043 level = ht->ht_level;
4044 4044 entry = htable_va2entry(vaddr, ht);
4045 4045 ASSERT(level <= mmu.max_page_level);
4046 4046 ASSERT(PTE_ISPAGE(pte, level));
4047 4047
4048 4048 /*
4049 4049 * Unload the mapping from the page tables.
4050 4050 */
4051 4051 (void) x86pte_inval(ht, entry, 0, NULL);
4052 4052 ASSERT(ht->ht_valid_cnt > 0);
4053 4053 HTABLE_DEC(ht->ht_valid_cnt);
4054 4054 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4055 4055
4056 4056 vaddr += LEVEL_SIZE(ht->ht_level);
4057 4057 }
4058 4058 if (ht)
4059 4059 htable_release(ht);
4060 4060 }
4061 4061
4062 4062 /*
4063 4063 * Atomically update a new translation for a single page. If the
4064 4064 * currently installed PTE doesn't match the value we expect to find,
4065 4065 * it's not updated and we return the PTE we found.
4066 4066 *
4067 4067 * If activating nosync or NOWRITE and the page was modified we need to sync
4068 4068 * with the page_t. Also sync with page_t if clearing ref/mod bits.
4069 4069 */
4070 4070 static x86pte_t
4071 4071 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4072 4072 {
4073 4073 page_t *pp;
4074 4074 uint_t rm = 0;
4075 4075 x86pte_t replaced;
4076 4076
4077 4077 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4078 4078 PTE_GET(expected, PT_MOD | PT_REF) &&
4079 4079 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4080 4080 !PTE_GET(new, PT_MOD | PT_REF))) {
4081 4081
4082 4082 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4083 4083 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4084 4084 ASSERT(pp != NULL);
4085 4085 if (PTE_GET(expected, PT_MOD))
4086 4086 rm |= P_MOD;
4087 4087 if (PTE_GET(expected, PT_REF))
4088 4088 rm |= P_REF;
4089 4089 PTE_CLR(new, PT_MOD | PT_REF);
4090 4090 }
4091 4091
4092 4092 replaced = x86pte_update(ht, entry, expected, new);
4093 4093 if (replaced != expected)
4094 4094 return (replaced);
4095 4095
4096 4096 if (rm) {
4097 4097 /*
4098 4098 * sync to all constituent pages of a large page
4099 4099 */
4100 4100 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4101 4101 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4102 4102 while (pgcnt-- > 0) {
4103 4103 /*
4104 4104 * hat_page_demote() can't decrease
4105 4105 * pszc below this mapping size
4106 4106 * since large mapping existed after we
4107 4107 * took mlist lock.
4108 4108 */
4109 4109 ASSERT(pp->p_szc >= ht->ht_level);
4110 4110 hat_page_setattr(pp, rm);
4111 4111 ++pp;
4112 4112 }
4113 4113 }
4114 4114
4115 4115 return (0);
4116 4116 }
4117 4117
4118 4118 /* ARGSUSED */
4119 4119 void
4120 4120 hat_join_srd(struct hat *hat, vnode_t *evp)
4121 4121 {
4122 4122 }
4123 4123
4124 4124 /* ARGSUSED */
4125 4125 hat_region_cookie_t
4126 4126 hat_join_region(struct hat *hat,
4127 4127 caddr_t r_saddr,
4128 4128 size_t r_size,
4129 4129 void *r_obj,
4130 4130 u_offset_t r_objoff,
4131 4131 uchar_t r_perm,
4132 4132 uchar_t r_pgszc,
4133 4133 hat_rgn_cb_func_t r_cb_function,
4134 4134 uint_t flags)
4135 4135 {
4136 4136 panic("No shared region support on x86");
4137 4137 return (HAT_INVALID_REGION_COOKIE);
4138 4138 }
4139 4139
4140 4140 /* ARGSUSED */
4141 4141 void
4142 4142 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4143 4143 {
4144 4144 panic("No shared region support on x86");
4145 4145 }
4146 4146
4147 4147 /* ARGSUSED */
4148 4148 void
4149 4149 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4150 4150 {
4151 4151 panic("No shared region support on x86");
4152 4152 }
4153 4153
4154 4154
4155 4155 /*
4156 4156 * Kernel Physical Mapping (kpm) facility
4157 4157 *
4158 4158 * Most of the routines needed to support segkpm are almost no-ops on the
4159 4159 * x86 platform. We map in the entire segment when it is created and leave
4160 4160 * it mapped in, so there is no additional work required to set up and tear
4161 4161 * down individual mappings. All of these routines were created to support
4162 4162 * SPARC platforms that have to avoid aliasing in their virtually indexed
4163 4163 * caches.
4164 4164 *
4165 4165 * Most of the routines have sanity checks in them (e.g. verifying that the
4166 4166 * passed-in page is locked). We don't actually care about most of these
4167 4167 * checks on x86, but we leave them in place to identify problems in the
4168 4168 * upper levels.
4169 4169 */
4170 4170
4171 4171 /*
4172 4172 * Map in a locked page and return the vaddr.
4173 4173 */
4174 4174 /*ARGSUSED*/
4175 4175 caddr_t
4176 4176 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4177 4177 {
4178 4178 caddr_t vaddr;
4179 4179
4180 4180 #ifdef DEBUG
4181 4181 if (kpm_enable == 0) {
4182 4182 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4183 4183 return ((caddr_t)NULL);
4184 4184 }
4185 4185
4186 4186 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4187 4187 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4188 4188 return ((caddr_t)NULL);
4189 4189 }
4190 4190 #endif
4191 4191
4192 4192 vaddr = hat_kpm_page2va(pp, 1);
4193 4193
4194 4194 return (vaddr);
4195 4195 }
4196 4196
4197 4197 /*
4198 4198 * Mapout a locked page.
4199 4199 */
4200 4200 /*ARGSUSED*/
4201 4201 void
4202 4202 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4203 4203 {
4204 4204 #ifdef DEBUG
4205 4205 if (kpm_enable == 0) {
4206 4206 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4207 4207 return;
4208 4208 }
4209 4209
4210 4210 if (IS_KPM_ADDR(vaddr) == 0) {
4211 4211 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4212 4212 return;
4213 4213 }
4214 4214
4215 4215 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4216 4216 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4217 4217 return;
4218 4218 }
4219 4219 #endif
4220 4220 }
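
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): since kpm
 * keeps all of physical memory mapped on x86, mapin/mapout reduce to address
 * arithmetic. A caller zeroing a locked page might look like this; the
 * helper name is hypothetical and a NULL kpme is assumed to be acceptable.
 */
static void
example_zero_locked_page(page_t *pp)
{
	caddr_t va;

	ASSERT(PAGE_LOCKED(pp));
	va = hat_kpm_mapin(pp, NULL);
	bzero(va, PAGESIZE);
	hat_kpm_mapout(pp, NULL, va);
}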
4221 4221
4222 4222 /*
4223 4223 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4224 4224 * memory addresses that are not described by a page_t. It can
4225 4225 * also be used for normal pages that are not locked, but beware
4226 4226 * this is dangerous - no locking is performed, so the identity of
4227 4227 * the page could change. hat_kpm_mapin_pfn is not supported when
4228 4228 * vac_colors > 1, because the chosen va depends on the page identity,
4229 4229 * which could change.
4230 4230 * The caller must only pass pfn's for valid physical addresses; violation
4231 4231 * of this rule will cause a panic.
4232 4232 */
4233 4233 caddr_t
4234 4234 hat_kpm_mapin_pfn(pfn_t pfn)
4235 4235 {
4236 4236 caddr_t paddr, vaddr;
4237 4237
4238 4238 if (kpm_enable == 0)
4239 4239 return ((caddr_t)NULL);
4240 4240
4241 4241 paddr = (caddr_t)ptob(pfn);
4242 4242 vaddr = (uintptr_t)kpm_vbase + paddr;
4243 4243
4244 4244 return ((caddr_t)vaddr);
4245 4245 }
4246 4246
4247 4247 /*ARGSUSED*/
4248 4248 void
4249 4249 hat_kpm_mapout_pfn(pfn_t pfn)
4250 4250 {
4251 4251 /* empty */
4252 4252 }
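
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): using
 * hat_kpm_mapin_pfn() to peek at a physical address that has no page_t,
 * e.g. firmware-owned memory. The helper name and the uint32_t access width
 * are assumptions for the example; the pfn must refer to a valid, cacheable
 * physical address.
 */
static uint32_t
example_peek_phys(uint64_t pa)
{
	caddr_t va = hat_kpm_mapin_pfn(mmu_btop(pa));
	uint32_t val;

	if (va == NULL)
		return (0);
	val = *(volatile uint32_t *)(va + (pa & MMU_PAGEOFFSET));
	hat_kpm_mapout_pfn(mmu_btop(pa));
	return (val);
}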
4253 4253
4254 4254 /*
4255 4255 * Return the kpm virtual address for a specific pfn
4256 4256 */
4257 4257 caddr_t
4258 4258 hat_kpm_pfn2va(pfn_t pfn)
4259 4259 {
4260 4260 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4261 4261
4262 4262 ASSERT(!pfn_is_foreign(pfn));
4263 4263 return ((caddr_t)vaddr);
4264 4264 }
4265 4265
4266 4266 /*
4267 4267 * Return the kpm virtual address for the page at pp.
4268 4268 */
4269 4269 /*ARGSUSED*/
4270 4270 caddr_t
4271 4271 hat_kpm_page2va(struct page *pp, int checkswap)
4272 4272 {
4273 4273 return (hat_kpm_pfn2va(pp->p_pagenum));
4274 4274 }
4275 4275
4276 4276 /*
4277 4277 * Return the page frame number for the kpm virtual address vaddr.
4278 4278 */
4279 4279 pfn_t
4280 4280 hat_kpm_va2pfn(caddr_t vaddr)
4281 4281 {
4282 4282 pfn_t pfn;
4283 4283
4284 4284 ASSERT(IS_KPM_ADDR(vaddr));
4285 4285
4286 4286 pfn = (pfn_t)btop(vaddr - kpm_vbase);
4287 4287
4288 4288 return (pfn);
4289 4289 }
4290 4290
4291 4291
4292 4292 /*
4293 4293 * Return the page for the kpm virtual address vaddr.
4294 4294 */
4295 4295 page_t *
4296 4296 hat_kpm_vaddr2page(caddr_t vaddr)
4297 4297 {
4298 4298 pfn_t pfn;
4299 4299
4300 4300 ASSERT(IS_KPM_ADDR(vaddr));
4301 4301
4302 4302 pfn = hat_kpm_va2pfn(vaddr);
4303 4303
4304 4304 return (page_numtopp_nolock(pfn));
4305 4305 }
4306 4306
4307 4307 /*
4308 4308 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4309 4309 * KPM page. This should never happen on x86
4310 4310 * KPM page. This should never happen on x86.
4311 4311 int
4312 4312 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4313 4313 {
4314 4314 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4315 4315 (void *)hat, (void *)vaddr);
4316 4316
4317 4317 return (0);
4318 4318 }
4319 4319
4320 4320 /*ARGSUSED*/
4321 4321 void
4322 4322 hat_kpm_mseghash_clear(int nentries)
4323 4323 {}
4324 4324
4325 4325 /*ARGSUSED*/
4326 4326 void
4327 4327 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4328 4328 {}
4329 4329
4330 4330 #ifndef __xpv
4331 4331 void
4332 4332 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4333 4333 offset_t kpm_pages_off)
4334 4334 {
4335 4335 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4336 4336 pfn_t base, end;
4337 4337
4338 4338 /*
4339 4339 * kphysm_add_memory_dynamic() does not set nkpmpgs
4340 4340 * when page_t memory is externally allocated. That
4341 4341 * code must properly calculate nkpmpgs in all cases
4342 4342 * if nkpmpgs needs to be used at some point.
4343 4343 */
4344 4344
4345 4345 /*
4346 4346 * The meta (page_t) pages for dynamically added memory are allocated
4347 4347 * either from the incoming memory itself or from existing memory.
4348 4348 * In the former case the base of the incoming pages will be different
4349 4349 * than the base of the dynamic segment so call memseg_get_start() to
4350 4350 * get the actual base of the incoming memory for each case.
4351 4351 */
4352 4352
4353 4353 base = memseg_get_start(msp);
4354 4354 end = msp->pages_end;
4355 4355
4356 4356 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4357 4357 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4358 4358 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4359 4359 }
4360 4360
4361 4361 void
4362 4362 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4363 4363 {
4364 4364 _NOTE(ARGUNUSED(msp));
4365 4365 }
4366 4366
4367 4367 void
4368 4368 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4369 4369 {
4370 4370 _NOTE(ARGUNUSED(msp));
4371 4371 }
4372 4372
4373 4373 /*
4374 4374 * Return end of metadata for an already setup memseg.
4375 4375 * X86 platforms don't need per-page meta data to support kpm.
4376 4376 */
4377 4377 caddr_t
4378 4378 hat_kpm_mseg_reuse(struct memseg *msp)
4379 4379 {
4380 4380 return ((caddr_t)msp->epages);
4381 4381 }
4382 4382
4383 4383 void
4384 4384 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4385 4385 {
4386 4386 _NOTE(ARGUNUSED(msp, mspp));
4387 4387 ASSERT(0);
4388 4388 }
4389 4389
4390 4390 void
4391 4391 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4392 4392 struct memseg *lo, struct memseg *mid, struct memseg *hi)
4393 4393 {
4394 4394 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4395 4395 ASSERT(0);
4396 4396 }
4397 4397
4398 4398 /*
4399 4399 * Walk the memsegs chain, applying func to each memseg span.
4400 4400 */
4401 4401 void
4402 4402 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4403 4403 {
4404 4404 pfn_t pbase, pend;
4405 4405 void *base;
4406 4406 size_t size;
4407 4407 struct memseg *msp;
4408 4408
4409 4409 for (msp = memsegs; msp; msp = msp->next) {
4410 4410 pbase = msp->pages_base;
4411 4411 pend = msp->pages_end;
4412 4412 base = ptob(pbase) + kpm_vbase;
4413 4413 size = ptob(pend - pbase);
4414 4414 func(arg, base, size);
4415 4415 }
4416 4416 }
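
/*
 * Illustrative sketch (editor's addition, not part of hat_i86.c): a callback
 * suitable for hat_kpm_walk() above, here just totalling the bytes spanned
 * by all memsegs. The names are hypothetical.
 */
static void
example_span_counter(void *arg, void *base, size_t size)
{
	_NOTE(ARGUNUSED(base));
	*(size_t *)arg += size;
}

static size_t
example_total_kpm_bytes(void)
{
	size_t total = 0;

	hat_kpm_walk(example_span_counter, &total);
	return (total);
}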
4417 4417
4418 4418 #else /* __xpv */
4419 4419
4420 4420 /*
4421 4421 * There are specific Hypervisor calls to establish and remove mappings
4422 4422 * to grant table references and the privcmd driver. We have to ensure
4423 4423 * that a page table actually exists.
4424 4424 */
4425 4425 void
4426 4426 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4427 4427 {
4428 4428 maddr_t base_ma;
4429 4429 htable_t *ht;
4430 4430 uint_t entry;
4431 4431
4432 4432 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4433 4433 XPV_DISALLOW_MIGRATE();
4434 4434 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4435 4435
4436 4436 /*
4437 4437 * if an address for pte_ma is passed in, return the MA of the pte
4438 4438 * for this specific address. This address is only valid as long
4439 4439 * as the htable stays locked.
4440 4440 */
4441 4441 if (pte_ma != NULL) {
4442 4442 entry = htable_va2entry((uintptr_t)addr, ht);
4443 4443 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4444 4444 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4445 4445 }
4446 4446 XPV_ALLOW_MIGRATE();
4447 4447 }
4448 4448
4449 4449 void
4450 4450 hat_release_mapping(hat_t *hat, caddr_t addr)
4451 4451 {
4452 4452 htable_t *ht;
4453 4453
4454 4454 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4455 4455 XPV_DISALLOW_MIGRATE();
4456 4456 ht = htable_lookup(hat, (uintptr_t)addr, 0);
4457 4457 ASSERT(ht != NULL);
4458 4458 ASSERT(ht->ht_busy >= 2);
4459 4459 htable_release(ht);
4460 4460 htable_release(ht);
4461 4461 XPV_ALLOW_MIGRATE();
4462 4462 }
4463 4463 #endif /* __xpv */