1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright (c) 2010, Intel Corporation.
26 * All rights reserved.
27 */
28 /*
29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
31 */
32
33 /*
34 * VM - Hardware Address Translation management for i386 and amd64
35 *
36 * Implementation of the interfaces described in <common/vm/hat.h>
37 *
38 * Nearly all the details of how the hardware is managed should not be
39 * visible outside this layer except for misc. machine specific functions
40 * that work in conjunction with this code.
41 *
42 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
43 */
44
45 #include <sys/machparam.h>
46 #include <sys/machsystm.h>
47 #include <sys/mman.h>
48 #include <sys/types.h>
49 #include <sys/systm.h>
50 #include <sys/cpuvar.h>
51 #include <sys/thread.h>
52 #include <sys/proc.h>
53 #include <sys/cpu.h>
54 #include <sys/kmem.h>
55 #include <sys/disp.h>
56 #include <sys/shm.h>
57 #include <sys/sysmacros.h>
58 #include <sys/machparam.h>
59 #include <sys/vmem.h>
60 #include <sys/vmsystm.h>
61 #include <sys/promif.h>
62 #include <sys/var.h>
63 #include <sys/x86_archext.h>
64 #include <sys/atomic.h>
65 #include <sys/bitmap.h>
66 #include <sys/controlregs.h>
67 #include <sys/bootconf.h>
68 #include <sys/bootsvcs.h>
69 #include <sys/bootinfo.h>
70 #include <sys/archsystm.h>
71
72 #include <vm/seg_kmem.h>
73 #include <vm/hat_i86.h>
74 #include <vm/as.h>
75 #include <vm/seg.h>
76 #include <vm/page.h>
77 #include <vm/seg_kp.h>
78 #include <vm/seg_kpm.h>
79 #include <vm/vm_dep.h>
80 #ifdef __xpv
81 #include <sys/hypervisor.h>
82 #endif
83 #include <vm/kboot_mmu.h>
84 #include <vm/seg_spt.h>
85
86 #include <sys/cmn_err.h>
87
88 /*
89 * Basic parameters for hat operation.
90 */
91 struct hat_mmu_info mmu;
92
93 /*
94 * The page that is the kernel's top level pagetable.
95 *
96 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
97 * on this 4K page for its top level page table. The remaining groups of
98 * 4 entries are used for per processor copies of user VLP pagetables for
99 * running threads. See hat_switch() and reload_pae32() for details.
100 *
101 * vlp_page[0..3] - level==2 PTEs for kernel HAT
102 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
103 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
104 * etc...
105 */
106 static x86pte_t *vlp_page;
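
/*
 * As a rough illustration of the layout above (a sketch, not authoritative):
 * the user thread running on cpu 2 owns vlp_page[12..15], i.e. the 4 entries
 * starting at (cpu_id + 1) * VLP_NUM_PTES.  hat_switch()/reload_pae32() copy
 * that hat's PDPTEs into this slot and point the cpu's %cr3 at the physical
 * address of the kernel top level pagetable plus (cpu_id + 1) * VLP_SIZE;
 * vlp_page is simply a kernel mapping of that same page (see
 * hat_init_finish()), so %cr3 lands exactly on those four entries.
 */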
107
108 /*
109 * forward declaration of internal utility routines
110 */
111 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
112 x86pte_t new);
113
114 /*
115 * The kernel address space exists in all HATs. To implement this the
116 * kernel reserves a fixed number of entries in the topmost level(s) of page
117 * tables. The values are set up during startup and then copied to every user
118 * hat created by hat_alloc(). This means that kernelbase must be:
119 *
120 * 4Meg aligned for 32 bit kernels
121 * 512Gig aligned for x86_64 64 bit kernels
122 *
123 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
124 * to each user hat.
125 */
126 typedef struct hat_kernel_range {
127 level_t hkr_level;
128 uintptr_t hkr_start_va;
129 uintptr_t hkr_end_va; /* zero means to end of memory */
130 } hat_kernel_range_t;
131 #define NUM_KERNEL_RANGE 2
132 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
133 static int num_kernel_ranges;
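
/*
 * For example (see hat_init_finish() where these are filled in), a 64 bit
 * non-hypervisor kernel typically ends up with a single range:
 * { hkr_level = 3, hkr_start_va = kernelbase, hkr_end_va = 0 }, meaning
 * every level 3 entry from kernelbase to the end of memory is copied into
 * each newly created user hat.
 */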
134
135 uint_t use_boot_reserve = 1; /* cleared after early boot process */
136 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
137
138 /*
139 * enable_1gpg: controls 1g page support for user applications.
140 * By default, 1g pages are exported to user applications. enable_1gpg can
141 * be set to 0 to not export.
142 */
143 int enable_1gpg = 1;
144
145 /*
146 * AMD shanghai processors provide better management of 1gb ptes in their tlb.
147 * By default, 1g page support will be disabled for pre-shanghai AMD
148 * processors that don't have optimal tlb support for the 1g page size.
149 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
150 * processors.
151 */
152 int chk_optimal_1gtlb = 1;
153
154
155 #ifdef DEBUG
156 uint_t map1gcnt;
157 #endif
158
159
160 /*
161 * A cpuset for all cpus. This is used for kernel address cross calls, since
162 * the kernel addresses apply to all cpus.
163 */
164 cpuset_t khat_cpuset;
165
166 /*
167 * management stuff for hat structures
168 */
169 kmutex_t hat_list_lock;
170 kcondvar_t hat_list_cv;
171 kmem_cache_t *hat_cache;
172 kmem_cache_t *hat_hash_cache;
173 kmem_cache_t *vlp_hash_cache;
174
175 /*
176 * Simple statistics
177 */
178 struct hatstats hatstat;
179
180 /*
181 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
182 * correctly. For such hypervisors we must set PT_USER for kernel
183 * entries ourselves (normally the emulation would set PT_USER for
184 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
185 * thus set appropriately. Note that dboot/kbm is OK, as only the full
186 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
187 * incorrect.
188 */
189 int pt_kern;
190
191 /*
192 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
193 */
194 extern void atomic_orb(uchar_t *addr, uchar_t val);
195 extern void atomic_andb(uchar_t *addr, uchar_t val);
196
197 #ifndef __xpv
198 extern pfn_t memseg_get_start(struct memseg *);
199 #endif
200
201 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
202 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
203 #define PP_ISREF(pp) PP_GETRM(pp, P_REF)
204 #define PP_ISRO(pp) PP_GETRM(pp, P_RO)
205
206 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
207 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
208 #define PP_SETREF(pp) PP_SETRM(pp, P_REF)
209 #define PP_SETRO(pp) PP_SETRM(pp, P_RO)
210
211 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
212 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
213 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
214 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
215 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
216
217 /*
218 * kmem cache constructor for struct hat
219 */
220 /*ARGSUSED*/
221 static int
222 hati_constructor(void *buf, void *handle, int kmflags)
223 {
224 hat_t *hat = buf;
225
226 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
227 bzero(hat->hat_pages_mapped,
228 sizeof (pgcnt_t) * (mmu.max_page_level + 1));
229 hat->hat_ism_pgcnt = 0;
230 hat->hat_stats = 0;
231 hat->hat_flags = 0;
232 CPUSET_ZERO(hat->hat_cpus);
233 hat->hat_htable = NULL;
234 hat->hat_ht_hash = NULL;
235 return (0);
236 }
237
238 /*
239 * Allocate a hat structure for as. We also create the top level
240 * htable and initialize it to contain the kernel hat entries.
241 */
242 hat_t *
243 hat_alloc(struct as *as)
244 {
245 hat_t *hat;
246 htable_t *ht; /* top level htable */
247 uint_t use_vlp;
248 uint_t r;
249 hat_kernel_range_t *rp;
250 uintptr_t va;
251 uintptr_t eva;
252 uint_t start;
253 uint_t cnt;
254 htable_t *src;
255
256 /*
257 * Once we start creating user process HATs we can enable
258 * the htable_steal() code.
259 */
260 if (can_steal_post_boot == 0)
261 can_steal_post_boot = 1;
262
263 ASSERT(AS_WRITE_HELD(as));
264 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
265 hat->hat_as = as;
266 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
267 ASSERT(hat->hat_flags == 0);
268
269 #if defined(__xpv)
270 /*
271 * No VLP stuff on the hypervisor due to the 64-bit split top level
272 * page tables. On 32-bit it's not needed as the hypervisor takes
273 * care of copying the top level PTEs to a below 4Gig page.
274 */
275 use_vlp = 0;
276 #else /* __xpv */
277 	/* 32 bit processes use a VLP style hat when running with PAE */
278 #if defined(__amd64)
279 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
280 #elif defined(__i386)
281 use_vlp = mmu.pae_hat;
282 #endif
283 #endif /* __xpv */
284 if (use_vlp) {
285 hat->hat_flags = HAT_VLP;
286 bzero(hat->hat_vlp_ptes, VLP_SIZE);
287 }
288
289 /*
290 * Allocate the htable hash
291 */
292 if ((hat->hat_flags & HAT_VLP)) {
293 hat->hat_num_hash = mmu.vlp_hash_cnt;
294 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
295 } else {
296 hat->hat_num_hash = mmu.hash_cnt;
297 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
298 }
299 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
300
301 /*
302 * Initialize Kernel HAT entries at the top of the top level page
303 * tables for the new hat.
304 */
305 hat->hat_htable = NULL;
306 hat->hat_ht_cached = NULL;
307 XPV_DISALLOW_MIGRATE();
308 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
309 hat->hat_htable = ht;
310
311 #if defined(__amd64)
312 if (hat->hat_flags & HAT_VLP)
313 goto init_done;
314 #endif
315
316 for (r = 0; r < num_kernel_ranges; ++r) {
317 rp = &kernel_ranges[r];
318 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
319 va += cnt * LEVEL_SIZE(rp->hkr_level)) {
320
321 if (rp->hkr_level == TOP_LEVEL(hat))
322 ht = hat->hat_htable;
323 else
324 ht = htable_create(hat, va, rp->hkr_level,
325 NULL);
326
327 start = htable_va2entry(va, ht);
328 cnt = HTABLE_NUM_PTES(ht) - start;
329 eva = va +
330 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
331 if (rp->hkr_end_va != 0 &&
332 (eva > rp->hkr_end_va || eva == 0))
333 cnt = htable_va2entry(rp->hkr_end_va, ht) -
334 start;
335
336 #if defined(__i386) && !defined(__xpv)
337 if (ht->ht_flags & HTABLE_VLP) {
338 bcopy(&vlp_page[start],
339 &hat->hat_vlp_ptes[start],
340 cnt * sizeof (x86pte_t));
341 continue;
342 }
343 #endif
344 src = htable_lookup(kas.a_hat, va, rp->hkr_level);
345 ASSERT(src != NULL);
346 x86pte_copy(src, ht, start, cnt);
347 htable_release(src);
348 }
349 }
350
351 init_done:
352
353 #if defined(__xpv)
354 /*
355 * Pin top level page tables after initializing them
356 */
357 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
358 #if defined(__amd64)
359 xen_pin(hat->hat_user_ptable, mmu.max_level);
360 #endif
361 #endif
362 XPV_ALLOW_MIGRATE();
363
364 /*
365 * Put it at the start of the global list of all hats (used by stealing)
366 *
367 * kas.a_hat is not in the list but is instead used to find the
368 * first and last items in the list.
369 *
370 * - kas.a_hat->hat_next points to the start of the user hats.
371 * The list ends where hat->hat_next == NULL
372 *
373 * - kas.a_hat->hat_prev points to the last of the user hats.
374 * The list begins where hat->hat_prev == NULL
375 */
376 mutex_enter(&hat_list_lock);
377 hat->hat_prev = NULL;
378 hat->hat_next = kas.a_hat->hat_next;
379 if (hat->hat_next)
380 hat->hat_next->hat_prev = hat;
381 else
382 kas.a_hat->hat_prev = hat;
383 kas.a_hat->hat_next = hat;
384 mutex_exit(&hat_list_lock);
385
386 return (hat);
387 }
388
389 /*
390 * The process has finished executing, but its "as" has not been cleaned up yet.
391 */
392 /*ARGSUSED*/
393 void
394 hat_free_start(hat_t *hat)
395 {
396 ASSERT(AS_WRITE_HELD(hat->hat_as));
397
398 /*
399 * If the hat is currently a stealing victim, wait for the stealing
400 * to finish. Once we mark it as HAT_FREEING, htable_steal()
401 * won't look at its pagetables anymore.
402 */
403 mutex_enter(&hat_list_lock);
404 while (hat->hat_flags & HAT_VICTIM)
405 cv_wait(&hat_list_cv, &hat_list_lock);
406 hat->hat_flags |= HAT_FREEING;
407 mutex_exit(&hat_list_lock);
408 }
409
410 /*
411 * An address space is being destroyed, so we destroy the associated hat.
412 */
413 void
414 hat_free_end(hat_t *hat)
415 {
416 kmem_cache_t *cache;
417
418 ASSERT(hat->hat_flags & HAT_FREEING);
419
420 /*
421 * must not be running on the given hat
422 */
423 ASSERT(CPU->cpu_current_hat != hat);
424
425 /*
426 * Remove it from the list of HATs
427 */
428 mutex_enter(&hat_list_lock);
429 if (hat->hat_prev)
430 hat->hat_prev->hat_next = hat->hat_next;
431 else
432 kas.a_hat->hat_next = hat->hat_next;
433 if (hat->hat_next)
434 hat->hat_next->hat_prev = hat->hat_prev;
435 else
436 kas.a_hat->hat_prev = hat->hat_prev;
437 mutex_exit(&hat_list_lock);
438 hat->hat_next = hat->hat_prev = NULL;
439
440 #if defined(__xpv)
441 /*
442 * On the hypervisor, unpin top level page table(s)
443 */
444 xen_unpin(hat->hat_htable->ht_pfn);
445 #if defined(__amd64)
446 xen_unpin(hat->hat_user_ptable);
447 #endif
448 #endif
449
450 /*
451 * Make a pass through the htables freeing them all up.
452 */
453 htable_purge_hat(hat);
454
455 /*
456 * Decide which kmem cache the hash table came from, then free it.
457 */
458 if (hat->hat_flags & HAT_VLP)
459 cache = vlp_hash_cache;
460 else
461 cache = hat_hash_cache;
462 kmem_cache_free(cache, hat->hat_ht_hash);
463 hat->hat_ht_hash = NULL;
464
465 hat->hat_flags = 0;
466 kmem_cache_free(hat_cache, hat);
467 }
468
469 /*
470 * round kernelbase down to a supported value to use for _userlimit
471 *
472 * userlimit must be aligned down to an entry in the top level htable.
473 * The one exception is for 32 bit HAT's running PAE.
474 */
475 uintptr_t
476 hat_kernelbase(uintptr_t va)
477 {
478 #if defined(__i386)
479 va &= LEVEL_MASK(1);
480 #endif
481 if (IN_VA_HOLE(va))
482 panic("_userlimit %p will fall in VA hole\n", (void *)va);
483 return (va);
484 }
485
486 /*
487  * Determine the maximum page size level supported for new mappings.
488  */
489 static void
490 set_max_page_level()
491 {
492 level_t lvl;
493
494 if (!kbm_largepage_support) {
495 lvl = 0;
496 } else {
497 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
498 lvl = 2;
499 if (chk_optimal_1gtlb &&
500 cpuid_opteron_erratum(CPU, 6671130)) {
501 lvl = 1;
502 }
503 if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
504 LEVEL_SHIFT(0))) {
505 lvl = 1;
506 }
507 } else {
508 lvl = 1;
509 }
510 }
511 mmu.max_page_level = lvl;
512
513 if ((lvl == 2) && (enable_1gpg == 0))
514 mmu.umax_page_level = 1;
515 else
516 mmu.umax_page_level = lvl;
517 }
518
519 /*
520 * Initialize hat data structures based on processor MMU information.
521 */
522 void
523 mmu_init(void)
524 {
525 uint_t max_htables;
526 uint_t pa_bits;
527 uint_t va_bits;
528 int i;
529
530 /*
531 * If CPU enabled the page table global bit, use it for the kernel
532 * This is bit 7 in CR4 (PGE - Page Global Enable).
533 */
534 if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
535 (getcr4() & CR4_PGE) != 0)
536 mmu.pt_global = PT_GLOBAL;
537
538 /*
539 * Detect NX and PAE usage.
540 */
541 mmu.pae_hat = kbm_pae_support;
542 if (kbm_nx_support)
543 mmu.pt_nx = PT_NX;
544 else
545 mmu.pt_nx = 0;
546
547 /*
548 * Use CPU info to set various MMU parameters
549 */
550 cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
551
552 if (va_bits < sizeof (void *) * NBBY) {
553 mmu.hole_start = (1ul << (va_bits - 1));
554 mmu.hole_end = 0ul - mmu.hole_start - 1;
555 } else {
556 mmu.hole_end = 0;
557 mmu.hole_start = mmu.hole_end - 1;
558 }
559 #if defined(OPTERON_ERRATUM_121)
560 /*
561 * If erratum 121 has already been detected at this time, hole_start
562 * contains the value to be subtracted from mmu.hole_start.
563 */
564 ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
565 hole_start = mmu.hole_start - hole_start;
566 #else
567 hole_start = mmu.hole_start;
568 #endif
569 hole_end = mmu.hole_end;
570
571 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
572 if (mmu.pae_hat == 0 && pa_bits > 32)
573 mmu.highest_pfn = PFN_4G - 1;
574
575 if (mmu.pae_hat) {
576 mmu.pte_size = 8; /* 8 byte PTEs */
577 mmu.pte_size_shift = 3;
578 } else {
579 mmu.pte_size = 4; /* 4 byte PTEs */
580 mmu.pte_size_shift = 2;
581 }
582
583 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
584 panic("Processor does not support PAE");
585
586 if (!is_x86_feature(x86_featureset, X86FSET_CX8))
587 panic("Processor does not support cmpxchg8b instruction");
588
589 #if defined(__amd64)
590
591 mmu.num_level = 4;
592 mmu.max_level = 3;
593 mmu.ptes_per_table = 512;
594 mmu.top_level_count = 512;
595
596 mmu.level_shift[0] = 12;
597 mmu.level_shift[1] = 21;
598 mmu.level_shift[2] = 30;
599 mmu.level_shift[3] = 39;
600
601 #elif defined(__i386)
602
603 if (mmu.pae_hat) {
604 mmu.num_level = 3;
605 mmu.max_level = 2;
606 mmu.ptes_per_table = 512;
607 mmu.top_level_count = 4;
608
609 mmu.level_shift[0] = 12;
610 mmu.level_shift[1] = 21;
611 mmu.level_shift[2] = 30;
612
613 } else {
614 mmu.num_level = 2;
615 mmu.max_level = 1;
616 mmu.ptes_per_table = 1024;
617 mmu.top_level_count = 1024;
618
619 mmu.level_shift[0] = 12;
620 mmu.level_shift[1] = 22;
621 }
622
623 #endif /* __i386 */
624
625 for (i = 0; i < mmu.num_level; ++i) {
626 mmu.level_size[i] = 1UL << mmu.level_shift[i];
627 mmu.level_offset[i] = mmu.level_size[i] - 1;
628 mmu.level_mask[i] = ~mmu.level_offset[i];
629 }
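
	/*
	 * For example, with the 64 bit shifts above (12, 21, 30, 39) this
	 * loop yields level sizes of 4K, 2M, 1G and 512G; level_mask[l]
	 * rounds a virtual address down to that boundary and
	 * level_offset[l] extracts the offset within it.
	 */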
630
631 set_max_page_level();
632
633 mmu_page_sizes = mmu.max_page_level + 1;
634 mmu_exported_page_sizes = mmu.umax_page_level + 1;
635
636 /* restrict legacy applications from using pagesizes 1g and above */
637 mmu_legacy_page_sizes =
638 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
639
640
641 for (i = 0; i <= mmu.max_page_level; ++i) {
642 mmu.pte_bits[i] = PT_VALID | pt_kern;
643 if (i > 0)
644 mmu.pte_bits[i] |= PT_PAGESIZE;
645 }
646
647 /*
648 	 * NOTE Legacy 32 bit PAE mode only has the PT_VALID bit at top level.
649 */
650 for (i = 1; i < mmu.num_level; ++i)
651 mmu.ptp_bits[i] = PT_PTPBITS;
652
653 #if defined(__i386)
654 mmu.ptp_bits[2] = PT_VALID;
655 #endif
656
657 /*
658 * Compute how many hash table entries to have per process for htables.
659 * We start with 1 page's worth of entries.
660 *
661 	 * If physical memory is small, reduce the amount needed to cover it.
662 */
663 max_htables = physmax / mmu.ptes_per_table;
664 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
665 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
666 mmu.hash_cnt >>= 1;
667 mmu.vlp_hash_cnt = mmu.hash_cnt;
668
669 #if defined(__amd64)
670 /*
671 * If running in 64 bits and physical memory is large,
672 * increase the size of the cache to cover all of memory for
673 * a 64 bit process.
674 */
675 #define HASH_MAX_LENGTH 4
676 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
677 mmu.hash_cnt <<= 1;
678 #endif
679 }
680
681
682 /*
683 * initialize hat data structures
684 */
685 void
686 hat_init()
687 {
688 #if defined(__i386)
689 /*
690 * _userlimit must be aligned correctly
691 */
692 if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
693 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
694 (void *)_userlimit, (void *)LEVEL_SIZE(1));
695 halt("hat_init(): Unable to continue");
696 }
697 #endif
698
699 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
700
701 /*
702 * initialize kmem caches
703 */
704 htable_init();
705 hment_init();
706
707 hat_cache = kmem_cache_create("hat_t",
708 sizeof (hat_t), 0, hati_constructor, NULL, NULL,
709 NULL, 0, 0);
710
711 hat_hash_cache = kmem_cache_create("HatHash",
712 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
713 NULL, 0, 0);
714
715 /*
716 	 * VLP hats can use a smaller hash table size on large memory machines
717 */
718 if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
719 vlp_hash_cache = hat_hash_cache;
720 } else {
721 vlp_hash_cache = kmem_cache_create("HatVlpHash",
722 mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
723 NULL, 0, 0);
724 }
725
726 /*
727 * Set up the kernel's hat
728 */
729 AS_LOCK_ENTER(&kas, RW_WRITER);
730 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
731 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
732 kas.a_hat->hat_as = &kas;
733 kas.a_hat->hat_flags = 0;
734 AS_LOCK_EXIT(&kas);
735
736 CPUSET_ZERO(khat_cpuset);
737 CPUSET_ADD(khat_cpuset, CPU->cpu_id);
738
739 /*
740 	 * The kernel hat's next pointer serves as the head of the hat list.
741 * The kernel hat's prev pointer tracks the last hat on the list for
742 * htable_steal() to use.
743 */
744 kas.a_hat->hat_next = NULL;
745 kas.a_hat->hat_prev = NULL;
746
747 /*
748 * Allocate an htable hash bucket for the kernel
749 * XX64 - tune for 64 bit procs
750 */
751 kas.a_hat->hat_num_hash = mmu.hash_cnt;
752 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
753 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
754
755 /*
756 * zero out the top level and cached htable pointers
757 */
758 kas.a_hat->hat_ht_cached = NULL;
759 kas.a_hat->hat_htable = NULL;
760
761 /*
762 * Pre-allocate hrm_hashtab before enabling the collection of
763 	 * refmod statistics.  Allocating on the fly would mean running
764 	 * the risk of suffering recursive mutex enters or
765 	 * deadlocks.
766 */
767 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
768 KM_SLEEP);
769 }
770
771 /*
772 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
773 *
774 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
775 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
776 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
777 */
778 /*ARGSUSED*/
779 static void
780 hat_vlp_setup(struct cpu *cpu)
781 {
782 #if defined(__amd64) && !defined(__xpv)
783 struct hat_cpu_info *hci = cpu->cpu_hat_info;
784 pfn_t pfn;
785
786 /*
787 	 * allocate the level==2 page table for the bottommost
788 * 512Gig of address space (this is where 32 bit apps live)
789 */
790 ASSERT(hci != NULL);
791 hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
792
793 /*
794 * Allocate a top level pagetable and copy the kernel's
795 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
796 */
797 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
798 hci->hci_vlp_pfn =
799 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
800 ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
801 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
802
803 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
804 ASSERT(pfn != PFN_INVALID);
805 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
806 #endif /* __amd64 && !__xpv */
807 }
808
809 /*ARGSUSED*/
810 static void
811 hat_vlp_teardown(cpu_t *cpu)
812 {
813 #if defined(__amd64) && !defined(__xpv)
814 struct hat_cpu_info *hci;
815
816 if ((hci = cpu->cpu_hat_info) == NULL)
817 return;
818 if (hci->hci_vlp_l2ptes)
819 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
820 if (hci->hci_vlp_l3ptes)
821 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
822 #endif
823 }
824
825 #define NEXT_HKR(r, l, s, e) { \
826 kernel_ranges[r].hkr_level = l; \
827 kernel_ranges[r].hkr_start_va = s; \
828 kernel_ranges[r].hkr_end_va = e; \
829 ++r; \
830 }
831
832 /*
833 * Finish filling in the kernel hat.
834 * Pre-fill all top level kernel page table entries for the kernel's
835 * part of the address range. From this point on we can't use any new
836 * kernel large pages if they need PTE's at max_level.
837 *
838 * create the kmap mappings.
839 */
840 void
841 hat_init_finish(void)
842 {
843 size_t size;
844 uint_t r = 0;
845 uintptr_t va;
846 hat_kernel_range_t *rp;
847
848
849 /*
850 * We are now effectively running on the kernel hat.
851 * Clearing use_boot_reserve shuts off using the pre-allocated boot
852 * reserve for all HAT allocations. From here on, the reserves are
853 * only used when avoiding recursion in kmem_alloc().
854 */
855 use_boot_reserve = 0;
856 htable_adjust_reserve();
857
858 /*
859 * User HATs are initialized with copies of all kernel mappings in
860 * higher level page tables. Ensure that those entries exist.
861 */
862 #if defined(__amd64)
863
864 NEXT_HKR(r, 3, kernelbase, 0);
865 #if defined(__xpv)
866 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
867 #endif
868
869 #elif defined(__i386)
870
871 #if !defined(__xpv)
872 if (mmu.pae_hat) {
873 va = kernelbase;
874 if ((va & LEVEL_MASK(2)) != va) {
875 va = P2ROUNDUP(va, LEVEL_SIZE(2));
876 NEXT_HKR(r, 1, kernelbase, va);
877 }
878 if (va != 0)
879 NEXT_HKR(r, 2, va, 0);
880 } else
881 #endif /* __xpv */
882 NEXT_HKR(r, 1, kernelbase, 0);
883
884 #endif /* __i386 */
885
886 num_kernel_ranges = r;
887
888 /*
889 * Create all the kernel pagetables that will have entries
890 * shared to user HATs.
891 */
892 for (r = 0; r < num_kernel_ranges; ++r) {
893 rp = &kernel_ranges[r];
894 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
895 va += LEVEL_SIZE(rp->hkr_level)) {
896 htable_t *ht;
897
898 if (IN_HYPERVISOR_VA(va))
899 continue;
900
901 /* can/must skip if a page mapping already exists */
902 if (rp->hkr_level <= mmu.max_page_level &&
903 (ht = htable_getpage(kas.a_hat, va, NULL)) !=
904 NULL) {
905 htable_release(ht);
906 continue;
907 }
908
909 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
910 NULL);
911 }
912 }
913
914 /*
915 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
916 * page holding the top level pagetable. We use the remainder for
917 * the "per CPU" page tables for VLP processes.
918 * Map the top level kernel pagetable into the kernel to make
919 	 * it easy to use bcopy to access these tables.
920 */
921 if (mmu.pae_hat) {
922 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
923 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
924 kas.a_hat->hat_htable->ht_pfn,
925 #if !defined(__xpv)
926 PROT_WRITE |
927 #endif
928 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
929 HAT_LOAD | HAT_LOAD_NOCONSIST);
930 }
931 hat_vlp_setup(CPU);
932
933 /*
934 * Create kmap (cached mappings of kernel PTEs)
935 * for 32 bit we map from segmap_start .. ekernelheap
936 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
937 */
938 #if defined(__i386)
939 size = (uintptr_t)ekernelheap - segmap_start;
940 #elif defined(__amd64)
941 size = segmapsize;
942 #endif
943 hat_kmap_init((uintptr_t)segmap_start, size);
944 }
945
946 /*
947 * In 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
948 * are 32 bit, so for safety we must use atomic_cas_64() to install these.
949 */
950 #ifdef __i386
951 static void
952 reload_pae32(hat_t *hat, cpu_t *cpu)
953 {
954 x86pte_t *src;
955 x86pte_t *dest;
956 x86pte_t pte;
957 int i;
958
959 /*
960 * Load the 4 entries of the level 2 page table into this
961 * cpu's range of the vlp_page and point cr3 at them.
962 */
963 ASSERT(mmu.pae_hat);
964 src = hat->hat_vlp_ptes;
965 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
966 for (i = 0; i < VLP_NUM_PTES; ++i) {
967 for (;;) {
968 pte = dest[i];
969 if (pte == src[i])
970 break;
971 if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
972 break;
973 }
974 }
975 }
976 #endif
977
978 /*
979 * Switch to a new active hat, maintaining bit masks to track active CPUs.
980 *
981 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
982 * remains a 32-bit value.
983 */
984 void
985 hat_switch(hat_t *hat)
986 {
987 uint64_t newcr3;
988 cpu_t *cpu = CPU;
989 hat_t *old = cpu->cpu_current_hat;
990
991 /*
992 * set up this information first, so we don't miss any cross calls
993 */
994 if (old != NULL) {
995 if (old == hat)
996 return;
997 if (old != kas.a_hat)
998 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
999 }
1000
1001 /*
1002 * Add this CPU to the active set for this HAT.
1003 */
1004 if (hat != kas.a_hat) {
1005 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1006 }
1007 cpu->cpu_current_hat = hat;
1008
1009 /*
1010 * now go ahead and load cr3
1011 */
1012 if (hat->hat_flags & HAT_VLP) {
1013 #if defined(__amd64)
1014 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1015
1016 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1017 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1018 #elif defined(__i386)
1019 reload_pae32(hat, cpu);
1020 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1021 (cpu->cpu_id + 1) * VLP_SIZE;
1022 #endif
1023 } else {
1024 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1025 }
1026 #ifdef __xpv
1027 {
1028 struct mmuext_op t[2];
1029 uint_t retcnt;
1030 uint_t opcnt = 1;
1031
1032 t[0].cmd = MMUEXT_NEW_BASEPTR;
1033 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1034 #if defined(__amd64)
1035 /*
1036 * There's an interesting problem here, as to what to
1037 * actually specify when switching to the kernel hat.
1038 * For now we'll reuse the kernel hat again.
1039 */
1040 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1041 if (hat == kas.a_hat)
1042 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1043 else
1044 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1045 ++opcnt;
1046 #endif /* __amd64 */
1047 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1048 			panic("HYPERVISOR_mmuext_op() failed");
1049 ASSERT(retcnt == opcnt);
1050
1051 }
1052 #else
1053 setcr3(newcr3);
1054 #endif
1055 ASSERT(cpu == CPU);
1056 }
1057
1058 /*
1059 * Utility to return a valid x86pte_t from protections, pfn, and level number
1060 */
1061 static x86pte_t
1062 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1063 {
1064 x86pte_t pte;
1065 uint_t cache_attr = attr & HAT_ORDER_MASK;
1066
1067 pte = MAKEPTE(pfn, level);
1068
1069 if (attr & PROT_WRITE)
1070 PTE_SET(pte, PT_WRITABLE);
1071
1072 if (attr & PROT_USER)
1073 PTE_SET(pte, PT_USER);
1074
1075 if (!(attr & PROT_EXEC))
1076 PTE_SET(pte, mmu.pt_nx);
1077
1078 /*
1079 	 * Set the software bits used to track ref/mod syncs and hments.
1080 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1081 */
1082 if (flags & HAT_LOAD_NOCONSIST)
1083 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1084 else if (attr & HAT_NOSYNC)
1085 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1086
1087 /*
1088 	 * Set the caching attributes in the PTE. The combinations
1089 	 * of attributes are poorly defined, so we pay attention
1090 * to them in the given order.
1091 *
1092 * The test for HAT_STRICTORDER is different because it's defined
1093 * as "0" - which was a stupid thing to do, but is too late to change!
1094 */
1095 if (cache_attr == HAT_STRICTORDER) {
1096 PTE_SET(pte, PT_NOCACHE);
1097 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1098 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1099 /* nothing to set */;
1100 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1101 PTE_SET(pte, PT_NOCACHE);
1102 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1103 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1104 else
1105 PTE_SET(pte, PT_WRITETHRU);
1106 } else {
1107 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1108 }
1109
1110 return (pte);
1111 }
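
/*
 * For example (a sketch of the common case): hati_mkpte(pfn, PROT_READ |
 * PROT_WRITE | HAT_STORECACHING_OK, 0, HAT_LOAD) builds MAKEPTE(pfn, 0)
 * with PT_WRITABLE set, PT_NX set where the processor supports NX (since
 * PROT_EXEC was not requested), and no extra caching or software bits,
 * i.e. an ordinary writable, cacheable, non-executable 4K mapping.
 */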
1112
1113 /*
1114 * Duplicate address translations of the parent to the child.
1115 * This function really isn't used anymore.
1116 */
1117 /*ARGSUSED*/
1118 int
1119 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1120 {
1121 ASSERT((uintptr_t)addr < kernelbase);
1122 ASSERT(new != kas.a_hat);
1123 ASSERT(old != kas.a_hat);
1124 return (0);
1125 }
1126
1127 /*
1128 * returns number of bytes that have valid mappings in hat.
1129 */
1130 size_t
1131 hat_get_mapped_size(hat_t *hat)
1132 {
1133 size_t total = 0;
1134 int l;
1135
1136 for (l = 0; l <= mmu.max_page_level; l++)
1137 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1138 total += hat->hat_ism_pgcnt;
1139
1140 return (total);
1141 }
1142
1143 /*
1144 * enable/disable collection of stats for hat.
1145 */
1146 int
1147 hat_stats_enable(hat_t *hat)
1148 {
1149 atomic_inc_32(&hat->hat_stats);
1150 return (1);
1151 }
1152
1153 void
1154 hat_stats_disable(hat_t *hat)
1155 {
1156 atomic_dec_32(&hat->hat_stats);
1157 }
1158
1159 /*
1160 * Utility to sync the ref/mod bits from a page table entry to the page_t.
1161 * We must be holding the mapping list lock when this is called.
1162 */
1163 static void
1164 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1165 {
1166 uint_t rm = 0;
1167 pgcnt_t pgcnt;
1168
1169 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1170 return;
1171
1172 if (PTE_GET(pte, PT_REF))
1173 rm |= P_REF;
1174
1175 if (PTE_GET(pte, PT_MOD))
1176 rm |= P_MOD;
1177
1178 if (rm == 0)
1179 return;
1180
1181 /*
1182 * sync to all constituent pages of a large page
1183 */
1184 ASSERT(x86_hm_held(pp));
1185 pgcnt = page_get_pagecnt(level);
1186 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1187 for (; pgcnt > 0; --pgcnt) {
1188 /*
1189 * hat_page_demote() can't decrease
1190 * pszc below this mapping size
1191 * since this large mapping existed after we
1192 * took mlist lock.
1193 */
1194 ASSERT(pp->p_szc >= level);
1195 hat_page_setattr(pp, rm);
1196 ++pp;
1197 }
1198 }
1199
1200 /*
1201 * This is the set of PTE bits for PFN, permissions and caching
1202 * that are allowed to change on a HAT_LOAD_REMAP.
1203 */
1204 #define PT_REMAP_BITS \
1205 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1206 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
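
/*
 * For example, a remap that merely clears PT_WRITABLE (write protecting an
 * existing mapping) or changes caching bits passes the check in
 * hati_pte_map() below; one that tried to change PT_USER or the PT_PAGESIZE
 * bit would trip the "remap bits changed" panic, since those bits are not
 * in PT_REMAP_BITS.
 */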
1207
1208 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1209 /*
1210 * Do the low-level work to get a mapping entered into a HAT's pagetables
1211 * and in the mapping list of the associated page_t.
1212 */
1213 static int
1214 hati_pte_map(
1215 htable_t *ht,
1216 uint_t entry,
1217 page_t *pp,
1218 x86pte_t pte,
1219 int flags,
1220 void *pte_ptr)
1221 {
1222 hat_t *hat = ht->ht_hat;
1223 x86pte_t old_pte;
1224 level_t l = ht->ht_level;
1225 hment_t *hm;
1226 uint_t is_consist;
1227 uint_t is_locked;
1228 int rv = 0;
1229
1230 /*
1231 	 * Is this a consistent (i.e. needs the mapping list lock) mapping?
1232 */
1233 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1234
1235 /*
1236 * Track locked mapping count in the htable. Do this first,
1237 * as we track locking even if there already is a mapping present.
1238 */
1239 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1240 if (is_locked)
1241 HTABLE_LOCK_INC(ht);
1242
1243 /*
1244 * Acquire the page's mapping list lock and get an hment to use.
1245 * Note that hment_prepare() might return NULL.
1246 */
1247 if (is_consist) {
1248 x86_hm_enter(pp);
1249 hm = hment_prepare(ht, entry, pp);
1250 }
1251
1252 /*
1253 * Set the new pte, retrieving the old one at the same time.
1254 */
1255 old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1256
1257 /*
1258 * Did we get a large page / page table collision?
1259 */
1260 if (old_pte == LPAGE_ERROR) {
1261 if (is_locked)
1262 HTABLE_LOCK_DEC(ht);
1263 rv = -1;
1264 goto done;
1265 }
1266
1267 /*
1268 * If the mapping didn't change there is nothing more to do.
1269 */
1270 if (PTE_EQUIV(pte, old_pte))
1271 goto done;
1272
1273 /*
1274 * Install a new mapping in the page's mapping list
1275 */
1276 if (!PTE_ISVALID(old_pte)) {
1277 if (is_consist) {
1278 hment_assign(ht, entry, pp, hm);
1279 x86_hm_exit(pp);
1280 } else {
1281 ASSERT(flags & HAT_LOAD_NOCONSIST);
1282 }
1283 #if defined(__amd64)
1284 if (ht->ht_flags & HTABLE_VLP) {
1285 cpu_t *cpu = CPU;
1286 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1287 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1288 }
1289 #endif
1290 HTABLE_INC(ht->ht_valid_cnt);
1291 PGCNT_INC(hat, l);
1292 return (rv);
1293 }
1294
1295 /*
1296 * Remap's are more complicated:
1297 * - HAT_LOAD_REMAP must be specified if changing the pfn.
1298 * We also require that NOCONSIST be specified.
1299 * - Otherwise only permission or caching bits may change.
1300 */
1301 if (!PTE_ISPAGE(old_pte, l))
1302 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1303
1304 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1305 REMAPASSERT(flags & HAT_LOAD_REMAP);
1306 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1307 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1308 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1309 pf_is_memory(PTE2PFN(pte, l)));
1310 REMAPASSERT(!is_consist);
1311 }
1312
1313 /*
1314 	 * We only let remaps change certain bits in the PTE.
1315 */
1316 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1317 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1318 old_pte, pte);
1319
1320 /*
1321 * We don't create any mapping list entries on a remap, so release
1322 * any allocated hment after we drop the mapping list lock.
1323 */
1324 done:
1325 if (is_consist) {
1326 x86_hm_exit(pp);
1327 if (hm != NULL)
1328 hment_free(hm);
1329 }
1330 return (rv);
1331 }
1332
1333 /*
1334 * Internal routine to load a single page table entry. This only fails if
1335 * we attempt to overwrite a page table link with a large page.
1336 */
1337 static int
1338 hati_load_common(
1339 hat_t *hat,
1340 uintptr_t va,
1341 page_t *pp,
1342 uint_t attr,
1343 uint_t flags,
1344 level_t level,
1345 pfn_t pfn)
1346 {
1347 htable_t *ht;
1348 uint_t entry;
1349 x86pte_t pte;
1350 int rv = 0;
1351
1352 /*
1353 * The number 16 is arbitrary and here to catch a recursion problem
1354 * early before we blow out the kernel stack.
1355 */
1356 ++curthread->t_hatdepth;
1357 ASSERT(curthread->t_hatdepth < 16);
1358
1359 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1360
1361 if (flags & HAT_LOAD_SHARE)
1362 hat->hat_flags |= HAT_SHARED;
1363
1364 /*
1365 * Find the page table that maps this page if it already exists.
1366 */
1367 ht = htable_lookup(hat, va, level);
1368
1369 /*
1370 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1371 */
1372 if (pp == NULL)
1373 flags |= HAT_LOAD_NOCONSIST;
1374
1375 if (ht == NULL) {
1376 ht = htable_create(hat, va, level, NULL);
1377 ASSERT(ht != NULL);
1378 }
1379 entry = htable_va2entry(va, ht);
1380
1381 /*
1382 * a bunch of paranoid error checking
1383 */
1384 ASSERT(ht->ht_busy > 0);
1385 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1386 panic("hati_load_common: bad htable %p, va %p",
1387 (void *)ht, (void *)va);
1388 ASSERT(ht->ht_level == level);
1389
1390 /*
1391 * construct the new PTE
1392 */
1393 if (hat == kas.a_hat)
1394 attr &= ~PROT_USER;
1395 pte = hati_mkpte(pfn, attr, level, flags);
1396 if (hat == kas.a_hat && va >= kernelbase)
1397 PTE_SET(pte, mmu.pt_global);
1398
1399 /*
1400 * establish the mapping
1401 */
1402 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1403
1404 /*
1405 * release the htable and any reserves
1406 */
1407 htable_release(ht);
1408 --curthread->t_hatdepth;
1409 return (rv);
1410 }
1411
1412 /*
1413 * special case of hat_memload to deal with some kernel addrs for performance
1414 */
1415 static void
1416 hat_kmap_load(
1417 caddr_t addr,
1418 page_t *pp,
1419 uint_t attr,
1420 uint_t flags)
1421 {
1422 uintptr_t va = (uintptr_t)addr;
1423 x86pte_t pte;
1424 pfn_t pfn = page_pptonum(pp);
1425 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
1426 htable_t *ht;
1427 uint_t entry;
1428 void *pte_ptr;
1429
1430 /*
1431 * construct the requested PTE
1432 */
1433 attr &= ~PROT_USER;
1434 attr |= HAT_STORECACHING_OK;
1435 pte = hati_mkpte(pfn, attr, 0, flags);
1436 PTE_SET(pte, mmu.pt_global);
1437
1438 /*
1439 * Figure out the pte_ptr and htable and use common code to finish up
1440 */
1441 if (mmu.pae_hat)
1442 pte_ptr = mmu.kmap_ptes + pg_off;
1443 else
1444 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1445 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1446 LEVEL_SHIFT(1)];
1447 entry = htable_va2entry(va, ht);
1448 ++curthread->t_hatdepth;
1449 ASSERT(curthread->t_hatdepth < 16);
1450 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1451 --curthread->t_hatdepth;
1452 }
1453
1454 /*
1455 * hat_memload() - load a translation to the given page struct
1456 *
1457 * Flags for hat_memload/hat_devload/hat_*attr.
1458 *
1459 * HAT_LOAD Default flags to load a translation to the page.
1460 *
1461 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
1462 * and hat_devload().
1463 *
1464 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1465 * Sets PT_NOCONSIST.
1466 *
1467 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
1468 * that map some user pages (not kas) are shared by more
1469 * than one process (e.g. ISM).
1470 *
1471 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
1472 *
1473 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
1474 * point, it's setting up mapping to allocate internal
1475 * hat layer data structures. This flag forces hat layer
1476 * to tap its reserves in order to prevent infinite
1477 * recursion.
1478 *
1479 * The following is a protection attribute (like PROT_READ, etc.)
1480 *
1481 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
1482 * are never cleared.
1483 *
1484 * Installing new valid PTE's and creation of the mapping list
1485 * entry are controlled under the same lock. It's derived from the
1486 * page_t being mapped.
1487 */
1488 static uint_t supported_memload_flags =
1489 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1490 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
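
/*
 * As a hedged sketch of typical usage (not a verbatim caller): a segment
 * driver faulting in an anonymous user page might call
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE | PROT_USER,
 *	    HAT_LOAD);
 *
 * while code wiring down a kernel page would use kas.a_hat and add
 * HAT_LOAD_LOCK.
 */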
1491
1492 void
1493 hat_memload(
1494 hat_t *hat,
1495 caddr_t addr,
1496 page_t *pp,
1497 uint_t attr,
1498 uint_t flags)
1499 {
1500 uintptr_t va = (uintptr_t)addr;
1501 level_t level = 0;
1502 pfn_t pfn = page_pptonum(pp);
1503
1504 XPV_DISALLOW_MIGRATE();
1505 ASSERT(IS_PAGEALIGNED(va));
1506 ASSERT(hat == kas.a_hat || va < _userlimit);
1507 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1508 ASSERT((flags & supported_memload_flags) == flags);
1509
1510 ASSERT(!IN_VA_HOLE(va));
1511 ASSERT(!PP_ISFREE(pp));
1512
1513 /*
1514 * kernel address special case for performance.
1515 */
1516 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1517 ASSERT(hat == kas.a_hat);
1518 hat_kmap_load(addr, pp, attr, flags);
1519 XPV_ALLOW_MIGRATE();
1520 return;
1521 }
1522
1523 /*
1524 * This is used for memory with normal caching enabled, so
1525 * always set HAT_STORECACHING_OK.
1526 */
1527 attr |= HAT_STORECACHING_OK;
1528 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1529 panic("unexpected hati_load_common() failure");
1530 XPV_ALLOW_MIGRATE();
1531 }
1532
1533 /* ARGSUSED */
1534 void
1535 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1536 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1537 {
1538 hat_memload(hat, addr, pp, attr, flags);
1539 }
1540
1541 /*
1542 * Load the given array of page structs using large pages when possible
1543 */
1544 void
1545 hat_memload_array(
1546 hat_t *hat,
1547 caddr_t addr,
1548 size_t len,
1549 page_t **pages,
1550 uint_t attr,
1551 uint_t flags)
1552 {
1553 uintptr_t va = (uintptr_t)addr;
1554 uintptr_t eaddr = va + len;
1555 level_t level;
1556 size_t pgsize;
1557 pgcnt_t pgindx = 0;
1558 pfn_t pfn;
1559 pgcnt_t i;
1560
1561 XPV_DISALLOW_MIGRATE();
1562 ASSERT(IS_PAGEALIGNED(va));
1563 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1564 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1565 ASSERT((flags & supported_memload_flags) == flags);
1566
1567 /*
1568 * memload is used for memory with full caching enabled, so
1569 * set HAT_STORECACHING_OK.
1570 */
1571 attr |= HAT_STORECACHING_OK;
1572
1573 /*
1574 * handle all pages using largest possible pagesize
1575 */
1576 while (va < eaddr) {
1577 /*
1578 * decide what level mapping to use (ie. pagesize)
1579 */
1580 pfn = page_pptonum(pages[pgindx]);
1581 for (level = mmu.max_page_level; ; --level) {
1582 pgsize = LEVEL_SIZE(level);
1583 if (level == 0)
1584 break;
1585
1586 if (!IS_P2ALIGNED(va, pgsize) ||
1587 (eaddr - va) < pgsize ||
1588 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1589 continue;
1590
1591 /*
1592 * To use a large mapping of this size, all the
1593 * pages we are passed must be sequential subpages
1594 * of the large page.
1595 * hat_page_demote() can't change p_szc because
1596 * all pages are locked.
1597 */
1598 if (pages[pgindx]->p_szc >= level) {
1599 for (i = 0; i < mmu_btop(pgsize); ++i) {
1600 if (pfn + i !=
1601 page_pptonum(pages[pgindx + i]))
1602 break;
1603 ASSERT(pages[pgindx + i]->p_szc >=
1604 level);
1605 ASSERT(pages[pgindx] + i ==
1606 pages[pgindx + i]);
1607 }
1608 if (i == mmu_btop(pgsize)) {
1609 #ifdef DEBUG
1610 if (level == 2)
1611 map1gcnt++;
1612 #endif
1613 break;
1614 }
1615 }
1616 }
1617
1618 /*
1619 * Load this page mapping. If the load fails, try a smaller
1620 * pagesize.
1621 */
1622 ASSERT(!IN_VA_HOLE(va));
1623 while (hati_load_common(hat, va, pages[pgindx], attr,
1624 flags, level, pfn) != 0) {
1625 if (level == 0)
1626 panic("unexpected hati_load_common() failure");
1627 --level;
1628 pgsize = LEVEL_SIZE(level);
1629 }
1630
1631 /*
1632 * move to next page
1633 */
1634 va += pgsize;
1635 pgindx += mmu_btop(pgsize);
1636 }
1637 XPV_ALLOW_MIGRATE();
1638 }
1639
1640 /* ARGSUSED */
1641 void
1642 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1643 struct page **pps, uint_t attr, uint_t flags,
1644 hat_region_cookie_t rcookie)
1645 {
1646 hat_memload_array(hat, addr, len, pps, attr, flags);
1647 }
1648
1649 /*
1650 * void hat_devload(hat, addr, len, pf, attr, flags)
1651 * load/lock the given page frame number
1652 *
1653 * Advisory ordering attributes. Apply only to device mappings.
1654 *
1655 * HAT_STRICTORDER: the CPU must issue the references in order, as the
1656 * programmer specified. This is the default.
1657 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1658 * of reordering; store or load with store or load).
1659 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1660 * to consecutive locations (for example, turn two consecutive byte
1661 * stores into one halfword store), and it may batch individual loads
1662 * (for example, turn two consecutive byte loads into one halfword load).
1663 * This also implies re-ordering.
1664 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1665 * until another store occurs. The default is to fetch new data
1666 * on every load. This also implies merging.
1667 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1668 * the device (perhaps with other data) at a later time. The default is
1669 * to push the data right away. This also implies load caching.
1670 *
1671 * Equivalent of hat_memload(), but can be used for device memory where
1672 * there are no page_t's and we support additional flags (write merging, etc).
1673 * Note that we can have large page mappings with this interface.
1674 */
1675 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1676 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1677 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1678
1679 void
1680 hat_devload(
1681 hat_t *hat,
1682 caddr_t addr,
1683 size_t len,
1684 pfn_t pfn,
1685 uint_t attr,
1686 int flags)
1687 {
1688 uintptr_t va = ALIGN2PAGE(addr);
1689 uintptr_t eva = va + len;
1690 level_t level;
1691 size_t pgsize;
1692 page_t *pp;
1693 int f; /* per PTE copy of flags - maybe modified */
1694 uint_t a; /* per PTE copy of attr */
1695
1696 XPV_DISALLOW_MIGRATE();
1697 ASSERT(IS_PAGEALIGNED(va));
1698 ASSERT(hat == kas.a_hat || eva <= _userlimit);
1699 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1700 ASSERT((flags & supported_devload_flags) == flags);
1701
1702 /*
1703 * handle all pages
1704 */
1705 while (va < eva) {
1706
1707 /*
1708 * decide what level mapping to use (ie. pagesize)
1709 */
1710 for (level = mmu.max_page_level; ; --level) {
1711 pgsize = LEVEL_SIZE(level);
1712 if (level == 0)
1713 break;
1714 if (IS_P2ALIGNED(va, pgsize) &&
1715 (eva - va) >= pgsize &&
1716 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1717 #ifdef DEBUG
1718 if (level == 2)
1719 map1gcnt++;
1720 #endif
1721 break;
1722 }
1723 }
1724
1725 /*
1726 * If this is just memory then allow caching (this happens
1727 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1728 * to override that. If we don't have a page_t then make sure
1729 * NOCONSIST is set.
1730 */
1731 a = attr;
1732 f = flags;
1733 if (!pf_is_memory(pfn))
1734 f |= HAT_LOAD_NOCONSIST;
1735 else if (!(a & HAT_PLAT_NOCACHE))
1736 a |= HAT_STORECACHING_OK;
1737
1738 if (f & HAT_LOAD_NOCONSIST)
1739 pp = NULL;
1740 else
1741 pp = page_numtopp_nolock(pfn);
1742
1743 /*
1744 * Check to make sure we are really trying to map a valid
1745 		 * memory page. A caller wishing to intentionally map
1746 		 * free memory pages will have passed the HAT_LOAD_NOCONSIST
1747 		 * flag, in which case pp will be NULL.
1748 */
1749 if (pp != NULL) {
1750 if (PP_ISFREE(pp)) {
1751 panic("hat_devload: loading "
1752 "a mapping to free page %p", (void *)pp);
1753 }
1754
1755 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1756 panic("hat_devload: loading a mapping "
1757 "to an unlocked page %p",
1758 (void *)pp);
1759 }
1760 }
1761
1762 /*
1763 * load this page mapping
1764 */
1765 ASSERT(!IN_VA_HOLE(va));
1766 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1767 if (level == 0)
1768 panic("unexpected hati_load_common() failure");
1769 --level;
1770 pgsize = LEVEL_SIZE(level);
1771 }
1772
1773 /*
1774 * move to next page
1775 */
1776 va += pgsize;
1777 pfn += mmu_btop(pgsize);
1778 }
1779 XPV_ALLOW_MIGRATE();
1780 }
1781
1782 /*
1783 * void hat_unlock(hat, addr, len)
1784 * unlock the mappings to a given range of addresses
1785 *
1786 * Locks are tracked by ht_lock_cnt in the htable.
1787 */
1788 void
1789 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1790 {
1791 uintptr_t vaddr = (uintptr_t)addr;
1792 uintptr_t eaddr = vaddr + len;
1793 htable_t *ht = NULL;
1794
1795 /*
1796 * kernel entries are always locked, we don't track lock counts
1797 */
1798 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1799 ASSERT(IS_PAGEALIGNED(vaddr));
1800 ASSERT(IS_PAGEALIGNED(eaddr));
1801 if (hat == kas.a_hat)
1802 return;
1803 if (eaddr > _userlimit)
1804 panic("hat_unlock() address out of range - above _userlimit");
1805
1806 XPV_DISALLOW_MIGRATE();
1807 ASSERT(AS_LOCK_HELD(hat->hat_as));
1808 while (vaddr < eaddr) {
1809 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1810 if (ht == NULL)
1811 break;
1812
1813 ASSERT(!IN_VA_HOLE(vaddr));
1814
1815 if (ht->ht_lock_cnt < 1)
1816 panic("hat_unlock(): lock_cnt < 1, "
1817 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1818 HTABLE_LOCK_DEC(ht);
1819
1820 vaddr += LEVEL_SIZE(ht->ht_level);
1821 }
1822 if (ht)
1823 htable_release(ht);
1824 XPV_ALLOW_MIGRATE();
1825 }
1826
1827 /* ARGSUSED */
1828 void
1829 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1830 hat_region_cookie_t rcookie)
1831 {
1832 panic("No shared region support on x86");
1833 }
1834
1835 #if !defined(__xpv)
1836 /*
1837 * Cross call service routine to demap a virtual page on
1838 * the current CPU or flush all mappings in TLB.
1839 */
1840 /*ARGSUSED*/
1841 static int
1842 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1843 {
1844 hat_t *hat = (hat_t *)a1;
1845 caddr_t addr = (caddr_t)a2;
1846 size_t len = (size_t)a3;
1847
1848 /*
1849 * If the target hat isn't the kernel and this CPU isn't operating
1850 * in the target hat, we can ignore the cross call.
1851 */
1852 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1853 return (0);
1854
1855 /*
1856 * For a normal address, we flush a range of contiguous mappings
1857 */
1858 if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1859 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1860 mmu_tlbflush_entry(addr + i);
1861 return (0);
1862 }
1863
1864 /*
1865 * Otherwise we reload cr3 to effect a complete TLB flush.
1866 *
1867 	 * A reload of cr3 on a VLP process also means we must recopy the
1868 	 * pte values from the struct hat.
1869 */
1870 if (hat->hat_flags & HAT_VLP) {
1871 #if defined(__amd64)
1872 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1873
1874 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1875 #elif defined(__i386)
1876 reload_pae32(hat, CPU);
1877 #endif
1878 }
1879 reload_cr3();
1880 return (0);
1881 }
1882
1883 /*
1884 * Flush all TLB entries, including global (ie. kernel) ones.
1885 */
1886 static void
1887 flush_all_tlb_entries(void)
1888 {
1889 ulong_t cr4 = getcr4();
1890
1891 if (cr4 & CR4_PGE) {
1892 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1893 setcr4(cr4);
1894
1895 /*
1896 * 32 bit PAE also needs to always reload_cr3()
1897 */
1898 if (mmu.max_level == 2)
1899 reload_cr3();
1900 } else {
1901 reload_cr3();
1902 }
1903 }
1904
1905 #define TLB_CPU_HALTED (01ul)
1906 #define TLB_INVAL_ALL (02ul)
1907 #define CAS_TLB_INFO(cpu, old, new) \
1908 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
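
/*
 * Sketch of the idle-CPU handshake implemented below: a CPU going idle sets
 * TLB_CPU_HALTED in its mcpu_tlb_info; hat_tlb_inval_range() then CASes in
 * TLB_INVAL_ALL instead of sending that CPU a cross call; when the CPU comes
 * out of idle, tlb_service() notices TLB_INVAL_ALL and performs the deferred
 * flush_all_tlb_entries().
 */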
1909
1910 /*
1911 * Record that a CPU is going idle
1912 */
1913 void
1914 tlb_going_idle(void)
1915 {
1916 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1917 }
1918
1919 /*
1920 * Service a delayed TLB flush if coming out of being idle.
1921 * It will be called from cpu idle notification with interrupts disabled.
1922 */
1923 void
1924 tlb_service(void)
1925 {
1926 ulong_t tlb_info;
1927 ulong_t found;
1928
1929 /*
1930 * We only have to do something if coming out of being idle.
1931 */
1932 tlb_info = CPU->cpu_m.mcpu_tlb_info;
1933 if (tlb_info & TLB_CPU_HALTED) {
1934 ASSERT(CPU->cpu_current_hat == kas.a_hat);
1935
1936 /*
1937 * Atomic clear and fetch of old state.
1938 */
1939 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
1940 ASSERT(found & TLB_CPU_HALTED);
1941 tlb_info = found;
1942 SMT_PAUSE();
1943 }
1944 if (tlb_info & TLB_INVAL_ALL)
1945 flush_all_tlb_entries();
1946 }
1947 }
1948 #endif /* !__xpv */
1949
1950 /*
1951 * Internal routine to do cross calls to invalidate a range of pages on
1952 * all CPUs using a given hat.
1953 */
1954 void
1955 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
1956 {
1957 extern int flushes_require_xcalls; /* from mp_startup.c */
1958 cpuset_t justme;
1959 cpuset_t cpus_to_shootdown;
1960 #ifndef __xpv
1961 cpuset_t check_cpus;
1962 cpu_t *cpup;
1963 int c;
1964 #endif
1965
1966 /*
1967 * If the hat is being destroyed, there are no more users, so
1968 * demap need not do anything.
1969 */
1970 if (hat->hat_flags & HAT_FREEING)
1971 return;
1972
1973 /*
1974 	 * If demapping from a shared pagetable, it is best to demap the
1975 * entire set of user TLBs, since we don't know what addresses
1976 * these were shared at.
1977 */
1978 if (hat->hat_flags & HAT_SHARED) {
1979 hat = kas.a_hat;
1980 va = DEMAP_ALL_ADDR;
1981 }
1982
1983 /*
1984 * if not running with multiple CPUs, don't use cross calls
1985 */
1986 if (panicstr || !flushes_require_xcalls) {
1987 #ifdef __xpv
1988 if (va == DEMAP_ALL_ADDR) {
1989 xen_flush_tlb();
1990 } else {
1991 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1992 xen_flush_va((caddr_t)(va + i));
1993 }
1994 #else
1995 (void) hati_demap_func((xc_arg_t)hat,
1996 (xc_arg_t)va, (xc_arg_t)len);
1997 #endif
1998 return;
1999 }
2000
2001
2002 /*
2003 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2004 * Otherwise it's just CPUs currently executing in this hat.
2005 */
2006 kpreempt_disable();
2007 CPUSET_ONLY(justme, CPU->cpu_id);
2008 if (hat == kas.a_hat)
2009 cpus_to_shootdown = khat_cpuset;
2010 else
2011 cpus_to_shootdown = hat->hat_cpus;
2012
2013 #ifndef __xpv
2014 /*
2015 * If any CPUs in the set are idle, just request a delayed flush
2016 * and avoid waking them up.
2017 */
2018 check_cpus = cpus_to_shootdown;
2019 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2020 ulong_t tlb_info;
2021
2022 if (!CPU_IN_SET(check_cpus, c))
2023 continue;
2024 CPUSET_DEL(check_cpus, c);
2025 cpup = cpu[c];
2026 if (cpup == NULL)
2027 continue;
2028
2029 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2030 while (tlb_info == TLB_CPU_HALTED) {
2031 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2032 TLB_CPU_HALTED | TLB_INVAL_ALL);
2033 SMT_PAUSE();
2034 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2035 }
2036 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2037 HATSTAT_INC(hs_tlb_inval_delayed);
2038 CPUSET_DEL(cpus_to_shootdown, c);
2039 }
2040 }
2041 #endif
2042
2043 if (CPUSET_ISNULL(cpus_to_shootdown) ||
2044 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2045
2046 #ifdef __xpv
2047 if (va == DEMAP_ALL_ADDR) {
2048 xen_flush_tlb();
2049 } else {
2050 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2051 xen_flush_va((caddr_t)(va + i));
2052 }
2053 #else
2054 (void) hati_demap_func((xc_arg_t)hat,
2055 (xc_arg_t)va, (xc_arg_t)len);
2056 #endif
2057
2058 } else {
2059
2060 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2061 #ifdef __xpv
2062 if (va == DEMAP_ALL_ADDR) {
2063 xen_gflush_tlb(cpus_to_shootdown);
2064 } else {
2065 for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2066 xen_gflush_va((caddr_t)(va + i),
2067 cpus_to_shootdown);
2068 }
2069 }
2070 #else
2071 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2072 CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2073 #endif
2074
2075 }
2076 kpreempt_enable();
2077 }
2078
2079 void
2080 hat_tlb_inval(hat_t *hat, uintptr_t va)
2081 {
2082 hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2083 }
2084
2085 /*
2086 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2087 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
2088 * handle releasing of the htables.
2089 */
2090 void
2091 hat_pte_unmap(
2092 htable_t *ht,
2093 uint_t entry,
2094 uint_t flags,
2095 x86pte_t old_pte,
2096 void *pte_ptr,
2097 boolean_t tlb)
2098 {
2099 hat_t *hat = ht->ht_hat;
2100 hment_t *hm = NULL;
2101 page_t *pp = NULL;
2102 level_t l = ht->ht_level;
2103 pfn_t pfn;
2104
2105 /*
2106 * We always track the locking counts, even if nothing is unmapped
2107 */
2108 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2109 ASSERT(ht->ht_lock_cnt > 0);
2110 HTABLE_LOCK_DEC(ht);
2111 }
2112
2113 /*
2114 * Figure out which page's mapping list lock to acquire using the PFN
2115 * passed in the "old" PTE. We then attempt to invalidate the PTE.
2116 * If another thread, probably a hat_pageunload, has asynchronously
2117 * unmapped/remapped this address we'll loop here.
2118 */
2119 ASSERT(ht->ht_busy > 0);
2120 while (PTE_ISVALID(old_pte)) {
2121 pfn = PTE2PFN(old_pte, l);
2122 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2123 pp = NULL;
2124 } else {
2125 #ifdef __xpv
2126 if (pfn == PFN_INVALID)
2127 panic("Invalid PFN, but not PT_NOCONSIST");
2128 #endif
2129 pp = page_numtopp_nolock(pfn);
2130 if (pp == NULL) {
2131 panic("no page_t, not NOCONSIST: old_pte="
2132 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2133 old_pte, (uintptr_t)ht, entry,
2134 (uintptr_t)pte_ptr);
2135 }
2136 x86_hm_enter(pp);
2137 }
2138
2139 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2140
2141 /*
2142 * If the page hadn't changed we've unmapped it and can proceed
2143 */
2144 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2145 break;
2146
2147 /*
2148 * Otherwise, we'll have to retry with the current old_pte.
2149 * Drop the hment lock, since the pfn may have changed.
2150 */
2151 if (pp != NULL) {
2152 x86_hm_exit(pp);
2153 pp = NULL;
2154 } else {
2155 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2156 }
2157 }
2158
2159 /*
2160 * If the old mapping wasn't valid, there's nothing more to do
2161 */
2162 if (!PTE_ISVALID(old_pte)) {
2163 if (pp != NULL)
2164 x86_hm_exit(pp);
2165 return;
2166 }
2167
2168 /*
2169 * Take care of syncing any MOD/REF bits and removing the hment.
2170 */
2171 if (pp != NULL) {
2172 if (!(flags & HAT_UNLOAD_NOSYNC))
2173 hati_sync_pte_to_page(pp, old_pte, l);
2174 hm = hment_remove(pp, ht, entry);
2175 x86_hm_exit(pp);
2176 if (hm != NULL)
2177 hment_free(hm);
2178 }
2179
2180 /*
2181 * Handle bookkeeping in the htable and hat
2182 */
2183 ASSERT(ht->ht_valid_cnt > 0);
2184 HTABLE_DEC(ht->ht_valid_cnt);
2185 PGCNT_DEC(hat, l);
2186 }
2187
2188 /*
2189 * very cheap unload implementation to special case some kernel addresses
2190 */
2191 static void
2192 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2193 {
2194 uintptr_t va = (uintptr_t)addr;
2195 uintptr_t eva = va + len;
2196 pgcnt_t pg_index;
2197 htable_t *ht;
2198 uint_t entry;
2199 x86pte_t *pte_ptr;
2200 x86pte_t old_pte;
2201
2202 for (; va < eva; va += MMU_PAGESIZE) {
2203 /*
2204 * Get the PTE
2205 */
2206 pg_index = mmu_btop(va - mmu.kmap_addr);
2207 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2208 old_pte = GET_PTE(pte_ptr);
2209
2210 /*
2211 * get the htable / entry
2212 */
2213 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2214 >> LEVEL_SHIFT(1)];
2215 entry = htable_va2entry(va, ht);
2216
2217 /*
2218 * use mostly common code to unmap it.
2219 */
2220 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2221 }
2222 }
2223
2224
2225 /*
2226 * unload a range of virtual address space (no callback)
2227 */
2228 void
2229 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2230 {
2231 uintptr_t va = (uintptr_t)addr;
2232
2233 XPV_DISALLOW_MIGRATE();
2234 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2235
2236 /*
2237 * special case for performance.
2238 */
2239 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2240 ASSERT(hat == kas.a_hat);
2241 hat_kmap_unload(addr, len, flags);
2242 } else {
2243 hat_unload_callback(hat, addr, len, flags, NULL);
2244 }
2245 XPV_ALLOW_MIGRATE();
2246 }
2247
2248 /*
2249 * Do the callbacks for ranges being unloaded.
2250 */
2251 typedef struct range_info {
2252 uintptr_t rng_va;
2253 ulong_t rng_cnt;
2254 level_t rng_level;
2255 } range_info_t;
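/*
 * Illustrative note (the numbers are hypothetical): a range_info_t of
 *	{ rng_va = va, rng_cnt = 3, rng_level = 1 }
 * describes 3 contiguous level 1 (large page) mappings starting at va.
 * handle_ranges() below then invalidates 3 << LEVEL_SHIFT(1) bytes with a
 * single hat_tlb_inval_range() call and, if a callback was supplied,
 * reports that same [start, end) interval to it.
 */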
2256
2257 /*
2258 * Invalidate the TLB, and perform the callback to the upper level VM system,
2259 * for the specified ranges of contiguous pages.
2260 */
2261 static void
2262 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2263 {
2264 while (cnt > 0) {
2265 size_t len;
2266
2267 --cnt;
2268 len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2269 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2270
2271 if (cb != NULL) {
2272 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2273 cb->hcb_end_addr = cb->hcb_start_addr;
2274 cb->hcb_end_addr += len;
2275 cb->hcb_function(cb);
2276 }
2277 }
2278 }
2279
2280 /*
2281 * Unload a given range of addresses (has optional callback)
2282 *
2283 * Flags:
2284 * define HAT_UNLOAD 0x00
2285 * define HAT_UNLOAD_NOSYNC 0x02
2286 * define HAT_UNLOAD_UNLOCK 0x04
2287 * define HAT_UNLOAD_OTHER 0x08 - not used
2288 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2289 */
2290 #define MAX_UNLOAD_CNT (8)
2291 void
2292 hat_unload_callback(
2293 hat_t *hat,
2294 caddr_t addr,
2295 size_t len,
2296 uint_t flags,
2297 hat_callback_t *cb)
2298 {
2299 uintptr_t vaddr = (uintptr_t)addr;
2300 uintptr_t eaddr = vaddr + len;
2301 htable_t *ht = NULL;
2302 uint_t entry;
2303 uintptr_t contig_va = (uintptr_t)-1L;
2304 range_info_t r[MAX_UNLOAD_CNT];
2305 uint_t r_cnt = 0;
2306 x86pte_t old_pte;
2307
2308 XPV_DISALLOW_MIGRATE();
2309 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2310 ASSERT(IS_PAGEALIGNED(vaddr));
2311 ASSERT(IS_PAGEALIGNED(eaddr));
2312
2313 /*
2314 * Special case a single page being unloaded for speed. This happens
2315 * quite frequently; COW faults after a fork(), for example.
2316 */
2317 if (cb == NULL && len == MMU_PAGESIZE) {
2318 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2319 if (ht != NULL) {
2320 if (PTE_ISVALID(old_pte)) {
2321 hat_pte_unmap(ht, entry, flags, old_pte,
2322 NULL, B_TRUE);
2323 }
2324 htable_release(ht);
2325 }
2326 XPV_ALLOW_MIGRATE();
2327 return;
2328 }
2329
2330 while (vaddr < eaddr) {
2331 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2332 if (ht == NULL)
2333 break;
2334
2335 ASSERT(!IN_VA_HOLE(vaddr));
2336
2337 if (vaddr < (uintptr_t)addr)
2338 panic("hat_unload_callback(): unmap inside large page");
2339
2340 /*
2341 * We'll do the callbacks for contiguous ranges
2342 */
2343 if (vaddr != contig_va ||
2344 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2345 if (r_cnt == MAX_UNLOAD_CNT) {
2346 handle_ranges(hat, cb, r_cnt, r);
2347 r_cnt = 0;
2348 }
2349 r[r_cnt].rng_va = vaddr;
2350 r[r_cnt].rng_cnt = 0;
2351 r[r_cnt].rng_level = ht->ht_level;
2352 ++r_cnt;
2353 }
2354
2355 /*
2356 * Unload one mapping (for a single page) from the page tables.
2357 * Note that we do not remove the mapping from the TLB yet,
2358 * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2359 * handle_ranges() will clear the TLB entries with one call to
2360 * hat_tlb_inval_range() per contiguous range. This is
2361 * safe because the page can not be reused until the
2362 * callback is made (or we return).
2363 */
2364 entry = htable_va2entry(vaddr, ht);
2365 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2366 ASSERT(ht->ht_level <= mmu.max_page_level);
2367 vaddr += LEVEL_SIZE(ht->ht_level);
2368 contig_va = vaddr;
2369 ++r[r_cnt - 1].rng_cnt;
2370 }
2371 if (ht)
2372 htable_release(ht);
2373
2374 /*
2375 * handle last range for callbacks
2376 */
2377 if (r_cnt > 0)
2378 handle_ranges(hat, cb, r_cnt, r);
2379 XPV_ALLOW_MIGRATE();
2380 }
2381
2382 /*
2383 * Invalidate a virtual address translation on a slave CPU during
2384 * panic() dumps.
2385 */
2386 void
2387 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2388 {
2389 ssize_t sz;
2390 caddr_t endva = va + size;
2391
2392 while (va < endva) {
2393 sz = hat_getpagesize(hat, va);
2394 if (sz < 0) {
2395 #ifdef __xpv
2396 xen_flush_tlb();
2397 #else
2398 flush_all_tlb_entries();
2399 #endif
2400 break;
2401 }
2402 #ifdef __xpv
2403 xen_flush_va(va);
2404 #else
2405 mmu_tlbflush_entry(va);
2406 #endif
2407 va += sz;
2408 }
2409 }
2410
2411 /*
2412 * synchronize mapping with software data structures
2413 *
2414 * This interface is currently only used by the working set monitor
2415 * driver.
2416 */
2417 /*ARGSUSED*/
2418 void
2419 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2420 {
2421 uintptr_t vaddr = (uintptr_t)addr;
2422 uintptr_t eaddr = vaddr + len;
2423 htable_t *ht = NULL;
2424 uint_t entry;
2425 x86pte_t pte;
2426 x86pte_t save_pte;
2427 x86pte_t new;
2428 page_t *pp;
2429
2430 ASSERT(!IN_VA_HOLE(vaddr));
2431 ASSERT(IS_PAGEALIGNED(vaddr));
2432 ASSERT(IS_PAGEALIGNED(eaddr));
2433 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2434
2435 XPV_DISALLOW_MIGRATE();
2436 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2437 try_again:
2438 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2439 if (ht == NULL)
2440 break;
2441 entry = htable_va2entry(vaddr, ht);
2442
2443 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2444 PTE_GET(pte, PT_REF | PT_MOD) == 0)
2445 continue;
2446
2447 /*
2448 * We need to acquire the mapping list lock to protect
2449 * against hat_pageunload(), hat_unload(), etc.
2450 */
2451 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2452 if (pp == NULL)
2453 break;
2454 x86_hm_enter(pp);
2455 save_pte = pte;
2456 pte = x86pte_get(ht, entry);
2457 if (pte != save_pte) {
2458 x86_hm_exit(pp);
2459 goto try_again;
2460 }
2461 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2462 PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2463 x86_hm_exit(pp);
2464 continue;
2465 }
2466
2467 /*
2468 * Need to clear ref or mod bits. We may compete with
2469 * hardware updating the R/M bits and have to try again.
2470 */
2471 if (flags == HAT_SYNC_ZERORM) {
2472 new = pte;
2473 PTE_CLR(new, PT_REF | PT_MOD);
2474 pte = hati_update_pte(ht, entry, pte, new);
2475 if (pte != 0) {
2476 x86_hm_exit(pp);
2477 goto try_again;
2478 }
2479 } else {
2480 /*
2481 * sync the PTE to the page_t
2482 */
2483 hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2484 }
2485 x86_hm_exit(pp);
2486 }
2487 if (ht)
2488 htable_release(ht);
2489 XPV_ALLOW_MIGRATE();
2490 }
2491
2492 /*
2493 * void hat_map(hat, addr, len, flags)
2494 */
2495 /*ARGSUSED*/
2496 void
2497 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2498 {
2499 /* does nothing */
2500 }
2501
2502 /*
2503 * uint_t hat_getattr(hat, addr, *attr)
2504 * returns attr for <hat,addr> in *attr. returns 0 if there was a
2505 * mapping and *attr is valid, nonzero if there was no mapping and
2506 * *attr is not valid.
2507 */
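/*
 * A minimal usage sketch (the names are illustrative only):
 *
 *	uint_t attr;
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		...the mapping at addr is writable...
 */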
2508 uint_t
2509 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2510 {
2511 uintptr_t vaddr = ALIGN2PAGE(addr);
2512 htable_t *ht = NULL;
2513 x86pte_t pte;
2514
2515 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2516
2517 if (IN_VA_HOLE(vaddr))
2518 return ((uint_t)-1);
2519
2520 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2521 if (ht == NULL)
2522 return ((uint_t)-1);
2523
2524 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2525 htable_release(ht);
2526 return ((uint_t)-1);
2527 }
2528
2529 *attr = PROT_READ;
2530 if (PTE_GET(pte, PT_WRITABLE))
2531 *attr |= PROT_WRITE;
2532 if (PTE_GET(pte, PT_USER))
2533 *attr |= PROT_USER;
2534 if (!PTE_GET(pte, mmu.pt_nx))
2535 *attr |= PROT_EXEC;
2536 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2537 *attr |= HAT_NOSYNC;
2538 htable_release(ht);
2539 return (0);
2540 }
2541
2542 /*
2543 * hat_updateattr() applies the given attribute change to an existing mapping
2544 */
2545 #define HAT_LOAD_ATTR 1
2546 #define HAT_SET_ATTR 2
2547 #define HAT_CLR_ATTR 3
2548
2549 static void
2550 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2551 {
2552 uintptr_t vaddr = (uintptr_t)addr;
2553 uintptr_t eaddr = (uintptr_t)addr + len;
2554 htable_t *ht = NULL;
2555 uint_t entry;
2556 x86pte_t oldpte, newpte;
2557 page_t *pp;
2558
2559 XPV_DISALLOW_MIGRATE();
2560 ASSERT(IS_PAGEALIGNED(vaddr));
2561 ASSERT(IS_PAGEALIGNED(eaddr));
2562 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2563 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2564 try_again:
2565 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2566 if (ht == NULL)
2567 break;
2568 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2569 continue;
2570
2571 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2572 if (pp == NULL)
2573 continue;
2574 x86_hm_enter(pp);
2575
2576 newpte = oldpte;
2577 /*
2578 * We found a page table entry in the desired range,
2579 * figure out the new attributes.
2580 */
2581 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2582 if ((attr & PROT_WRITE) &&
2583 !PTE_GET(oldpte, PT_WRITABLE))
2584 newpte |= PT_WRITABLE;
2585
2586 if ((attr & HAT_NOSYNC) &&
2587 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2588 newpte |= PT_NOSYNC;
2589
2590 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2591 newpte &= ~mmu.pt_nx;
2592 }
2593
2594 if (what == HAT_LOAD_ATTR) {
2595 if (!(attr & PROT_WRITE) &&
2596 PTE_GET(oldpte, PT_WRITABLE))
2597 newpte &= ~PT_WRITABLE;
2598
2599 if (!(attr & HAT_NOSYNC) &&
2600 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2601 newpte &= ~PT_SOFTWARE;
2602
2603 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2604 newpte |= mmu.pt_nx;
2605 }
2606
2607 if (what == HAT_CLR_ATTR) {
2608 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2609 newpte &= ~PT_WRITABLE;
2610
2611 if ((attr & HAT_NOSYNC) &&
2612 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2613 newpte &= ~PT_SOFTWARE;
2614
2615 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2616 newpte |= mmu.pt_nx;
2617 }
2618
2619 /*
2620 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2621 * x86pte_set() depends on this.
2622 */
2623 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2624 newpte |= PT_REF | PT_MOD;
2625
2626 /*
2627 * what about PROT_READ or others? this code only handles:
2628 * EXEC, WRITE, NOSYNC
2629 */
2630
2631 /*
2632 * If new PTE really changed, update the table.
2633 */
2634 if (newpte != oldpte) {
2635 entry = htable_va2entry(vaddr, ht);
2636 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2637 if (oldpte != 0) {
2638 x86_hm_exit(pp);
2639 goto try_again;
2640 }
2641 }
2642 x86_hm_exit(pp);
2643 }
2644 if (ht)
2645 htable_release(ht);
2646 XPV_ALLOW_MIGRATE();
2647 }
2648
2649 /*
2650 * Various wrappers for hat_updateattr()
2651 */
2652 void
2653 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2654 {
2655 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2656 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2657 }
2658
2659 void
2660 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2661 {
2662 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2663 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2664 }
2665
2666 void
2667 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2668 {
2669 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2670 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2671 }
2672
2673 void
2674 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2675 {
2676 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2677 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2678 }
2679
2680 /*
2681 * size_t hat_getpagesize(hat, addr)
2682 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
2683 * no mapping. This is an advisory call.
2684 */
2685 ssize_t
2686 hat_getpagesize(hat_t *hat, caddr_t addr)
2687 {
2688 uintptr_t vaddr = ALIGN2PAGE(addr);
2689 htable_t *ht;
2690 size_t pagesize;
2691
2692 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2693 if (IN_VA_HOLE(vaddr))
2694 return (-1);
2695 ht = htable_getpage(hat, vaddr, NULL);
2696 if (ht == NULL)
2697 return (-1);
2698 pagesize = LEVEL_SIZE(ht->ht_level);
2699 htable_release(ht);
2700 return (pagesize);
2701 }
2702
2703
2704
2705 /*
2706 * pfn_t hat_getpfnum(hat, addr)
2707 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2708 */
2709 pfn_t
2710 hat_getpfnum(hat_t *hat, caddr_t addr)
2711 {
2712 uintptr_t vaddr = ALIGN2PAGE(addr);
2713 htable_t *ht;
2714 uint_t entry;
2715 pfn_t pfn = PFN_INVALID;
2716
2717 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2718 if (khat_running == 0)
2719 return (PFN_INVALID);
2720
2721 if (IN_VA_HOLE(vaddr))
2722 return (PFN_INVALID);
2723
2724 XPV_DISALLOW_MIGRATE();
2725 /*
2726 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2727 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2728 * this up.
2729 */
2730 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2731 x86pte_t pte;
2732 pgcnt_t pg_index;
2733
2734 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2735 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2736 if (PTE_ISVALID(pte))
2737 /*LINTED [use of constant 0 causes a lint warning] */
2738 pfn = PTE2PFN(pte, 0);
2739 XPV_ALLOW_MIGRATE();
2740 return (pfn);
2741 }
2742
2743 ht = htable_getpage(hat, vaddr, &entry);
2744 if (ht == NULL) {
2745 XPV_ALLOW_MIGRATE();
2746 return (PFN_INVALID);
2747 }
2748 ASSERT(vaddr >= ht->ht_vaddr);
2749 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
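/*
 * For a large page mapping the PTE holds the pfn of the first
 * constituent page, so we add the page offset of vaddr within the
 * mapping below; e.g. (illustrative) for a level 1 mapping,
 * pfn = base_pfn + mmu_btop(vaddr & LEVEL_OFFSET(1)).
 */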
2750 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2751 if (ht->ht_level > 0)
2752 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2753 htable_release(ht);
2754 XPV_ALLOW_MIGRATE();
2755 return (pfn);
2756 }
2757
2758 /*
2759 * int hat_probe(hat, addr)
2760 * return 0 if no valid mapping is present. Faster version
2761 * of hat_getattr in certain architectures.
2762 */
2763 int
2764 hat_probe(hat_t *hat, caddr_t addr)
2765 {
2766 uintptr_t vaddr = ALIGN2PAGE(addr);
2767 uint_t entry;
2768 htable_t *ht;
2769 pgcnt_t pg_off;
2770
2771 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2772 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2773 if (IN_VA_HOLE(vaddr))
2774 return (0);
2775
2776 /*
2777 * Most common use of hat_probe is from segmap. We special case it
2778 * for performance.
2779 */
2780 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2781 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2782 if (mmu.pae_hat)
2783 return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2784 else
2785 return (PTE_ISVALID(
2786 ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2787 }
2788
2789 ht = htable_getpage(hat, vaddr, &entry);
2790 htable_release(ht);
2791 return (ht != NULL);
2792 }
2793
2794 /*
2795 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2796 */
2797 static int
2798 is_it_dism(hat_t *hat, caddr_t va)
2799 {
2800 struct seg *seg;
2801 struct shm_data *shmd;
2802 struct spt_data *sptd;
2803
2804 seg = as_findseg(hat->hat_as, va, 0);
2805 ASSERT(seg != NULL);
2806 ASSERT(seg->s_base <= va);
2807 shmd = (struct shm_data *)seg->s_data;
2808 ASSERT(shmd != NULL);
2809 sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2810 ASSERT(sptd != NULL);
2811 if (sptd->spt_flags & SHM_PAGEABLE)
2812 return (1);
2813 return (0);
2814 }
2815
2816 /*
2817 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2818 * except that we use the ism_hat's existing mappings to determine the pages
2819 * and protections to use for this hat. If we find a full properly aligned
2820 * and sized pagetable, we will attempt to share the pagetable itself.
2821 */
2822 /*ARGSUSED*/
2823 int
2824 hat_share(
2825 hat_t *hat,
2826 caddr_t addr,
2827 hat_t *ism_hat,
2828 caddr_t src_addr,
2829 size_t len, /* almost useless value, see below.. */
2830 uint_t ismszc)
2831 {
2832 uintptr_t vaddr_start = (uintptr_t)addr;
2833 uintptr_t vaddr;
2834 uintptr_t eaddr = vaddr_start + len;
2835 uintptr_t ism_addr_start = (uintptr_t)src_addr;
2836 uintptr_t ism_addr = ism_addr_start;
2837 uintptr_t e_ism_addr = ism_addr + len;
2838 htable_t *ism_ht = NULL;
2839 htable_t *ht;
2840 x86pte_t pte;
2841 page_t *pp;
2842 pfn_t pfn;
2843 level_t l;
2844 pgcnt_t pgcnt;
2845 uint_t prot;
2846 int is_dism;
2847 int flags;
2848
2849 /*
2850 * We might be asked to share an empty DISM hat by as_dup()
2851 */
2852 ASSERT(hat != kas.a_hat);
2853 ASSERT(eaddr <= _userlimit);
2854 if (!(ism_hat->hat_flags & HAT_SHARED)) {
2855 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2856 return (0);
2857 }
2858 XPV_DISALLOW_MIGRATE();
2859
2860 /*
2861 * The SPT segment driver often passes us a size larger than there are
2862 * valid mappings. That's because it rounds the segment size up to a
2863 * large pagesize, even if the actual memory mapped by ism_hat is less.
2864 */
2865 ASSERT(IS_PAGEALIGNED(vaddr_start));
2866 ASSERT(IS_PAGEALIGNED(ism_addr_start));
2867 ASSERT(ism_hat->hat_flags & HAT_SHARED);
2868 is_dism = is_it_dism(hat, addr);
2869 while (ism_addr < e_ism_addr) {
2870 /*
2871 * use htable_walk to get the next valid ISM mapping
2872 */
2873 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2874 if (ism_ht == NULL)
2875 break;
2876
2877 /*
2878 * First check to see if we already share the page table.
2879 */
2880 l = ism_ht->ht_level;
2881 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2882 ht = htable_lookup(hat, vaddr, l);
2883 if (ht != NULL) {
2884 if (ht->ht_flags & HTABLE_SHARED_PFN)
2885 goto shared;
2886 htable_release(ht);
2887 goto not_shared;
2888 }
2889
2890 /*
2891 * Can't ever share top table.
2892 */
2893 if (l == mmu.max_level)
2894 goto not_shared;
2895
2896 /*
2897 * Avoid level mismatches later due to DISM faults.
2898 */
2899 if (is_dism && l > 0)
2900 goto not_shared;
2901
2902 /*
2903 * addresses and lengths must align
2904 * table must be fully populated
2905 * no lower level page tables
2906 */
2907 if (ism_addr != ism_ht->ht_vaddr ||
2908 (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2909 goto not_shared;
2910
2911 /*
2912 * The range of address space must cover a full table.
2913 */
2914 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2915 goto not_shared;
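/*
 * Illustrative example (sizes assume amd64/PAE pagetables): to share a
 * level 0 pagetable, vaddr must be aligned on a LEVEL_SIZE(1) (2MB)
 * boundary, ism_addr must sit at the start of the ISM pagetable, and at
 * least 2MB of the share must remain, so that one full 512-entry
 * pagetable maps the identical region in both hats.
 */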
2916
2917 /*
2918 * All entries in the ISM page table must be leaf PTEs.
2919 */
2920 if (l > 0) {
2921 int e;
2922
2923 /*
2924 * We know the 0th is from htable_walk() above.
2925 */
2926 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
2927 x86pte_t pte;
2928 pte = x86pte_get(ism_ht, e);
2929 if (!PTE_ISPAGE(pte, l))
2930 goto not_shared;
2931 }
2932 }
2933
2934 /*
2935 * share the page table
2936 */
2937 ht = htable_create(hat, vaddr, l, ism_ht);
2938 shared:
2939 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
2940 ASSERT(ht->ht_shares == ism_ht);
2941 hat->hat_ism_pgcnt +=
2942 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
2943 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
2944 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
2945 htable_release(ht);
2946 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
2947 htable_release(ism_ht);
2948 ism_ht = NULL;
2949 continue;
2950
2951 not_shared:
2952 /*
2953 * Unable to share the page table. Instead we will
2954 * create new mappings from the values in the ISM mappings.
2955 * Figure out what level size mappings to use.
2956 */
2957 for (l = ism_ht->ht_level; l > 0; --l) {
2958 if (LEVEL_SIZE(l) <= eaddr - vaddr &&
2959 (vaddr & LEVEL_OFFSET(l)) == 0)
2960 break;
2961 }
2962
2963 /*
2964 * The ISM mapping might be larger than the share area,
2965 * be careful to truncate it if needed.
2966 */
2967 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
2968 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
2969 } else {
2970 pgcnt = mmu_btop(eaddr - vaddr);
2971 l = 0;
2972 }
2973
2974 pfn = PTE2PFN(pte, ism_ht->ht_level);
2975 ASSERT(pfn != PFN_INVALID);
2976 while (pgcnt > 0) {
2977 /*
2978 * Make a new pte for the PFN for this level.
2979 * Copy protections for the pte from the ISM pte.
2980 */
2981 pp = page_numtopp_nolock(pfn);
2982 ASSERT(pp != NULL);
2983
2984 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
2985 if (PTE_GET(pte, PT_WRITABLE))
2986 prot |= PROT_WRITE;
2987 if (!PTE_GET(pte, PT_NX))
2988 prot |= PROT_EXEC;
2989
2990 flags = HAT_LOAD;
2991 if (!is_dism)
2992 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
2993 while (hati_load_common(hat, vaddr, pp, prot, flags,
2994 l, pfn) != 0) {
2995 if (l == 0)
2996 panic("hati_load_common() failure");
2997 --l;
2998 }
2999
3000 vaddr += LEVEL_SIZE(l);
3001 ism_addr += LEVEL_SIZE(l);
3002 pfn += mmu_btop(LEVEL_SIZE(l));
3003 pgcnt -= mmu_btop(LEVEL_SIZE(l));
3004 }
3005 }
3006 if (ism_ht != NULL)
3007 htable_release(ism_ht);
3008 XPV_ALLOW_MIGRATE();
3009 return (0);
3010 }
3011
3012
3013 /*
3014 * hat_unshare() is similar to hat_unload_callback(), but
3015 * we have to look for empty shared pagetables. Note that
3016 * hat_unshare() is always invoked against an entire segment.
3017 */
3018 /*ARGSUSED*/
3019 void
3020 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3021 {
3022 uint64_t vaddr = (uintptr_t)addr;
3023 uintptr_t eaddr = vaddr + len;
3024 htable_t *ht = NULL;
3025 uint_t need_demaps = 0;
3026 int flags = HAT_UNLOAD_UNMAP;
3027 level_t l;
3028
3029 ASSERT(hat != kas.a_hat);
3030 ASSERT(eaddr <= _userlimit);
3031 ASSERT(IS_PAGEALIGNED(vaddr));
3032 ASSERT(IS_PAGEALIGNED(eaddr));
3033 XPV_DISALLOW_MIGRATE();
3034
3035 /*
3036 * First go through and remove any shared pagetables.
3037 *
3038 * Note that it's ok to delay the TLB shootdown till the entire range is
3039 * finished, because if hat_pageunload() were to unload a shared
3040 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3041 */
3042 l = mmu.max_page_level;
3043 if (l == mmu.max_level)
3044 --l;
3045 for (; l >= 0; --l) {
3046 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3047 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3048 ASSERT(!IN_VA_HOLE(vaddr));
3049 /*
3050 * find a pagetable that maps the current address
3051 */
3052 ht = htable_lookup(hat, vaddr, l);
3053 if (ht == NULL)
3054 continue;
3055 if (ht->ht_flags & HTABLE_SHARED_PFN) {
3056 /*
3057 * clear page count, set valid_cnt to 0,
3058 * let htable_release() finish the job
3059 */
3060 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3061 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3062 ht->ht_valid_cnt = 0;
3063 need_demaps = 1;
3064 }
3065 htable_release(ht);
3066 }
3067 }
3068
3069 /*
3070 * flush the TLBs - since we're probably dealing with MANY mappings
3071 * we do just one CR3 reload.
3072 */
3073 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3074 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3075
3076 /*
3077 * Now go back and clean up any unaligned mappings that
3078 * couldn't share pagetables.
3079 */
3080 if (!is_it_dism(hat, addr))
3081 flags |= HAT_UNLOAD_UNLOCK;
3082 hat_unload(hat, addr, len, flags);
3083 XPV_ALLOW_MIGRATE();
3084 }
3085
3086
3087 /*
3088 * hat_reserve() does nothing
3089 */
3090 /*ARGSUSED*/
3091 void
3092 hat_reserve(struct as *as, caddr_t addr, size_t len)
3093 {
3094 }
3095
3096
3097 /*
3098 * Called when all mappings to a page should have write permission removed.
3099 * Mostly stolen from hat_pagesync()
3100 */
3101 static void
3102 hati_page_clrwrt(struct page *pp)
3103 {
3104 hment_t *hm = NULL;
3105 htable_t *ht;
3106 uint_t entry;
3107 x86pte_t old;
3108 x86pte_t new;
3109 uint_t pszc = 0;
3110
3111 XPV_DISALLOW_MIGRATE();
3112 next_size:
3113 /*
3114 * walk thru the mapping list clearing write permission
3115 */
3116 x86_hm_enter(pp);
3117 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3118 if (ht->ht_level < pszc)
3119 continue;
3120 old = x86pte_get(ht, entry);
3121
3122 for (;;) {
3123 /*
3124 * Is this mapping of interest?
3125 */
3126 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3127 PTE_GET(old, PT_WRITABLE) == 0)
3128 break;
3129
3130 /*
3131 * Clear ref/mod writable bits. This requires cross
3132 * calls to ensure any executing TLBs see cleared bits.
3133 */
3134 new = old;
3135 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3136 old = hati_update_pte(ht, entry, old, new);
3137 if (old != 0)
3138 continue;
3139
3140 break;
3141 }
3142 }
3143 x86_hm_exit(pp);
3144 while (pszc < pp->p_szc) {
3145 page_t *tpp;
3146 pszc++;
3147 tpp = PP_GROUPLEADER(pp, pszc);
3148 if (pp != tpp) {
3149 pp = tpp;
3150 goto next_size;
3151 }
3152 }
3153 XPV_ALLOW_MIGRATE();
3154 }
3155
3156 /*
3157 * void hat_page_setattr(pp, flag)
3158 * void hat_page_clrattr(pp, flag)
3159 * used to set/clr ref/mod bits.
3160 */
3161 void
3162 hat_page_setattr(struct page *pp, uint_t flag)
3163 {
3164 vnode_t *vp = pp->p_vnode;
3165 kmutex_t *vphm = NULL;
3166 page_t **listp;
3167 int noshuffle;
3168
3169 noshuffle = flag & P_NSH;
3170 flag &= ~P_NSH;
3171
3172 if (PP_GETRM(pp, flag) == flag)
3173 return;
3174
3175 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3176 !noshuffle) {
3177 vphm = page_vnode_mutex(vp);
3178 mutex_enter(vphm);
3179 }
3180
3181 PP_SETRM(pp, flag);
3182
3183 if (vphm != NULL) {
3184
3185 /*
3186 * Some File Systems examine v_pages for NULL w/o
3187 * grabbing the vphm mutex. Must not let it become NULL when
3188 * pp is the only page on the list.
3189 */
3190 if (pp->p_vpnext != pp) {
3191 page_vpsub(&vp->v_pages, pp);
3192 if (vp->v_pages != NULL)
3193 listp = &vp->v_pages->p_vpprev->p_vpnext;
3194 else
3195 listp = &vp->v_pages;
3196 page_vpadd(listp, pp);
3197 }
3198 mutex_exit(vphm);
3199 }
3200 }
3201
3202 void
3203 hat_page_clrattr(struct page *pp, uint_t flag)
3204 {
3205 vnode_t *vp = pp->p_vnode;
3206 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3207
3208 /*
3209 * Caller is expected to hold page's io lock for VMODSORT to work
3210 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3211 * bit is cleared.
3212 * We don't assert this, to avoid tripping some existing third-party
3213 * code. The dirty page is moved back to the top of the v_pages list
3214 * after IO is done in pvn_write_done().
3215 */
3216 PP_CLRRM(pp, flag);
3217
3218 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3219
3220 /*
3221 * VMODSORT works by removing write permissions and getting
3222 * a fault when a page is made dirty. At this point
3223 * we need to remove write permission from all mappings
3224 * to this page.
3225 */
3226 hati_page_clrwrt(pp);
3227 }
3228 }
3229
3230 /*
3231 * If flag is specified, returns 0 if attribute is disabled
3232 * and nonzero if enabled. If flag specifies multiple attributes
3233 * then returns 0 if ALL attributes are disabled. This is an advisory
3234 * call.
3235 */
3236 uint_t
3237 hat_page_getattr(struct page *pp, uint_t flag)
3238 {
3239 return (PP_GETRM(pp, flag));
3240 }
3241
3242
3243 /*
3244 * common code used by hat_pageunload() and hment_steal()
3245 */
3246 hment_t *
3247 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3248 {
3249 x86pte_t old_pte;
3250 pfn_t pfn = pp->p_pagenum;
3251 hment_t *hm;
3252
3253 /*
3254 * We need to acquire a hold on the htable in order to
3255 * do the invalidate. We know the htable must exist, since
3256 * unmaps don't release the htable until after removing any
3257 * hment. Having x86_hm_enter() keeps that from proceeding.
3258 */
3259 htable_acquire(ht);
3260
3261 /*
3262 * Invalidate the PTE and remove the hment.
3263 */
3264 old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3265 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3266 panic("x86pte_inval() failure found PTE = " FMT_PTE
3267 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3268 old_pte, pfn, (uintptr_t)ht, entry);
3269 }
3270
3271 /*
3272 * Clean up all the htable information for this mapping
3273 */
3274 ASSERT(ht->ht_valid_cnt > 0);
3275 HTABLE_DEC(ht->ht_valid_cnt);
3276 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3277
3278 /*
3279 * sync ref/mod bits to the page_t
3280 */
3281 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3282 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3283
3284 /*
3285 * Remove the mapping list entry for this page.
3286 */
3287 hm = hment_remove(pp, ht, entry);
3288
3289 /*
3290 * drop the mapping list lock so that we might free the
3291 * hment and htable.
3292 */
3293 x86_hm_exit(pp);
3294 htable_release(ht);
3295 return (hm);
3296 }
3297
3298 extern int vpm_enable;
3299 /*
3300 * Unload all translations to a page. If the page is a subpage of a large
3301 * page, the large page mappings are also removed.
3302 *
3303 * The forceflags are unused.
3304 */
3305
3306 /*ARGSUSED*/
3307 static int
3308 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3309 {
3310 page_t *cur_pp = pp;
3311 hment_t *hm;
3312 hment_t *prev;
3313 htable_t *ht;
3314 uint_t entry;
3315 level_t level;
3316
3317 XPV_DISALLOW_MIGRATE();
3318
3319 /*
3320 * prevent recursion due to kmem_free()
3321 */
3322 ++curthread->t_hatdepth;
3323 ASSERT(curthread->t_hatdepth < 16);
3324
3325 #if defined(__amd64)
3326 /*
3327 * clear the vpm ref.
3328 */
3329 if (vpm_enable) {
3330 pp->p_vpmref = 0;
3331 }
3332 #endif
3333 /*
3334 * The loop with next_size handles pages with multiple pagesize mappings
3335 */
3336 next_size:
3337 for (;;) {
3338
3339 /*
3340 * Get a mapping list entry
3341 */
3342 x86_hm_enter(cur_pp);
3343 for (prev = NULL; ; prev = hm) {
3344 hm = hment_walk(cur_pp, &ht, &entry, prev);
3345 if (hm == NULL) {
3346 x86_hm_exit(cur_pp);
3347
3348 /*
3349 * If not part of a larger page, we're done.
3350 */
3351 if (cur_pp->p_szc <= pg_szcd) {
3352 ASSERT(curthread->t_hatdepth > 0);
3353 --curthread->t_hatdepth;
3354 XPV_ALLOW_MIGRATE();
3355 return (0);
3356 }
3357
3358 /*
3359 * Else check the next larger page size.
3360 * hat_page_demote() may decrease p_szc
3361 * but that's ok; we'll just take an extra
3362 * trip, discover there are no larger
3363 * mappings, and return.
3364 */
3365 ++pg_szcd;
3366 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3367 goto next_size;
3368 }
3369
3370 /*
3371 * If this mapping size matches, remove it.
3372 */
3373 level = ht->ht_level;
3374 if (level == pg_szcd)
3375 break;
3376 }
3377
3378 /*
3379 * Remove the mapping list entry for this page.
3380 * Note this does the x86_hm_exit() for us.
3381 */
3382 hm = hati_page_unmap(cur_pp, ht, entry);
3383 if (hm != NULL)
3384 hment_free(hm);
3385 }
3386 }
3387
3388 int
3389 hat_pageunload(struct page *pp, uint_t forceflag)
3390 {
3391 ASSERT(PAGE_EXCL(pp));
3392 return (hati_pageunload(pp, 0, forceflag));
3393 }
3394
3395 /*
3396 * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3397 * page level that included pp.
3398 *
3399 * pp must be locked EXCL. Even though no other constituent pages are locked
3400 * it's legal to unload large mappings to pp because all constituent pages of
3401 * large locked mappings have to be locked SHARED. Therefore if we have an EXCL
3402 * lock on one of the constituent pages, none of the large mappings to pp are
3403 * locked.
3404 *
3405 * Change (always decrease) p_szc field starting from the last constituent
3406 * page and ending with root constituent page so that root's pszc always shows
3407 * the area where hat_page_demote() may be active.
3408 *
3409 * This mechanism is only used for file system pages where it's not always
3410 * possible to get EXCL locks on all constituent pages to demote the size code
3411 * (as is done for anonymous or kernel large pages).
3412 */
3413 void
3414 hat_page_demote(page_t *pp)
3415 {
3416 uint_t pszc;
3417 uint_t rszc;
3418 uint_t szc;
3419 page_t *rootpp;
3420 page_t *firstpp;
3421 page_t *lastpp;
3422 pgcnt_t pgcnt;
3423
3424 ASSERT(PAGE_EXCL(pp));
3425 ASSERT(!PP_ISFREE(pp));
3426 ASSERT(page_szc_lock_assert(pp));
3427
3428 if (pp->p_szc == 0)
3429 return;
3430
3431 rootpp = PP_GROUPLEADER(pp, 1);
3432 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3433
3434 /*
3435 * all large mappings to pp are gone
3436 * and no new ones can be set up since pp is locked exclusively.
3437 *
3438 * Lock the root to make sure there's only one hat_page_demote()
3439 * outstanding within the area of this root's pszc.
3440 *
3441 * Second potential hat_page_demote() is already eliminated by upper
3442 * VM layer via page_szc_lock() but we don't rely on it and use our
3443 * own locking (so that upper layer locking can be changed without
3444 * assumptions that hat depends on upper layer VM to prevent multiple
3445 * hat_page_demote() to be issued simultaneously to the same large
3446 * page).
3447 */
3448 again:
3449 pszc = pp->p_szc;
3450 if (pszc == 0)
3451 return;
3452 rootpp = PP_GROUPLEADER(pp, pszc);
3453 x86_hm_enter(rootpp);
3454 /*
3455 * If root's p_szc is different from pszc we raced with another
3456 * hat_page_demote(). Drop the lock and try to find the root again.
3457 * If root's p_szc is greater than pszc, the previous hat_page_demote() is
3458 * not done yet. Take and release the mlist lock of root's root to wait
3459 * for previous hat_page_demote() to complete.
3460 */
3461 if ((rszc = rootpp->p_szc) != pszc) {
3462 x86_hm_exit(rootpp);
3463 if (rszc > pszc) {
3464 /* p_szc of a locked non free page can't increase */
3465 ASSERT(pp != rootpp);
3466
3467 rootpp = PP_GROUPLEADER(rootpp, rszc);
3468 x86_hm_enter(rootpp);
3469 x86_hm_exit(rootpp);
3470 }
3471 goto again;
3472 }
3473 ASSERT(pp->p_szc == pszc);
3474
3475 /*
3476 * Decrement by 1 p_szc of every constituent page of a region that
3477 * covered pp. For example if original szc is 3 it gets changed to 2
3478 * everywhere except in region 2 that covered pp. Region 2 that
3479 * covered pp gets demoted to 1 everywhere except in region 1 that
3480 * covered pp. The region 1 that covered pp is demoted to region
3481 * 0. It's done this way because from region 3 we removed level 3
3482 * mappings, from region 2 that covered pp we removed level 2 mappings
3483 * and from region 1 that covered pp we removed level 1 mappings. All
3484 * changes are done from high pfn's to low pfn's so that roots
3485 * are changed last allowing one to know the largest region where
3486 * hat_page_demote() is still active by only looking at the root page.
3487 *
3488 * This algorithm is implemented in 2 while loops. First loop changes
3489 * p_szc of pages to the right of pp's level 1 region and second
3490 * loop changes p_szc of pages of level 1 region that covers pp
3491 * and all pages to the left of level 1 region that covers pp.
3492 * In the first loop p_szc keeps dropping with every iteration
3493 * and in the second loop it keeps increasing with every iteration.
3494 *
3495 * First loop description: Demote pages to the right of pp outside of
3496 * level 1 region that covers pp. In every iteration of the while
3497 * loop below find the last page of szc region and the first page of
3498 * (szc - 1) region that is immediately to the right of (szc - 1)
3499 * region that covers pp. From last such page to first such page
3500 * change every page's szc to szc - 1. Decrement szc and continue
3501 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3502 * of szc region skip to the next iteration.
3503 */
3504 szc = pszc;
3505 while (szc > 1) {
3506 lastpp = PP_GROUPLEADER(pp, szc);
3507 pgcnt = page_get_pagecnt(szc);
3508 lastpp += pgcnt - 1;
3509 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3510 pgcnt = page_get_pagecnt(szc - 1);
3511 if (lastpp - firstpp < pgcnt) {
3512 szc--;
3513 continue;
3514 }
3515 firstpp += pgcnt;
3516 while (lastpp != firstpp) {
3517 ASSERT(lastpp->p_szc == pszc);
3518 lastpp->p_szc = szc - 1;
3519 lastpp--;
3520 }
3521 firstpp->p_szc = szc - 1;
3522 szc--;
3523 }
3524
3525 /*
3526 * Second loop description:
3527 * First iteration changes p_szc to 0 of every
3528 * page of level 1 region that covers pp.
3529 * Subsequent iterations find last page of szc region
3530 * immediately to the left of szc region that covered pp
3531 * and first page of (szc + 1) region that covers pp.
3532 * From last to first page change p_szc of every page to szc.
3533 * Increment szc and continue looping until szc is pszc.
3534 * If pp belongs to the first szc region of (szc + 1) region
3535 * skip to the next iteration.
3536 *
3537 */
3538 szc = 0;
3539 while (szc < pszc) {
3540 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3541 if (szc == 0) {
3542 pgcnt = page_get_pagecnt(1);
3543 lastpp = firstpp + (pgcnt - 1);
3544 } else {
3545 lastpp = PP_GROUPLEADER(pp, szc);
3546 if (firstpp == lastpp) {
3547 szc++;
3548 continue;
3549 }
3550 lastpp--;
3551 pgcnt = page_get_pagecnt(szc);
3552 }
3553 while (lastpp != firstpp) {
3554 ASSERT(lastpp->p_szc == pszc);
3555 lastpp->p_szc = szc;
3556 lastpp--;
3557 }
3558 firstpp->p_szc = szc;
3559 if (firstpp == rootpp)
3560 break;
3561 szc++;
3562 }
3563 x86_hm_exit(rootpp);
3564 }
3565
3566 /*
3567 * get hw stats from hardware into page struct and reset hw stats
3568 * returns attributes of page
3569 * Flags for hat_pagesync, hat_getstat, hat_sync
3570 *
3571 * define HAT_SYNC_ZERORM 0x01
3572 *
3573 * Additional flags for hat_pagesync
3574 *
3575 * define HAT_SYNC_STOPON_REF 0x02
3576 * define HAT_SYNC_STOPON_MOD 0x04
3577 * define HAT_SYNC_STOPON_RM 0x06
3578 * define HAT_SYNC_STOPON_SHARED 0x08
3579 */
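/*
 * For example, hat_pagesync(pp, HAT_SYNC_STOPON_MOD) pushes ref/mod bits
 * from the pagetables into the page_t but returns as soon as a modified
 * mapping has been seen, while hat_pagesync(pp, HAT_SYNC_ZERORM) also
 * clears the hardware ref/mod bits as it goes.
 */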
3580 uint_t
3581 hat_pagesync(struct page *pp, uint_t flags)
3582 {
3583 hment_t *hm = NULL;
3584 htable_t *ht;
3585 uint_t entry;
3586 x86pte_t old, save_old;
3587 x86pte_t new;
3588 uchar_t nrmbits = P_REF|P_MOD|P_RO;
3589 extern ulong_t po_share;
3590 page_t *save_pp = pp;
3591 uint_t pszc = 0;
3592
3593 ASSERT(PAGE_LOCKED(pp) || panicstr);
3594
3595 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3596 return (pp->p_nrm & nrmbits);
3597
3598 if ((flags & HAT_SYNC_ZERORM) == 0) {
3599
3600 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3601 return (pp->p_nrm & nrmbits);
3602
3603 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3604 return (pp->p_nrm & nrmbits);
3605
3606 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3607 hat_page_getshare(pp) > po_share) {
3608 if (PP_ISRO(pp))
3609 PP_SETREF(pp);
3610 return (pp->p_nrm & nrmbits);
3611 }
3612 }
3613
3614 XPV_DISALLOW_MIGRATE();
3615 next_size:
3616 /*
3617 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3618 */
3619 x86_hm_enter(pp);
3620 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3621 if (ht->ht_level < pszc)
3622 continue;
3623 old = x86pte_get(ht, entry);
3624 try_again:
3625
3626 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3627
3628 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3629 continue;
3630
3631 save_old = old;
3632 if ((flags & HAT_SYNC_ZERORM) != 0) {
3633
3634 /*
3635 * Need to clear ref or mod bits. Need to demap
3636 * to make sure any executing TLBs see cleared bits.
3637 */
3638 new = old;
3639 PTE_CLR(new, PT_REF | PT_MOD);
3640 old = hati_update_pte(ht, entry, old, new);
3641 if (old != 0)
3642 goto try_again;
3643
3644 old = save_old;
3645 }
3646
3647 /*
3648 * Sync the PTE
3649 */
3650 if (!(flags & HAT_SYNC_ZERORM) &&
3651 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3652 hati_sync_pte_to_page(pp, old, ht->ht_level);
3653
3654 /*
3655 * can stop short if we found a ref'd or mod'd page
3656 */
3657 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3658 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3659 x86_hm_exit(pp);
3660 goto done;
3661 }
3662 }
3663 x86_hm_exit(pp);
3664 while (pszc < pp->p_szc) {
3665 page_t *tpp;
3666 pszc++;
3667 tpp = PP_GROUPLEADER(pp, pszc);
3668 if (pp != tpp) {
3669 pp = tpp;
3670 goto next_size;
3671 }
3672 }
3673 done:
3674 XPV_ALLOW_MIGRATE();
3675 return (save_pp->p_nrm & nrmbits);
3676 }
3677
3678 /*
3679 * returns approx number of mappings to this pp. A return of 0 implies
3680 * there are no mappings to the page.
3681 */
3682 ulong_t
3683 hat_page_getshare(page_t *pp)
3684 {
3685 uint_t cnt;
3686 cnt = hment_mapcnt(pp);
3687 #if defined(__amd64)
3688 if (vpm_enable && pp->p_vpmref) {
3689 cnt += 1;
3690 }
3691 #endif
3692 return (cnt);
3693 }
3694
3695 /*
3696 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3697 * otherwise.
3698 */
3699 int
3700 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3701 {
3702 return (hat_page_getshare(pp) > sh_thresh);
3703 }
3704
3705 /*
3706 * hat_softlock isn't supported anymore
3707 */
3708 /*ARGSUSED*/
3709 faultcode_t
3710 hat_softlock(
3711 hat_t *hat,
3712 caddr_t addr,
3713 size_t *len,
3714 struct page **page_array,
3715 uint_t flags)
3716 {
3717 return (FC_NOSUPPORT);
3718 }
3719
3720
3721
3722 /*
3723 * Routine to expose supported HAT features to platform independent code.
3724 */
3725 /*ARGSUSED*/
3726 int
3727 hat_supported(enum hat_features feature, void *arg)
3728 {
3729 switch (feature) {
3730
3731 case HAT_SHARED_PT: /* this is really ISM */
3732 return (1);
3733
3734 case HAT_DYNAMIC_ISM_UNMAP:
3735 return (0);
3736
3737 case HAT_VMODSORT:
3738 return (1);
3739
3740 case HAT_SHARED_REGIONS:
3741 return (0);
3742
3743 default:
3744 panic("hat_supported() - unknown feature");
3745 }
3746 return (0);
3747 }
3748
3749 /*
3750 * Called when a thread is exiting and has been switched to the kernel AS
3751 */
3752 void
3753 hat_thread_exit(kthread_t *thd)
3754 {
3755 ASSERT(thd->t_procp->p_as == &kas);
3756 XPV_DISALLOW_MIGRATE();
3757 hat_switch(thd->t_procp->p_as->a_hat);
3758 XPV_ALLOW_MIGRATE();
3759 }
3760
3761 /*
3762 * Set up the given brand-new hat structure as the new HAT on this cpu's mmu.
3763 */
3764 /*ARGSUSED*/
3765 void
3766 hat_setup(hat_t *hat, int flags)
3767 {
3768 XPV_DISALLOW_MIGRATE();
3769 kpreempt_disable();
3770
3771 hat_switch(hat);
3772
3773 kpreempt_enable();
3774 XPV_ALLOW_MIGRATE();
3775 }
3776
3777 /*
3778 * Prepare for a CPU private mapping for the given address.
3779 *
3780 * The address can only be used from a single CPU and can be remapped
3781 * using hat_mempte_remap(). Return the address of the PTE.
3782 *
3783 * We do the htable_create() if necessary and increment the valid count so
3784 * the htable can't disappear. We also hat_devload() the page table into
3785 * the kernel so that the PTE is quickly accessed.
3786 */
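/*
 * A minimal usage sketch (va is an illustrative, CPU private address the
 * caller has reserved; the attr value is just an example):
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(va);
 *	...
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, va, pte_pa, PROT_READ | PROT_WRITE, 0);
 *	...access the page through va on this CPU only...
 *	kpreempt_enable();
 *	...
 *	hat_mempte_release(va, pte_pa);
 */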
3787 hat_mempte_t
3788 hat_mempte_setup(caddr_t addr)
3789 {
3790 uintptr_t va = (uintptr_t)addr;
3791 htable_t *ht;
3792 uint_t entry;
3793 x86pte_t oldpte;
3794 hat_mempte_t p;
3795
3796 ASSERT(IS_PAGEALIGNED(va));
3797 ASSERT(!IN_VA_HOLE(va));
3798 ++curthread->t_hatdepth;
3799 XPV_DISALLOW_MIGRATE();
3800 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3801 if (ht == NULL) {
3802 ht = htable_create(kas.a_hat, va, 0, NULL);
3803 entry = htable_va2entry(va, ht);
3804 ASSERT(ht->ht_level == 0);
3805 oldpte = x86pte_get(ht, entry);
3806 }
3807 if (PTE_ISVALID(oldpte))
3808 panic("hat_mempte_setup(): address already mapped"
3809 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3810
3811 /*
3812 * increment ht_valid_cnt so that the pagetable can't disappear
3813 */
3814 HTABLE_INC(ht->ht_valid_cnt);
3815
3816 /*
3817 * return the PTE physical address to the caller.
3818 */
3819 htable_release(ht);
3820 XPV_ALLOW_MIGRATE();
3821 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3822 --curthread->t_hatdepth;
3823 return (p);
3824 }
3825
3826 /*
3827 * Release a CPU private mapping for the given address.
3828 * We decrement the htable valid count so it might be destroyed.
3829 */
3830 /*ARGSUSED1*/
3831 void
3832 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3833 {
3834 htable_t *ht;
3835
3836 XPV_DISALLOW_MIGRATE();
3837 /*
3838 * invalidate any leftover mapping and decrement the htable valid count
3839 */
3840 #ifdef __xpv
3841 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3842 UVMF_INVLPG | UVMF_LOCAL))
3843 panic("HYPERVISOR_update_va_mapping() failed");
3844 #else
3845 {
3846 x86pte_t *pteptr;
3847
3848 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3849 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3850 if (mmu.pae_hat)
3851 *pteptr = 0;
3852 else
3853 *(x86pte32_t *)pteptr = 0;
3854 mmu_tlbflush_entry(addr);
3855 x86pte_mapout();
3856 }
3857 #endif
3858
3859 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3860 if (ht == NULL)
3861 panic("hat_mempte_release(): invalid address");
3862 ASSERT(ht->ht_level == 0);
3863 HTABLE_DEC(ht->ht_valid_cnt);
3864 htable_release(ht);
3865 XPV_ALLOW_MIGRATE();
3866 }
3867
3868 /*
3869 * Apply a temporary CPU private mapping to a page. We flush the TLB only
3870 * on this CPU, so this ought to have been called with preemption disabled.
3871 */
3872 void
3873 hat_mempte_remap(
3874 pfn_t pfn,
3875 caddr_t addr,
3876 hat_mempte_t pte_pa,
3877 uint_t attr,
3878 uint_t flags)
3879 {
3880 uintptr_t va = (uintptr_t)addr;
3881 x86pte_t pte;
3882
3883 /*
3884 * Remap the given PTE to the new page's PFN. Invalidate only
3885 * on this CPU.
3886 */
3887 #ifdef DEBUG
3888 htable_t *ht;
3889 uint_t entry;
3890
3891 ASSERT(IS_PAGEALIGNED(va));
3892 ASSERT(!IN_VA_HOLE(va));
3893 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3894 ASSERT(ht != NULL);
3895 ASSERT(ht->ht_level == 0);
3896 ASSERT(ht->ht_valid_cnt > 0);
3897 ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3898 htable_release(ht);
3899 #endif
3900 XPV_DISALLOW_MIGRATE();
3901 pte = hati_mkpte(pfn, attr, 0, flags);
3902 #ifdef __xpv
3903 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3904 panic("HYPERVISOR_update_va_mapping() failed");
3905 #else
3906 {
3907 x86pte_t *pteptr;
3908
3909 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3910 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3911 if (mmu.pae_hat)
3912 *(x86pte_t *)pteptr = pte;
3913 else
3914 *(x86pte32_t *)pteptr = (x86pte32_t)pte;
3915 mmu_tlbflush_entry(addr);
3916 x86pte_mapout();
3917 }
3918 #endif
3919 XPV_ALLOW_MIGRATE();
3920 }
3921
3922
3923
3924 /*
3925 * Hat locking functions
3926 * XXX - these two functions are currently being used by hatstats
3927 * they can be removed by using a per-as mutex for hatstats.
3928 */
3929 void
3930 hat_enter(hat_t *hat)
3931 {
3932 mutex_enter(&hat->hat_mutex);
3933 }
3934
3935 void
3936 hat_exit(hat_t *hat)
3937 {
3938 mutex_exit(&hat->hat_mutex);
3939 }
3940
3941 /*
3942 * HAT part of cpu initialization.
3943 */
3944 void
3945 hat_cpu_online(struct cpu *cpup)
3946 {
3947 if (cpup != CPU) {
3948 x86pte_cpu_init(cpup);
3949 hat_vlp_setup(cpup);
3950 }
3951 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
3952 }
3953
3954 /*
3955 * HAT part of cpu deletion.
3956 * (currently, we only call this after the cpu is safely passivated.)
3957 */
3958 void
3959 hat_cpu_offline(struct cpu *cpup)
3960 {
3961 ASSERT(cpup != CPU);
3962
3963 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
3964 hat_vlp_teardown(cpup);
3965 x86pte_cpu_fini(cpup);
3966 }
3967
3968 /*
3969 * Function called after all CPUs are brought online.
3970 * Used to remove low address boot mappings.
3971 */
3972 void
3973 clear_boot_mappings(uintptr_t low, uintptr_t high)
3974 {
3975 uintptr_t vaddr = low;
3976 htable_t *ht = NULL;
3977 level_t level;
3978 uint_t entry;
3979 x86pte_t pte;
3980
3981 /*
3982 * On the 1st CPU we can unload the prom mappings; basically we blow away
3983 * all virtual mappings under _userlimit.
3984 */
3985 while (vaddr < high) {
3986 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
3987 if (ht == NULL)
3988 break;
3989
3990 level = ht->ht_level;
3991 entry = htable_va2entry(vaddr, ht);
3992 ASSERT(level <= mmu.max_page_level);
3993 ASSERT(PTE_ISPAGE(pte, level));
3994
3995 /*
3996 * Unload the mapping from the page tables.
3997 */
3998 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3999 ASSERT(ht->ht_valid_cnt > 0);
4000 HTABLE_DEC(ht->ht_valid_cnt);
4001 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4002
4003 vaddr += LEVEL_SIZE(ht->ht_level);
4004 }
4005 if (ht)
4006 htable_release(ht);
4007 }
4008
4009 /*
4010 * Atomically update a new translation for a single page. If the
4011 * currently installed PTE doesn't match the value we expect to find,
4012 * it's not updated and we return the PTE we found.
4013 *
4014 * If activating nosync or NOWRITE and the page was modified we need to sync
4015 * with the page_t. Also sync with page_t if clearing ref/mod bits.
4016 */
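/*
 * For example, when hat_pagesync(..., HAT_SYNC_ZERORM) or
 * hati_page_clrwrt() downgrades a dirty, writable mapping, the PT_MOD
 * bit captured from the old PTE below is pushed into the page_t via
 * hat_page_setattr(), so the modification isn't lost when the hardware
 * bit is cleared.
 */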
4017 static x86pte_t
4018 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4019 {
4020 page_t *pp;
4021 uint_t rm = 0;
4022 x86pte_t replaced;
4023
4024 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4025 PTE_GET(expected, PT_MOD | PT_REF) &&
4026 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4027 !PTE_GET(new, PT_MOD | PT_REF))) {
4028
4029 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4030 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4031 ASSERT(pp != NULL);
4032 if (PTE_GET(expected, PT_MOD))
4033 rm |= P_MOD;
4034 if (PTE_GET(expected, PT_REF))
4035 rm |= P_REF;
4036 PTE_CLR(new, PT_MOD | PT_REF);
4037 }
4038
4039 replaced = x86pte_update(ht, entry, expected, new);
4040 if (replaced != expected)
4041 return (replaced);
4042
4043 if (rm) {
4044 /*
4045 * sync to all constituent pages of a large page
4046 */
4047 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4048 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4049 while (pgcnt-- > 0) {
4050 /*
4051 * hat_page_demote() can't decrease
4052 * pszc below this mapping size
4053 * since large mapping existed after we
4054 * took mlist lock.
4055 */
4056 ASSERT(pp->p_szc >= ht->ht_level);
4057 hat_page_setattr(pp, rm);
4058 ++pp;
4059 }
4060 }
4061
4062 return (0);
4063 }
4064
4065 /* ARGSUSED */
4066 void
4067 hat_join_srd(struct hat *hat, vnode_t *evp)
4068 {
4069 }
4070
4071 /* ARGSUSED */
4072 hat_region_cookie_t
4073 hat_join_region(struct hat *hat,
4074 caddr_t r_saddr,
4075 size_t r_size,
4076 void *r_obj,
4077 u_offset_t r_objoff,
4078 uchar_t r_perm,
4079 uchar_t r_pgszc,
4080 hat_rgn_cb_func_t r_cb_function,
4081 uint_t flags)
4082 {
4083 panic("No shared region support on x86");
4084 return (HAT_INVALID_REGION_COOKIE);
4085 }
4086
4087 /* ARGSUSED */
4088 void
4089 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4090 {
4091 panic("No shared region support on x86");
4092 }
4093
4094 /* ARGSUSED */
4095 void
4096 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4097 {
4098 panic("No shared region support on x86");
4099 }
4100
4101
4102 /*
4103 * Kernel Physical Mapping (kpm) facility
4104 *
4105 * Most of the routines needed to support segkpm are almost no-ops on the
4106 * x86 platform. We map in the entire segment when it is created and leave
4107 * it mapped in, so there is no additional work required to set up and tear
4108 * down individual mappings. All of these routines were created to support
4109 * SPARC platforms that have to avoid aliasing in their virtually indexed
4110 * caches.
4111 *
4112 * Most of the routines have sanity checks in them (e.g. verifying that the
4113 * passed-in page is locked). We don't actually care about most of these
4114 * checks on x86, but we leave them in place to identify problems in the
4115 * upper levels.
4116 */
4117
4118 /*
4119 * Map in a locked page and return the vaddr.
4120 */
4121 /*ARGSUSED*/
4122 caddr_t
4123 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4124 {
4125 caddr_t vaddr;
4126
4127 #ifdef DEBUG
4128 if (kpm_enable == 0) {
4129 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4130 return ((caddr_t)NULL);
4131 }
4132
4133 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4134 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4135 return ((caddr_t)NULL);
4136 }
4137 #endif
4138
4139 vaddr = hat_kpm_page2va(pp, 1);
4140
4141 return (vaddr);
4142 }
4143
4144 /*
4145 * Mapout a locked page.
4146 */
4147 /*ARGSUSED*/
4148 void
4149 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4150 {
4151 #ifdef DEBUG
4152 if (kpm_enable == 0) {
4153 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4154 return;
4155 }
4156
4157 if (IS_KPM_ADDR(vaddr) == 0) {
4158 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4159 return;
4160 }
4161
4162 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4163 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4164 return;
4165 }
4166 #endif
4167 }
4168
4169 /*
4170 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4171 * memory addresses that are not described by a page_t. It can
 * also be used for normal pages that are not locked, but beware: this is
 * dangerous because no locking is performed, so the identity of the page
 * could change. hat_kpm_mapin_pfn is not supported when vac_colors > 1,
 * because the chosen va depends on the page identity, which could change.
 * The caller must only pass pfns for valid physical addresses; violating
 * this rule will cause a panic.
4179 */
4180 caddr_t
4181 hat_kpm_mapin_pfn(pfn_t pfn)
4182 {
4183 caddr_t paddr, vaddr;
4184
4185 if (kpm_enable == 0)
4186 return ((caddr_t)NULL);
4187
4188 paddr = (caddr_t)ptob(pfn);
4189 vaddr = (uintptr_t)kpm_vbase + paddr;
4190
4191 return ((caddr_t)vaddr);
4192 }
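
/*
 * Illustrative sketch, not part of the build: hat_kpm_mapin_pfn() is handy
 * for peeking at physical memory that has no page_t behind it, e.g. a
 * firmware-owned frame whose pfn was obtained elsewhere. The pfn and buf
 * below are hypothetical placeholders; the caller is responsible for
 * knowing that pfn refers to a valid physical address:
 *
 *	caddr_t va;
 *
 *	va = hat_kpm_mapin_pfn(pfn);
 *	if (va != NULL) {
 *		bcopy(va, buf, MMU_PAGESIZE);
 *		hat_kpm_mapout_pfn(pfn);
 *	}
 */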
4193
4194 /*ARGSUSED*/
4195 void
4196 hat_kpm_mapout_pfn(pfn_t pfn)
4197 {
4198 /* empty */
4199 }
4200
4201 /*
4202 * Return the kpm virtual address for a specific pfn
4203 */
4204 caddr_t
4205 hat_kpm_pfn2va(pfn_t pfn)
4206 {
4207 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4208
4209 ASSERT(!pfn_is_foreign(pfn));
4210 return ((caddr_t)vaddr);
4211 }
4212
4213 /*
4214 * Return the kpm virtual address for the page at pp.
4215 */
4216 /*ARGSUSED*/
4217 caddr_t
4218 hat_kpm_page2va(struct page *pp, int checkswap)
4219 {
4220 return (hat_kpm_pfn2va(pp->p_pagenum));
4221 }
4222
4223 /*
4224 * Return the page frame number for the kpm virtual address vaddr.
4225 */
4226 pfn_t
4227 hat_kpm_va2pfn(caddr_t vaddr)
4228 {
4229 pfn_t pfn;
4230
4231 ASSERT(IS_KPM_ADDR(vaddr));
4232
4233 pfn = (pfn_t)btop(vaddr - kpm_vbase);
4234
4235 return (pfn);
4236 }
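
/*
 * Illustrative sketch, not part of the build: because kpm on x86 is just a
 * linear window over physical memory starting at kpm_vbase, the conversion
 * routines above are exact inverses of one another; hat_kpm_pfn2va() is
 * simply kpm_vbase + mmu_ptob(pfn) and hat_kpm_va2pfn() undoes it. For any
 * pfn covered by the segment:
 *
 *	caddr_t va = hat_kpm_pfn2va(pfn);
 *
 *	ASSERT(hat_kpm_va2pfn(va) == pfn);
 *	ASSERT(hat_kpm_page2va(pp, 1) == hat_kpm_pfn2va(pp->p_pagenum));
 */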
4237
4238
4239 /*
4240 * Return the page for the kpm virtual address vaddr.
4241 */
4242 page_t *
4243 hat_kpm_vaddr2page(caddr_t vaddr)
4244 {
4245 pfn_t pfn;
4246
4247 ASSERT(IS_KPM_ADDR(vaddr));
4248
4249 pfn = hat_kpm_va2pfn(vaddr);
4250
4251 return (page_numtopp_nolock(pfn));
4252 }
4253
4254 /*
4255 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
 * KPM page. This should never happen on x86.
4257 */
4258 int
4259 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4260 {
4261 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4262 (void *)hat, (void *)vaddr);
4263
4264 return (0);
4265 }
4266
4267 /*ARGSUSED*/
4268 void
4269 hat_kpm_mseghash_clear(int nentries)
4270 {}
4271
4272 /*ARGSUSED*/
4273 void
4274 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4275 {}
4276
4277 #ifndef __xpv
4278 void
4279 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4280 offset_t kpm_pages_off)
4281 {
4282 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4283 pfn_t base, end;
4284
	/*
	 * kphysm_add_memory_dynamic() does not set nkpmpgs when the
	 * page_t memory is externally allocated. If nkpmpgs is ever
	 * needed here, that code must be fixed to calculate it
	 * properly in all cases.
	 */
4291
	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * from the base of the dynamic segment, so call memseg_get_start() to
	 * get the actual base of the incoming memory in either case.
	 */
4299
4300 base = memseg_get_start(msp);
4301 end = msp->pages_end;
4302
4303 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4304 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4305 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4306 }
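
/*
 * Illustrative worked example, based on the comment above rather than on
 * any particular machine: suppose a dynamically added segment starts at
 * pfn P and its page_t array is carved out of the first N incoming pages.
 * Then msp->pages_base is P + N while memseg_get_start(msp) returns P, so
 * the hat_devload() above spans mmu_ptob(pages_end - P) bytes and maps the
 * metadata pages into segkpm along with the data pages. When the metadata
 * comes from existing memory instead, memseg_get_start(msp) simply equals
 * msp->pages_base and only the incoming data pages are mapped.
 */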
4307
4308 void
4309 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4310 {
4311 _NOTE(ARGUNUSED(msp));
4312 }
4313
4314 void
4315 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4316 {
4317 _NOTE(ARGUNUSED(msp));
4318 }
4319
/*
 * Return the end of the metadata for an already set up memseg.
 * x86 platforms don't need per-page metadata to support kpm.
 */
4324 caddr_t
4325 hat_kpm_mseg_reuse(struct memseg *msp)
4326 {
4327 return ((caddr_t)msp->epages);
4328 }
4329
4330 void
4331 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4332 {
4333 _NOTE(ARGUNUSED(msp, mspp));
4334 ASSERT(0);
4335 }
4336
4337 void
4338 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4339 struct memseg *lo, struct memseg *mid, struct memseg *hi)
4340 {
4341 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4342 ASSERT(0);
4343 }
4344
4345 /*
4346 * Walk the memsegs chain, applying func to each memseg span.
4347 */
4348 void
4349 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4350 {
4351 pfn_t pbase, pend;
4352 void *base;
4353 size_t size;
4354 struct memseg *msp;
4355
4356 for (msp = memsegs; msp; msp = msp->next) {
4357 pbase = msp->pages_base;
4358 pend = msp->pages_end;
4359 base = ptob(pbase) + kpm_vbase;
4360 size = ptob(pend - pbase);
4361 func(arg, base, size);
4362 }
4363 }
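
/*
 * Illustrative sketch, not part of the build: the callback passed to
 * hat_kpm_walk() receives the kpm virtual base and size of each memseg
 * span. A hypothetical callback that simply totals the kpm-mapped bytes
 * might look like this:
 *
 *	static void
 *	count_kpm_span(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *
 *	hat_kpm_walk(count_kpm_span, &total);
 */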
4364
4365 #else /* __xpv */
4366
/*
 * There are specific hypervisor calls to establish and remove mappings
 * for grant table references and for the privcmd driver. We have to
 * ensure that a page table actually exists.
 */
4372 void
4373 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4374 {
4375 maddr_t base_ma;
4376 htable_t *ht;
4377 uint_t entry;
4378
4379 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4380 XPV_DISALLOW_MIGRATE();
4381 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4382
	/*
	 * If an address for pte_ma is passed in, return the MA of the PTE
	 * for this specific address. The returned MA is only valid as long
	 * as the htable stays locked.
	 */
4388 if (pte_ma != NULL) {
4389 entry = htable_va2entry((uintptr_t)addr, ht);
4390 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4391 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4392 }
4393 XPV_ALLOW_MIGRATE();
4394 }
4395
4396 void
4397 hat_release_mapping(hat_t *hat, caddr_t addr)
4398 {
4399 htable_t *ht;
4400
4401 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4402 XPV_DISALLOW_MIGRATE();
4403 ht = htable_lookup(hat, (uintptr_t)addr, 0);
4404 ASSERT(ht != NULL);
4405 ASSERT(ht->ht_busy >= 2);
4406 htable_release(ht);
4407 htable_release(ht);
4408 XPV_ALLOW_MIGRATE();
4409 }
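
/*
 * Illustrative sketch, not part of the build: these two routines bracket
 * the hypervisor operations that install and remove a foreign mapping at
 * `addr'. hat_prepare_mapping() guarantees the covering page table exists
 * and, if asked, reports the machine address of the PTE slot for the
 * hypercall to fill in; hat_release_mapping() drops the extra htable hold
 * once the foreign mapping has been removed again. The hat and addr below
 * are placeholders for whatever HAT and address the caller is working on:
 *
 *	uint64_t pte_ma;
 *
 *	hat_prepare_mapping(hat, addr, &pte_ma);
 *	... issue the grant-table or privcmd hypercall using pte_ma ...
 *	... later, once the foreign mapping has been removed ...
 *	hat_release_mapping(hat, addr);
 */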
4410 #endif /* __xpv */